diff --git a/CHANGELOG.md b/CHANGELOG.md index b72264f..6d76b71 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,31 +35,12 @@ more deterministic account management. ### Docs - **Multi-account guide**: clarified repair/quarantine behavior and legacy handling. -## [4.5.24] - 2026-01-31 - -**UI Polish release**: Final alignment tweaks for `codex-status`. - -### Fixed -- **Dashboard Padding**: fine-tuned padding for Plan column and Usage bars to perfectly match the grid alignment. - -## [4.5.23] - 2026-01-31 - -**UI Polish release**: New "Obsidian Dashboard" theme for `codex-status`. - -### Changed -- **Visual Overhaul**: `codex-status` now features the "Obsidian Dashboard" theme: - - **High-Density Layout**: Clean, wall-less design with perfect grid alignment. - - **Status Pills**: Color-coded badges (ACTIVE/ENABLED/DISABLED) for instant status recognition. - - **Dynamic Usage Bars**: Quota bars shift color (Blue -> Yellow -> Red) based on consumption. - - **Precision Timers**: Reset timers now include dates for long-term limits (e.g., `resets 09:32 on 3 Feb`). - ## [4.5.21] - 2026-01-31 **Parallel Rotation Resilience release**: atomic sync across multiple machine sessions. ### Fixed - **Unauthorized Recovery**: main request loop now recovers from `401 Unauthorized` by re-syncing with disk and retrying, matching the official Codex CLI's robustness. -- **Ghost Rotations**: `codex-status` tool now persists rotated tokens to disk immediately, preventing drift that previously invalidated other active sessions. - **Token Overwrite Race**: implemented **Timestamp Arbitration** in storage merges; the most recently active session (`lastUsed`) now always wins, preventing stale sessions from corrupting the authoritative machine state. ### Changed @@ -67,14 +48,12 @@ more deterministic account management. ## [4.5.20] - 2026-01-31 -**Reliability & Identity Hardening release**: codex-status stability, memory safety, and identity-based tracking. 
+**Reliability & Identity Hardening release**: status tracking stability, memory safety, and identity-based tracking. ### Changed -- **codex-status Read-Only**: tool no longer forces token refresh; uses existing valid tokens and falls back to cached snapshots on failure. - **Identity-Based Trackers**: `HealthScoreTracker` and `TokenBucketTracker` now key on `accountId|email|plan` instead of array index for stability across account changes. - **Periodic Cleanup**: both trackers now auto-prune stale entries (24h for health, 1h for tokens) to prevent memory growth. - **Console Logging**: migrated `console.error` calls to `logWarn` to respect debug settings and avoid TUI corruption. -- **Command Templates**: updated slash commands to output results exactly as returned by tools. ### Added - **logCritical()**: new always-enabled logger for critical issues that bypass debug flags. @@ -84,36 +63,19 @@ more deterministic account management. ### Fixed - **Memory Leak**: `RateLimitTracker` now cleans up stale entries periodically (every 60s). -## [4.5.19] - 2026-01-31 - -**Slash Commands release**: register codex tools as TUI slash commands. - -### Added -- **Slash Commands**: `/codex-status`, `/codex-switch-accounts`, `/codex-toggle-account` now available via config hook. - -## [4.5.18] - 2026-01-31 - -**Tool Consolidation release**: finalized renaming of status and account management tools to match the `codex-*` namespace. - -### Changed -- **Tool Consolidation**: renamed `openai-accounts` and `status-codex` to a single authoritative `codex-status` tool. -- **Account Management**: renamed `openai-accounts-switch` to `codex-switch-accounts` and `openai-accounts-toggle` to `codex-toggle-account`. -- **Lazy Refresh**: tool calls now only refresh tokens if they are within the `tokenRefreshSkewMs` window (default 60s), reducing unnecessary network roundtrips. 
-- **Code Hardening**: implemented SSE stream memory guards (1MB buffer limit) and removed redundant imports and logic in `index.ts`. - ## [4.5.17] - 2026-01-31 **Authoritative Status release**: active fetching from official OpenAI `/wham/usage` endpoints and perfect protocol alignment. ### Added -- **Active Usage Fetching**: tools now actively fetch real-time rate limit data from `https://chatgpt.com/backend-api/wham/usage` (ChatGPT plans) and `https://api.openai.com/api/codex/usage` (API plans). +- **Active Usage Fetching**: status tracking now actively fetches real-time rate limit data from `https://chatgpt.com/backend-api/wham/usage` (ChatGPT plans) and `https://api.openai.com/api/codex/usage` (API plans). - **Protocol Alignment**: refactored `CodexStatusManager` to match the official `codex-rs v0.92.0` data structures and TUI formatting. - **Detailed Reset Dates**: long-term resets (>24h) now display the full date and time (e.g., `resets 18:10 on 5 Feb`). ### Changed - **Inverted Usage Display**: status bars now show **"% left"** instead of "% used", correctly representing remaining quota. - **Standardized Labels**: updated window labels to "5 hour limit:" and "Weekly limit:". -- **Proactive Tool Hydration**: tool calls now force a token refresh and identity repair to ensure the authoritative `/usage` endpoint receives a valid Bearer token. +- **Proactive Hydration**: status fetches now force a token refresh and identity repair to ensure the authoritative `/usage` endpoint receives a valid Bearer token. - **Enhanced UI Alignment**: applied strict padding and "Always Render Both" logic to ensure vertical and horizontal table stability even with missing or "unknown" data. ### Fixed @@ -137,8 +99,8 @@ more deterministic account management. **Global Cache path release**: ensure cross-process visibility. 
### Fixed -- **Global Snapshot Path**: corrected `getCachePath` to always use the system configuration directory (`~/.config/opencode/cache`), ensuring that rate limit data captured by the proxy is visible to CLI tools regardless of project scope. -- **Table Alignment**: refactored `openai-accounts` and `status-codex` into a strict ASCII table format to prevent horizontal shifting. +- **Global Snapshot Path**: corrected `getCachePath` to always use the system configuration directory (`~/.config/opencode/cache`), ensuring that rate limit data captured by the proxy is visible across project scopes. +- **Table Alignment**: refactored account/status table output into a strict ASCII format to prevent horizontal shifting. ## [4.5.13] - 2026-01-30 @@ -157,7 +119,7 @@ more deterministic account management. ### Changed - **Async Status Hardening**: refactored `CodexStatusManager` to use non-blocking async I/O (`fs.promises`) and promise-based initialization gates to prevent concurrency races. - **Cross-Process Hydration**: ensured status snapshots are stored globally even when using per-project account storage, allowing all projects to share real-time rate limit visibility. -- **Tool UI Refinement**: refactored `openai-accounts` and `status-codex` output into a strictly aligned ASCII table format for better readability. +- **Status UI Refinement**: refactored account/status output into a strictly aligned ASCII table format for better readability. - **Lost Updates Prevention**: implemented timestamp-based (`updatedAt`) merge arbitration under lock (`proper-lockfile`) to ensure newest state wins across concurrent processes. - **Security Hardening**: primary accounts and snapshots cache files now use restrictive `0600` permissions. @@ -175,7 +137,7 @@ more deterministic account management. - **Persistent Snapshots**: rate limit data is now persisted to `~/.config/opencode/cache/codex-snapshots.json` for cross-process visibility between the proxy and CLI tools. 
### Tests -- **Status Fixtures**: added `codex-status-snapshots.json` and `codex-headers.json` for deterministic testing of rate limit parsing and rendering. +- **Status Fixtures**: added snapshot and header fixtures for deterministic testing of rate limit parsing and rendering. ## [4.5.10] - 2026-01-30 diff --git a/README.md b/README.md index 6dd5951..4a37051 100644 --- a/README.md +++ b/README.md @@ -99,18 +99,6 @@ Personality descriptions come from: The filename (case-insensitive) defines the key (e.g., `Idiot.md`), and the file contents are used verbatim. Built-ins: `none`, `default` (uses model runtime defaults), `friendly`, `pragmatic` (fallback if unset). Any other key requires a matching personality file. ---- -## ⌨️ Slash Commands (TUI) -In the OpenCode TUI, you can use these commands to manage your accounts and monitor usage: - -| Command | Description | -|---------|-------------| -| `/codex-status` | Shows current rate limits (5h/Weekly), credits, and account status (percent left). | -| `/codex-switch-accounts ` | Switch the active account by its 1-based index from the status list. | -| `/codex-toggle-account ` | Enable or disable an account by its 1-based index (prevents auto-selection). | -| `/codex-remove-account ` | Remove an account by its 1-based index. 
| - ---- ## ✅ Features - ChatGPT Plus/Pro OAuth authentication (official flow) - Model presets across GPT‑5.3 Codex / GPT‑5.2 / GPT‑5.2 Codex / GPT‑5.1 families @@ -124,8 +112,8 @@ In the OpenCode TUI, you can use these commands to manage your accounts and moni - Strict account identity matching (`accountId` + `email` + `plan`) - Hybrid account selection strategy (health score + token bucket + LRU bias) - Optional round-robin account rotation (maximum throughput) -- OpenCode TUI toasts + `codex-status` / `codex-switch-accounts` tools -- **Authoritative Codex Status**: Real-time rate limit monitoring (5h/Weekly) with ASCII status bars +- OpenCode TUI toasts for account switching and rate-limit events +- **Authoritative Status Tracking**: Real-time rate limit monitoring (5h/Weekly) with cached snapshots --- ## 🛡️ Safety & Reliability - Hard-stop safety gate for all-accounts rate-limit/auth-failure loops diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index 36c8574..d836c19 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -12,6 +12,6 @@ Bugfix release: toast debounce configuration and legacy account removal. **Reliability & Data Safety** - **Toast Spam Fix**: Notifications for rate-limit account switching now respect the 60s debounce timer (`rateLimitToastDebounceMs`) instead of the 2s dedupe window, preventing UI spam during heavy load. -- **Account Removal Safety**: `codex-remove-account` now safely removes accounts even if their tokens were rotated in memory during the same session, preventing "zombie" accounts from persisting in storage. +- **Account Removal Safety**: account removal now safely handles in-memory token rotations during the same session, preventing "zombie" accounts from persisting in storage. 
**Full Changelog**: https://github.com/iam-brain/opencode-openai-codex-multi-auth/compare/v4.5.24...v4.5.25 diff --git a/config/README.md b/config/README.md index 09f1d52..51ea760 100644 --- a/config/README.md +++ b/config/README.md @@ -6,19 +6,21 @@ This directory contains the official opencode configuration files for the OpenAI **Two configuration files are available based on your OpenCode version:** -| File | OpenCode Version | Description | -|------|------------------|-------------| -| [`opencode-modern.json`](./opencode-modern.json) | **v1.0.210+ (Jan 2026+)** | Compact config using variants system - 6 models with built-in reasoning level variants | -| [`opencode-legacy.json`](./opencode-legacy.json) | **v1.0.209 and below** | Extended config with separate model entries for each reasoning level - 20+ individual model definitions | +| File | OpenCode Version | Description | +| ------------------------------------------------ | ------------------------- | ------------------------------------------------------------------------------------------------------- | +| [`opencode-modern.json`](./opencode-modern.json) | **v1.0.210+ (Jan 2026+)** | Compact config using variants system - 6 models with built-in reasoning level variants | +| [`opencode-legacy.json`](./opencode-legacy.json) | **v1.0.209 and below** | Extended config with separate model entries for each reasoning level - 20+ individual model definitions | ### Which one should I use? 
**If you have OpenCode v1.0.210 or newer** (check with `opencode --version`): + ```bash cp config/opencode-modern.json ~/.config/opencode/opencode.jsonc ``` **If you have OpenCode v1.0.209 or older**: + ```bash cp config/opencode-legacy.json ~/.config/opencode/opencode.jsonc ``` @@ -29,12 +31,13 @@ OpenCode v1.0.210+ introduced a **variants system** that allows defining reasoni **What you get:** -| Config File | Model Families | Reasoning Variants | Total Models | -|------------|----------------|-------------------|--------------| -| `opencode-modern.json` | 6 | Built-in variants (low/medium/high/xhigh) | 6 base models with 19 total variants | -| `opencode-legacy.json` | 6 | Separate model entries | 20 individual model definitions | +| Config File | Model Families | Reasoning Variants | Total Models | +| ---------------------- | -------------- | ----------------------------------------- | ------------------------------------ | +| `opencode-modern.json` | 6 | Built-in variants (low/medium/high/xhigh) | 6 base models with 19 total variants | +| `opencode-legacy.json` | 6 | Separate model entries | 20 individual model definitions | Both configs provide: + - ✅ All supported GPT 5.x variants: gpt-5.3-codex, gpt-5.2, gpt-5.2-codex, gpt-5.1, gpt-5.1-codex, gpt-5.1-codex-max, gpt-5.1-codex-mini - ✅ Proper reasoning effort settings for each variant (including `xhigh` for Codex Max/5.2) - ✅ Context limits (272k context / 128k output for all Codex families) @@ -52,11 +55,13 @@ Both configs provide: ## Usage 1. **Check your OpenCode version**: + ```bash opencode --version ``` 2. **Copy the appropriate config** based on your version: + ```bash # For v1.0.210+ (recommended): cp config/opencode-modern.json ~/.config/opencode/opencode.jsonc @@ -66,6 +71,7 @@ Both configs provide: ``` 3. 
**Run opencode**: + ```bash # Modern config (v1.0.210+): opencode run "task" --model=openai/gpt-5.3-codex --variant=medium diff --git a/config/opencode-legacy.json b/config/opencode-legacy.json index d9d3049..4084769 100644 --- a/config/opencode-legacy.json +++ b/config/opencode-legacy.json @@ -1,667 +1,481 @@ { "$schema": "https://opencode.ai/config.json", - "plugin": [ - "opencode-openai-codex-multi-auth" - ], + "plugin": ["opencode-openai-codex-multi-auth"], "provider": { "openai": { "options": { "reasoningEffort": "medium", "reasoningSummary": "auto", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false }, "models": { "gpt-5.2-none": { - "name": "GPT 5.2 None (Codex)", + "name": "GPT 5.2 None (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "none", "reasoningSummary": "auto", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.2-low": { - "name": "GPT 5.2 Low (Codex)", + "name": "GPT 5.2 Low (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "low", "reasoningSummary": "auto", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.2-medium": { - "name": "GPT 5.2 Medium (Codex)", + "name": "GPT 5.2 Medium (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "medium", 
"reasoningSummary": "auto", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.2-high": { - "name": "GPT 5.2 High (Codex)", + "name": "GPT 5.2 High (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "high", "reasoningSummary": "detailed", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.2-xhigh": { - "name": "GPT 5.2 Extra High (Codex)", + "name": "GPT 5.2 Extra High (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "xhigh", "reasoningSummary": "detailed", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.3-codex-low": { - "name": "GPT 5.3 Codex Low (Codex)", + "name": "GPT 5.3 Codex Low (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "low", "reasoningSummary": "auto", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.3-codex-medium": { - "name": "GPT 5.3 Codex Medium (Codex)", + "name": "GPT 5.3 Codex Medium (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "medium", 
"reasoningSummary": "auto", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.3-codex-high": { - "name": "GPT 5.3 Codex High (Codex)", + "name": "GPT 5.3 Codex High (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "high", "reasoningSummary": "detailed", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.3-codex-xhigh": { - "name": "GPT 5.3 Codex Extra High (Codex)", + "name": "GPT 5.3 Codex Extra High (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "xhigh", "reasoningSummary": "detailed", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.2-codex-low": { - "name": "GPT 5.2 Codex Low (Codex)", + "name": "GPT 5.2 Codex Low (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "low", "reasoningSummary": "auto", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.2-codex-medium": { - "name": "GPT 5.2 Codex Medium (Codex)", + "name": "GPT 5.2 Codex Medium (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, 
"options": { "reasoningEffort": "medium", "reasoningSummary": "auto", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.2-codex-high": { - "name": "GPT 5.2 Codex High (Codex)", + "name": "GPT 5.2 Codex High (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "high", "reasoningSummary": "detailed", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.2-codex-xhigh": { - "name": "GPT 5.2 Codex Extra High (Codex)", + "name": "GPT 5.2 Codex Extra High (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "xhigh", "reasoningSummary": "detailed", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.1-codex-max-low": { - "name": "GPT 5.1 Codex Max Low (Codex)", + "name": "GPT 5.1 Codex Max Low (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "low", "reasoningSummary": "detailed", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.1-codex-max-medium": { - "name": "GPT 5.1 Codex Max Medium (Codex)", + "name": "GPT 5.1 Codex Max Medium (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - 
"text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "medium", "reasoningSummary": "detailed", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.1-codex-max-high": { - "name": "GPT 5.1 Codex Max High (Codex)", + "name": "GPT 5.1 Codex Max High (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "high", "reasoningSummary": "detailed", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.1-codex-max-xhigh": { - "name": "GPT 5.1 Codex Max Extra High (Codex)", + "name": "GPT 5.1 Codex Max Extra High (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "xhigh", "reasoningSummary": "detailed", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.1-codex-low": { - "name": "GPT 5.1 Codex Low (Codex)", + "name": "GPT 5.1 Codex Low (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "low", "reasoningSummary": "auto", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.1-codex-medium": { - "name": "GPT 5.1 Codex Medium (Codex)", + "name": "GPT 5.1 Codex Medium (Codex)", "limit": { "context": 272000, "output": 128000 }, 
"modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "medium", "reasoningSummary": "auto", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.1-codex-high": { - "name": "GPT 5.1 Codex High (Codex)", + "name": "GPT 5.1 Codex High (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "high", "reasoningSummary": "detailed", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.1-codex-mini-medium": { - "name": "GPT 5.1 Codex Mini Medium (Codex)", + "name": "GPT 5.1 Codex Mini Medium (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "medium", "reasoningSummary": "auto", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.1-codex-mini-high": { - "name": "GPT 5.1 Codex Mini High (Codex)", + "name": "GPT 5.1 Codex Mini High (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "high", "reasoningSummary": "detailed", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.1-none": { - "name": "GPT 5.1 None (Codex)", + "name": "GPT 5.1 None (Codex)", "limit": { 
"context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "none", "reasoningSummary": "auto", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.1-low": { - "name": "GPT 5.1 Low (Codex)", + "name": "GPT 5.1 Low (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "low", "reasoningSummary": "auto", "textVerbosity": "low", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.1-medium": { - "name": "GPT 5.1 Medium (Codex)", + "name": "GPT 5.1 Medium (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "medium", "reasoningSummary": "auto", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } }, "gpt-5.1-high": { - "name": "GPT 5.1 High (Codex)", + "name": "GPT 5.1 High (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "options": { "reasoningEffort": "high", "reasoningSummary": "detailed", "textVerbosity": "high", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } } diff --git a/config/opencode-modern.json b/config/opencode-modern.json index 06c397b..613601a 100644 --- a/config/opencode-modern.json +++ 
b/config/opencode-modern.json @@ -1,41 +1,27 @@ { "$schema": "https://opencode.ai/config.json", - "plugin": [ - "opencode-openai-codex-multi-auth" - ], + "plugin": ["opencode-openai-codex-multi-auth@latest"], "provider": { "openai": { "options": { "reasoningEffort": "medium", "reasoningSummary": "auto", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false }, "models": { - "gpt-5.2": { - "name": "GPT 5.2 (Codex)", + "gpt-5.3-codex": { + "name": "GPT 5.3 Codex (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "variants": { - "none": { - "reasoningEffort": "none", - "reasoningSummary": "auto", - "textVerbosity": "medium" - }, "low": { "reasoningEffort": "low", "reasoningSummary": "auto", @@ -58,20 +44,15 @@ } } }, - "gpt-5.3-codex": { - "name": "GPT 5.3 Codex (Codex)", + "gpt-5.2-codex": { + "name": "GPT 5.2 Codex (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "variants": { "low": { @@ -96,22 +77,22 @@ } } }, - "gpt-5.2-codex": { - "name": "GPT 5.2 Codex (Codex)", + "gpt-5.2": { + "name": "GPT 5.2 (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "variants": { + "none": { + "reasoningEffort": "none", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, "low": { "reasoningEffort": "low", "reasoningSummary": "auto", @@ -135,19 +116,14 @@ } }, "gpt-5.1-codex-max": { - "name": "GPT 5.1 Codex Max (Codex)", + "name": "GPT 5.1 Codex Max (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - 
"output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "variants": { "low": { @@ -173,19 +149,14 @@ } }, "gpt-5.1-codex": { - "name": "GPT 5.1 Codex (Codex)", + "name": "GPT 5.1 Codex (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "variants": { "low": { @@ -206,19 +177,14 @@ } }, "gpt-5.1-codex-mini": { - "name": "GPT 5.1 Codex Mini (Codex)", + "name": "GPT 5.1 Codex Mini (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "variants": { "medium": { @@ -234,19 +200,14 @@ } }, "gpt-5.1": { - "name": "GPT 5.1 (Codex)", + "name": "GPT 5.1 (Codex)", "limit": { "context": 272000, "output": 128000 }, "modalities": { - "input": [ - "text", - "image" - ], - "output": [ - "text" - ] + "input": ["text", "image"], + "output": ["text"] }, "variants": { "none": { diff --git a/docs/audit/agent-findings.md b/docs/audit/agent-findings.md index 9b042ae..406e15b 100644 --- a/docs/audit/agent-findings.md +++ b/docs/audit/agent-findings.md @@ -12,14 +12,14 @@ Chronological record of all agent audit findings (spec + quality + general audit - Per-request disk/CPU: - Reads instruction cache/meta each request (`lib/prompts/codex.ts:148`). - Reads models cache + static defaults each request (`lib/prompts/codex-models.ts:93`, `lib/prompts/codex-models.ts:227`). - - Codex status snapshots write per response + per SSE token_count (`lib/codex-status.ts:156`, `lib/codex-status.ts:330`). + - Status snapshots write per response + per SSE token_count (`lib/codex-status.ts:156`, `lib/codex-status.ts:330`). - Existing caching: - Instructions: ETag + 15-min TTL, on-disk cache (`lib/prompts/codex.ts`). 
- Models: on-disk cache exists but server fetch happens before cache (`lib/prompts/codex-models.ts`). - Suggested caching points: - In-memory cache for instructions/model catalog; ETag for `/codex/models`. - Memoize static template defaults to avoid repeated disk reads. - - Debounce codex-status disk writes. + - Debounce status snapshot disk writes. ## 2026-02-06 – General Audit (second pass) diff --git a/docs/configuration.md b/docs/configuration.md index b8d9e63..43a7779 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -18,7 +18,7 @@ Complete reference for configuring the OpenCode OpenAI Codex Auth Plugin. "store": false }, "models": { - "gpt-5.3-codex-low": { + "gpt-5.3-codex": { "name": "GPT 5.3 Codex Low (Codex)", "limit": { "context": 272000, @@ -47,12 +47,14 @@ Complete reference for configuring the OpenCode OpenAI Codex Auth Plugin. Controls computational effort for reasoning. **GPT-5.3-Codex Values:** + - `low` - Fastest for code - `medium` - Balanced (default) - `high` - Maximum code quality - `xhigh` - Extra depth for long-horizon tasks **GPT-5.2 Values** (per OpenAI API docs and Codex CLI `ReasoningEffort` enum): + - `none` - No dedicated reasoning phase (disables reasoning) - `low` - Light reasoning - `medium` - Balanced (default) @@ -60,28 +62,33 @@ Controls computational effort for reasoning. 
- `xhigh` - Extra depth for long-horizon tasks **GPT-5.2-Codex Values:** + - `low` - Fastest for code - `medium` - Balanced (default) - `high` - Maximum code quality - `xhigh` - Extra depth for long-horizon tasks **GPT-5.1 Values** (per OpenAI API docs and Codex CLI `ReasoningEffort` enum): + - `none` - No dedicated reasoning phase (disables reasoning) - `low` - Light reasoning - `medium` - Balanced (default) - `high` - Deep reasoning **GPT-5.1-Codex / GPT-5.1-Codex-Max Values:** + - `low` - Fastest for code - `medium` - Balanced (default) - `high` - Maximum code quality - `xhigh` - Extra depth (Codex Max only) **GPT-5.1-Codex-Mini Values:** + - `medium` - Balanced (default) - `high` - Maximum code quality **Notes**: + - `none` is supported for GPT-5.2 and GPT-5.1 (general purpose) per OpenAI API documentation - `none` is NOT supported for Codex variants (including GPT-5.2 Codex) - it auto-converts to `low` for Codex/Codex Max or `medium` for Codex Mini - `minimal` auto-converts to `low` for Codex models @@ -89,6 +96,7 @@ Controls computational effort for reasoning. - Codex Mini only supports `medium` or `high`; lower settings clamp to `medium` **Example:** + ```json { "options": { @@ -102,6 +110,7 @@ Controls computational effort for reasoning. Controls reasoning summary verbosity. **Values:** + - `auto` - Automatically adapts (default) - `concise` - Short summaries - `detailed` - Verbose summaries @@ -110,6 +119,7 @@ Controls reasoning summary verbosity. `on` is accepted in legacy configs but normalized to `auto`. **Example:** + ```json { "options": { @@ -123,14 +133,17 @@ Controls reasoning summary verbosity. Controls output length. **GPT-5 Values:** + - `low` - Concise - `medium` - Balanced (default) - `high` - Verbose **GPT-5.2-Codex / GPT-5.1-Codex / Codex Max:** + - `medium` or `high` (Codex Max defaults to `medium`) **Example:** + ```json { "options": { @@ -148,6 +161,7 @@ Array of additional response fields to include. 
**Why needed**: Enables multi-turn conversations with `store: false` (stateless mode) **Example:** + ```json { "options": { @@ -163,6 +177,7 @@ Controls server-side conversation persistence. **⚠️ Required**: `false` (for AI SDK 2.0.50+ compatibility) **Values:** + - `false` - Stateless mode (required for Codex API) - `true` - Server-side storage (not supported by Codex API) @@ -170,6 +185,7 @@ Controls server-side conversation persistence. AI SDK 2.0.50+ automatically uses `item_reference` items when `store: true`. The Codex API requires stateless operation (`store: false`), where references cannot be resolved. **Example:** + ```json { "options": { @@ -264,6 +280,7 @@ Different settings for different models: - **`id` field**: DEPRECATED - not used by OpenAI provider **Example Usage:** + ```bash # Use the config key in CLI opencode run "task" --model=openai/my-custom-id @@ -286,21 +303,21 @@ Create named variants for common tasks: ```json { "models": { - "codex-quick": { + "codex-5.3-codex-quick": { "name": "⚡ Quick Code", "options": { "reasoningEffort": "low", "store": false } }, - "codex-balanced": { + "codex-5.3-codex-balanced": { "name": "⚖️ Balanced Code", "options": { "reasoningEffort": "medium", "store": false } }, - "codex-quality": { + "codex-5.3-codex-max-quality": { "name": "🎯 Max Quality", "options": { "reasoningEffort": "high", @@ -320,11 +337,11 @@ Different agents use different models: { "agent": { "commit": { - "model": "openai/gpt-5.3-codex-low", + "model": "openai/gpt-5.3-codex", "prompt": "Generate concise commit messages" }, "review": { - "model": "openai/gpt-5.3-codex-high", + "model": "openai/gpt-5.3-codex", "prompt": "Thorough code review" } } @@ -336,6 +353,7 @@ Different agents use different models: Global config has defaults, project overrides for specific work: **~/.config/opencode/opencode.jsonc** (global, preferred): + ```json { "plugin": ["opencode-openai-codex-multi-auth"], @@ -351,6 +369,7 @@ Global config has defaults, project 
overrides for specific work: ``` **my-project/.opencode.json** (project): + ```json { "provider": { @@ -398,6 +417,7 @@ These settings are merged on top of the OpenCode config at request time, but `op `thinking_summaries` toggles reasoning summaries globally. When omitted, summaries default to on for models that support them and off for models that report `reasoning_summary_format: "none"`. Personality descriptions come from: + - `.opencode/Personalities/*.md` (project-local) - `~/.config/opencode/Personalities/*.md` (global) @@ -425,60 +445,63 @@ Add `$schema` for editor autocompletion: } ``` -| Field | Type | Default | Description | -| :--- | :--- | :--- | :--- | -| `accountSelectionStrategy` | `string` | `"sticky"` | Strategy for selecting accounts (`sticky`, `round-robin`, `hybrid`). | -| `pidOffsetEnabled` | `boolean` | `true` | Enable PID-based offset for parallel agent rotation. | -| `perProjectAccounts` | `boolean` | `false` | If `true`, the plugin will look for and use account storage in `.opencode/openai-codex-accounts.json` relative to the current project. | -| `quietMode` | `boolean` | `false` | Disable TUI toasts for background operations (e.g., token refreshes). | -| `rateLimitToastDebounceMs` | `number` | `60000` | Debounce account/rate-limit toasts. | -| `tokenRefreshSkewMs` | `number` | `60000` | Refresh OAuth tokens this early (ms) before expiry. | -| `proactiveTokenRefresh` | `boolean` | `false` | Enable background token refresh queue (when available). | -| `authDebug` | `boolean` | `false` | Enable debug logging (env aliases supported). | +| Field | Type | Default | Description | +| :------------------------- | :-------- | :--------- | :------------------------------------------------------------------------------------------------------------------------------------- | +| `accountSelectionStrategy` | `string` | `"sticky"` | Strategy for selecting accounts (`sticky`, `round-robin`, `hybrid`). 
| +| `pidOffsetEnabled` | `boolean` | `true` | Enable PID-based offset for parallel agent rotation. | +| `perProjectAccounts` | `boolean` | `false` | If `true`, the plugin will look for and use account storage in `.opencode/openai-codex-accounts.json` relative to the current project. | +| `quietMode` | `boolean` | `false` | Disable TUI toasts for background operations (e.g., token refreshes). | +| `rateLimitToastDebounceMs` | `number` | `60000` | Debounce account/rate-limit toasts. | +| `tokenRefreshSkewMs` | `number` | `60000` | Refresh OAuth tokens this early (ms) before expiry. | +| `proactiveTokenRefresh` | `boolean` | `false` | Enable background token refresh queue (when available). | +| `authDebug` | `boolean` | `false` | Enable debug logging (env aliases supported). | #### Hard-Stop Settings -| Field | Type | Default | Description | -| :--- | :--- | :--- | :--- | -| `hardStopMaxWaitMs` | `number` | `10000` | Maximum wait before returning a hard-stop error when no accounts are available. | -| `hardStopOnUnknownModel` | `boolean` | `true` | Return a hard-stop error for models not in the server catalog. | -| `hardStopOnAllAuthFailed` | `boolean` | `true` | Return a hard-stop error when all accounts are in auth-failure cooldown. | -| `hardStopMaxConsecutiveFailures` | `number` | `5` | Maximum consecutive failures before returning a hard-stop error. | +| Field | Type | Default | Description | +| :------------------------------- | :-------- | :------ | :------------------------------------------------------------------------------ | +| `hardStopMaxWaitMs` | `number` | `10000` | Maximum wait before returning a hard-stop error when no accounts are available. | +| `hardStopOnUnknownModel` | `boolean` | `true` | Return a hard-stop error for models not in the server catalog. | +| `hardStopOnAllAuthFailed` | `boolean` | `true` | Return a hard-stop error when all accounts are in auth-failure cooldown. 
| +| `hardStopMaxConsecutiveFailures` | `number` | `5` | Maximum consecutive failures before returning a hard-stop error. | Default hard-stop wait is 10 seconds; increase `hardStopMaxWaitMs` if you prefer longer waits. #### Scheduling & Retry Settings -| Field | Type | Default | Description | -| :--- | :--- | :--- | :--- | -| `schedulingMode` | `string` | `"cache_first"` | Scheduling strategy (`cache_first`, `balance`, `performance_first`). | -| `maxCacheFirstWaitSeconds` | `number` | `60` | Max seconds to wait in cache-first mode before switching. | -| `switchOnFirstRateLimit` | `boolean` | `true` | Switch accounts immediately on the first rate-limit response. | -| `retryAllAccountsRateLimited` | `boolean` | `false` | Enable global retry loop when all accounts are rate-limited. | -| `retryAllAccountsMaxWaitMs` | `number` | `30000` | Max wait time for all-accounts retry (0 disables the limit). | -| `retryAllAccountsMaxRetries` | `number` | `1` | Max retry cycles when all accounts are rate-limited. | +| Field | Type | Default | Description | +| :---------------------------- | :-------- | :-------------- | :------------------------------------------------------------------- | +| `schedulingMode` | `string` | `"cache_first"` | Scheduling strategy (`cache_first`, `balance`, `performance_first`). | +| `maxCacheFirstWaitSeconds` | `number` | `60` | Max seconds to wait in cache-first mode before switching. | +| `switchOnFirstRateLimit` | `boolean` | `true` | Switch accounts immediately on the first rate-limit response. | +| `retryAllAccountsRateLimited` | `boolean` | `false` | Enable global retry loop when all accounts are rate-limited. | +| `retryAllAccountsMaxWaitMs` | `number` | `30000` | Max wait time for all-accounts retry (0 disables the limit). | +| `retryAllAccountsMaxRetries` | `number` | `1` | Max retry cycles when all accounts are rate-limited. 
| #### Rate-Limit Tuning -| Field | Type | Default | Description | -| :--- | :--- | :--- | :--- | -| `rateLimitDedupWindowMs` | `number` | `2000` | Deduplicate rate-limit events within this window. | -| `rateLimitStateResetMs` | `number` | `120000` | Reset rate-limit state after this idle time. | -| `defaultRetryAfterMs` | `number` | `60000` | Fallback retry-after when headers are missing. | -| `maxBackoffMs` | `number` | `120000` | Cap exponential backoff for rate-limit retries. | -| `requestJitterMaxMs` | `number` | `1000` | Random jitter added to retry delays. | +| Field | Type | Default | Description | +| :----------------------- | :------- | :------- | :------------------------------------------------ | +| `rateLimitDedupWindowMs` | `number` | `2000` | Deduplicate rate-limit events within this window. | +| `rateLimitStateResetMs` | `number` | `120000` | Reset rate-limit state after this idle time. | +| `defaultRetryAfterMs` | `number` | `60000` | Fallback retry-after when headers are missing. | +| `maxBackoffMs` | `number` | `120000` | Cap exponential backoff for rate-limit retries. | +| `requestJitterMaxMs` | `number` | `1000` | Random jitter added to retry delays. | #### Per-Project Storage **What it does:** + - `true`: Looks for `.opencode/openai-codex-accounts.json` in the current working directory (or parent directories). If found, it uses that file for account storage instead of the global file. - `false` (default): Always uses the global accounts file (`~/.config/opencode/openai-codex-accounts.json`). **Use case:** + - Isolating accounts for specific projects (e.g., client projects with dedicated credentials). - Keeping credentials inside a project directory (ensure `.opencode/` is gitignored!). **Behavior:** + - If `perProjectAccounts: true` AND a project-local file exists: Uses project storage. - If `perProjectAccounts: true` AND NO project-local file exists: Falls back to global storage. - If `perProjectAccounts: false`: Always uses global storage. 
@@ -492,42 +515,42 @@ For a detailed guide, see [docs/multi-account.md](multi-account.md). #### Strategy Guide -| Your Setup | Recommended Setting | Why | -|------------|---------------------|-----| -| 1 account | `accountSelectionStrategy: "sticky"` | No rotation needed; best caching | -| 2-4 accounts | `sticky` + `pidOffsetEnabled: true` | Sticky preserves caching, PID offset spreads parallel agents | -| 5+ accounts / best overall | `accountSelectionStrategy: "hybrid"` | Health score + token bucket + LRU bias | -| 5+ accounts / max throughput | `accountSelectionStrategy: "round-robin"` | Maximum distribution (less caching) | +| Your Setup | Recommended Setting | Why | +| ---------------------------- | ----------------------------------------- | ------------------------------------------------------------ | +| 1 account | `accountSelectionStrategy: "sticky"` | No rotation needed; best caching | +| 2-4 accounts | `sticky` + `pidOffsetEnabled: true` | Sticky preserves caching, PID offset spreads parallel agents | +| 5+ accounts / best overall | `accountSelectionStrategy: "hybrid"` | Health score + token bucket + LRU bias | +| 5+ accounts / max throughput | `accountSelectionStrategy: "round-robin"` | Maximum distribution (less caching) | #### Environment Variable Overrides All options can be overridden with env vars: -| Field | Env Var | Notes | -| :--- | :--- | :--- | -| `accountSelectionStrategy` | `CODEX_AUTH_ACCOUNT_SELECTION_STRATEGY` | `sticky`, `round-robin`, `hybrid` | -| `pidOffsetEnabled` | `CODEX_AUTH_PID_OFFSET_ENABLED` | Boolean | -| `perProjectAccounts` | `CODEX_AUTH_PER_PROJECT_ACCOUNTS` | Boolean | -| `quietMode` | `CODEX_AUTH_QUIET` | Boolean | -| `rateLimitToastDebounceMs` | `CODEX_AUTH_RATE_LIMIT_TOAST_DEBOUNCE_MS` | Milliseconds | -| `tokenRefreshSkewMs` | `CODEX_AUTH_TOKEN_REFRESH_SKEW_MS` | Milliseconds | -| `proactiveTokenRefresh` | `CODEX_AUTH_PROACTIVE_TOKEN_REFRESH` | Boolean | -| `authDebug` | `CODEX_AUTH_DEBUG` | Aliases supported (see 
below) | -| `schedulingMode` | `CODEX_AUTH_SCHEDULING_MODE` | `cache_first`, `balance`, `performance_first` | -| `maxCacheFirstWaitSeconds` | `CODEX_AUTH_MAX_CACHE_FIRST_WAIT_SECONDS` | Seconds | -| `switchOnFirstRateLimit` | `CODEX_AUTH_SWITCH_ON_FIRST_RATE_LIMIT` | Boolean | -| `rateLimitDedupWindowMs` | `CODEX_AUTH_RATE_LIMIT_DEDUP_WINDOW_MS` | Milliseconds | -| `rateLimitStateResetMs` | `CODEX_AUTH_RATE_LIMIT_STATE_RESET_MS` | Milliseconds | -| `defaultRetryAfterMs` | `CODEX_AUTH_DEFAULT_RETRY_AFTER_MS` | Milliseconds | -| `maxBackoffMs` | `CODEX_AUTH_MAX_BACKOFF_MS` | Milliseconds | -| `requestJitterMaxMs` | `CODEX_AUTH_REQUEST_JITTER_MAX_MS` | Milliseconds | -| `retryAllAccountsRateLimited` | `CODEX_AUTH_RETRY_ALL_RATE_LIMITED` | Boolean | -| `retryAllAccountsMaxWaitMs` | `CODEX_AUTH_RETRY_ALL_MAX_WAIT_MS` | Milliseconds | -| `retryAllAccountsMaxRetries` | `CODEX_AUTH_RETRY_ALL_MAX_RETRIES` | Number | -| `hardStopMaxWaitMs` | `CODEX_AUTH_HARD_STOP_MAX_WAIT_MS` | Milliseconds | -| `hardStopOnUnknownModel` | `CODEX_AUTH_HARD_STOP_ON_UNKNOWN_MODEL` | Boolean | -| `hardStopOnAllAuthFailed` | `CODEX_AUTH_HARD_STOP_ON_ALL_AUTH_FAILED` | Boolean | -| `hardStopMaxConsecutiveFailures` | `CODEX_AUTH_HARD_STOP_MAX_CONSECUTIVE_FAILURES` | Number | +| Field | Env Var | Notes | +| :------------------------------- | :---------------------------------------------- | :-------------------------------------------- | +| `accountSelectionStrategy` | `CODEX_AUTH_ACCOUNT_SELECTION_STRATEGY` | `sticky`, `round-robin`, `hybrid` | +| `pidOffsetEnabled` | `CODEX_AUTH_PID_OFFSET_ENABLED` | Boolean | +| `perProjectAccounts` | `CODEX_AUTH_PER_PROJECT_ACCOUNTS` | Boolean | +| `quietMode` | `CODEX_AUTH_QUIET` | Boolean | +| `rateLimitToastDebounceMs` | `CODEX_AUTH_RATE_LIMIT_TOAST_DEBOUNCE_MS` | Milliseconds | +| `tokenRefreshSkewMs` | `CODEX_AUTH_TOKEN_REFRESH_SKEW_MS` | Milliseconds | +| `proactiveTokenRefresh` | `CODEX_AUTH_PROACTIVE_TOKEN_REFRESH` | Boolean | +| `authDebug` | 
`CODEX_AUTH_DEBUG` | Aliases supported (see below) | +| `schedulingMode` | `CODEX_AUTH_SCHEDULING_MODE` | `cache_first`, `balance`, `performance_first` | +| `maxCacheFirstWaitSeconds` | `CODEX_AUTH_MAX_CACHE_FIRST_WAIT_SECONDS` | Seconds | +| `switchOnFirstRateLimit` | `CODEX_AUTH_SWITCH_ON_FIRST_RATE_LIMIT` | Boolean | +| `rateLimitDedupWindowMs` | `CODEX_AUTH_RATE_LIMIT_DEDUP_WINDOW_MS` | Milliseconds | +| `rateLimitStateResetMs` | `CODEX_AUTH_RATE_LIMIT_STATE_RESET_MS` | Milliseconds | +| `defaultRetryAfterMs` | `CODEX_AUTH_DEFAULT_RETRY_AFTER_MS` | Milliseconds | +| `maxBackoffMs` | `CODEX_AUTH_MAX_BACKOFF_MS` | Milliseconds | +| `requestJitterMaxMs` | `CODEX_AUTH_REQUEST_JITTER_MAX_MS` | Milliseconds | +| `retryAllAccountsRateLimited` | `CODEX_AUTH_RETRY_ALL_RATE_LIMITED` | Boolean | +| `retryAllAccountsMaxWaitMs` | `CODEX_AUTH_RETRY_ALL_MAX_WAIT_MS` | Milliseconds | +| `retryAllAccountsMaxRetries` | `CODEX_AUTH_RETRY_ALL_MAX_RETRIES` | Number | +| `hardStopMaxWaitMs` | `CODEX_AUTH_HARD_STOP_MAX_WAIT_MS` | Milliseconds | +| `hardStopOnUnknownModel` | `CODEX_AUTH_HARD_STOP_ON_UNKNOWN_MODEL` | Boolean | +| `hardStopOnAllAuthFailed` | `CODEX_AUTH_HARD_STOP_ON_ALL_AUTH_FAILED` | Boolean | +| `hardStopMaxConsecutiveFailures` | `CODEX_AUTH_HARD_STOP_MAX_CONSECUTIVE_FAILURES` | Number | ```bash CODEX_AUTH_ACCOUNT_SELECTION_STRATEGY=round-robin @@ -557,6 +580,7 @@ CODEX_AUTH_NO_BROWSER=1 ``` Deprecated environment aliases (still supported): + - `OPENCODE_OPENAI_AUTH_DEBUG`, `DEBUG_CODEX_PLUGIN` → `CODEX_AUTH_DEBUG` - `OPENCODE_NO_BROWSER`, `OPENCODE_HEADLESS` → `CODEX_AUTH_NO_BROWSER` @@ -594,12 +618,14 @@ Note: legacy `codex-models-cache.json` files are ignored after the per-account c ## Configuration Files **Provided Examples:** + - [config/opencode-modern.json](../config/opencode-modern.json) - Variants-based config for OpenCode v1.0.210+ - [config/opencode-legacy.json](../config/opencode-legacy.json) - Legacy full list for v1.0.209 and below > **⚠️ REQUIRED:** You 
MUST use the config that matches your OpenCode version (`opencode-modern.json` or `opencode-legacy.json`). Minimal configs are NOT supported for GPT 5 models and will fail unpredictably. OpenCode's auto-compaction and usage widgets also require the full config's per-model `limit` metadata. **Your Configs:** + - `~/.config/opencode/opencode.jsonc` - Global config (preferred) - `~/.config/opencode/opencode.json` - Global config (fallback) - `/.opencode.json` - Project-specific config @@ -624,6 +650,7 @@ DEBUG_CODEX_PLUGIN=1 opencode run "test" --model=openai/your-model-name ``` Look for: + ``` [openai-codex-plugin] Model config lookup: "your-model-name" → normalized to "gpt-5.3-codex" for API { hasModelSpecificConfig: true, @@ -653,6 +680,7 @@ Old verbose names still work: **⚠️ IMPORTANT:** Old configs with GPT 5.0 models are deprecated. You MUST migrate to the new GPT 5.x configs (`opencode-modern.json` or `opencode-legacy.json`). **Old config (deprecated):** + ```json { "models": { @@ -690,73 +718,13 @@ Use the official config file (`opencode-modern.json` for v1.0.210+, `opencode-le ``` **Benefits:** + - GPT 5.2/5.1 support (5.0 is deprecated) - Proper limit metadata for OpenCode features - Verified configuration that works reliably --- -## Common Patterns - -### Pattern: Task-Based Presets - -```json -{ - "models": { - "quick-chat": { - "name": "Quick Chat", - "options": { - "reasoningEffort": "minimal", - "textVerbosity": "low", - "store": false - } - }, - "code-gen": { - "name": "Code Generation", - "options": { - "reasoningEffort": "medium", - "store": false - } - }, - "debug-help": { - "name": "Debug Analysis", - "options": { - "reasoningEffort": "high", - "reasoningSummary": "detailed", - "store": false - } - } - } -} -``` - -### Pattern: Cost vs Quality - -```json -{ - "models": { - "economy": { - "name": "Economy Mode", - "options": { - "reasoningEffort": "low", - "textVerbosity": "low", - "store": false - } - }, - "premium": { - "name": "Premium Mode", - 
"options": { - "reasoningEffort": "high", - "textVerbosity": "high", - "store": false - } - } - } -} -``` - ---- - ## Troubleshooting Config ### Model Not Found @@ -766,9 +734,11 @@ Use the official config file (`opencode-modern.json` for v1.0.210+, `opencode-le **Cause**: Config key doesn't match model name in command **Fix**: Use exact config key: + ```json { "models": { "my-model": { ... } } } ``` + ```bash opencode run "test" --model=openai/my-model # Must match exactly ``` @@ -788,14 +758,16 @@ Look for `hasModelSpecificConfig: true` in debug output. **Cause**: Config key in `opencode.json` doesn't match the model name used in CLI **Example Problem:** + ```json { "models": { "gpt-5.3-codex": { "options": { ... } } } } ``` + ```bash --model=openai/gpt-5.3-codex-low # Plugin looks for "gpt-5.3-codex-low" in config ``` -**Fix**: Use exact name you specify in CLI as config key (normalization for API happens *after* config lookup). +**Fix**: Use exact name you specify in CLI as config key (normalization for API happens _after_ config lookup). > **⚠️ Best Practice:** Use the official `opencode-modern.json` or `opencode-legacy.json` configuration instead of creating custom configs. This ensures proper model normalization and compatibility with GPT 5 models. diff --git a/docs/development/ARCHITECTURE.md b/docs/development/ARCHITECTURE.md index b711854..e82ef72 100644 --- a/docs/development/ARCHITECTURE.md +++ b/docs/development/ARCHITECTURE.md @@ -3,6 +3,7 @@ This document explains the technical design decisions, architecture, and implementation details of the OpenAI Codex OAuth plugin for OpenCode. 
## Table of Contents + - [Architecture Overview](#architecture-overview) - [Stateless vs Stateful Mode](#stateless-vs-stateful-mode) - [Message ID Handling](#message-id-handling) @@ -11,7 +12,6 @@ This document explains the technical design decisions, architecture, and impleme - [Comparison with Codex CLI](#comparison-with-codex-cli) - [Design Rationale](#design-rationale) - [Multi-Process State Management](#multi-process-state-management) -- [Codex Status Tool Implementation](#codex-status-tool-implementation) --- @@ -27,9 +27,9 @@ This document explains the technical design decisions, architecture, and impleme ▼ ┌──────────────────────────────┐ │ OpenCode Provider System │ -│ - Loads plugin │ -│ - Calls plugin.auth.loader() │ -│ - Passes provider config │ +│ - Loads plugin │ +│ - Calls plugin.auth.loader()│ +│ - Passes provider config │ └──────┬───────────────────────┘ │ │ Custom fetch() @@ -64,9 +64,10 @@ This document explains the technical design decisions, architecture, and impleme The plugin uses **`store: false`** (stateless mode) because: 1. **ChatGPT Backend Requirement** (confirmed via testing): + ```json // Attempt with store:true → 400 Bad Request - {"detail":"Store must be set to false"} + { "detail": "Store must be set to false" } ``` 2. **Codex CLI Behavior** (`tmp/codex/codex-rs/core/src/client.rs:215-232`): @@ -77,6 +78,7 @@ The plugin uses **`store: false`** (stateless mode) because: ``` **Key Points**: + 1. ✅ **ChatGPT backend REQUIRES store:false** (not optional) 2. ✅ **Codex CLI uses store:false for ChatGPT** 3. 
✅ **Azure requires store:true** (different endpoint, not supported by this plugin) @@ -106,6 +108,7 @@ input: [ ``` **Context is maintained through**: + - ✅ Full message history (LLM sees all previous messages) - ✅ Full tool call history (LLM sees what it did) - ✅ `reasoning.encrypted_content` (preserves reasoning between turns) @@ -114,14 +117,14 @@ input: [ ### Store Comparison -| Aspect | store:false (This Plugin) | store:true (Azure Only) | -|--------|---------------------------|-------------------------| -| **ChatGPT Support** | ✅ Required | ❌ Rejected by API | -| **Message History** | ✅ Sent in each request (no IDs) | Stored on server | -| **Message IDs** | ❌ Must strip all | ✅ Required | -| **AI SDK Compat** | ❌ Must filter `item_reference` | ✅ Works natively | -| **Context** | Full history + encrypted reasoning | Server-stored conversation | -| **Codex CLI Parity** | ✅ Perfect match | ❌ Different mode | +| Aspect | store:false (This Plugin) | store:true (Azure Only) | +| -------------------- | ---------------------------------- | -------------------------- | +| **ChatGPT Support** | ✅ Required | ❌ Rejected by API | +| **Message History** | ✅ Sent in each request (no IDs) | Stored on server | +| **Message IDs** | ❌ Must strip all | ✅ Required | +| **AI SDK Compat** | ❌ Must filter `item_reference` | ✅ Works natively | +| **Context** | Full history + encrypted reasoning | Server-stored conversation | +| **Codex CLI Parity** | ✅ Perfect match | ❌ Different mode | **Decision**: Use **`store:false`** (only option for ChatGPT backend). @@ -132,6 +135,7 @@ input: [ ### The Problem **OpenCode/AI SDK sends two incompatible constructs**: + ```typescript // Multi-turn request from OpenCode const body = { @@ -145,15 +149,18 @@ const body = { ``` **Two issues**: + 1. `item_reference` - AI SDK construct for server state lookup (not in Codex API spec) 2. 
Message IDs - Cause "item not found" with `store: false` **ChatGPT Backend Requirement** (confirmed via testing): + ```json -{"detail":"Store must be set to false"} +{ "detail": "Store must be set to false" } ``` **Errors that occurred**: + ``` ❌ "Item with id 'msg_abc' not found. Items are not persisted when `store` is set to false." ❌ "Missing required parameter: 'input[3].id'" (when item_reference has no ID) @@ -162,15 +169,16 @@ const body = { ### The Solution **Filter AI SDK Constructs + Strip IDs** (`lib/request/request-transformer.ts:114-135`): + ```typescript export function filterInput(input: InputItem[]): InputItem[] { return input .filter((item) => { // Remove AI SDK constructs not supported by Codex API if (item.type === "item_reference") { - return false; // AI SDK only - references server state + return false; // AI SDK only - references server state } - return true; // Keep all other items + return true; // Keep all other items }) .map((item) => { // Strip IDs from all items (stateless mode) @@ -184,6 +192,7 @@ export function filterInput(input: InputItem[]): InputItem[] { ``` **Why this approach?** + 1. ✅ **Filter `item_reference`** - Not in Codex API, AI SDK-only construct 2. ✅ **Keep all messages** - LLM needs full conversation history for context 3. 
✅ **Strip ALL IDs** - Matches Codex CLI stateless behavior @@ -195,13 +204,21 @@ The plugin logs ID filtering for debugging: ```typescript // Before filtering -console.log(`[openai-codex-plugin] Filtering ${originalIds.length} message IDs from input:`, originalIds); +console.log( + `[openai-codex-plugin] Filtering ${originalIds.length} message IDs from input:`, + originalIds, +); // After filtering -console.log(`[openai-codex-plugin] Successfully removed all ${originalIds.length} message IDs`); +console.log( + `[openai-codex-plugin] Successfully removed all ${originalIds.length} message IDs`, +); // Or warning if IDs remain -console.warn(`[openai-codex-plugin] WARNING: ${remainingIds.length} IDs still present after filtering:`, remainingIds); +console.warn( + `[openai-codex-plugin] WARNING: ${remainingIds.length} IDs still present after filtering:`, + remainingIds, +); ``` **Source**: `lib/request/request-transformer.ts:287-301` @@ -221,6 +238,7 @@ body.include = modelConfig.include || ["reasoning.encrypted_content"]; ``` **How it works**: + 1. **Turn 1**: Model generates reasoning, encrypted content returned 2. **Client**: Stores encrypted content locally 3. **Turn 2**: Client sends encrypted content back in request @@ -228,6 +246,7 @@ body.include = modelConfig.include || ["reasoning.encrypted_content"]; 5. **Model**: Has full context without server-side storage **Flow Diagram**: + ``` Turn 1: Client → [Request without IDs] → Server @@ -241,6 +260,7 @@ Client → [Request with encrypted content, no IDs] → Server ``` **Codex CLI equivalent** (`tmp/codex/codex-rs/core/src/client.rs:190-194`): + ```rust let include: Vec = if reasoning.is_some() { vec!["reasoning.encrypted_content".to_string()] @@ -327,26 +347,28 @@ let include: Vec = if reasoning.is_some() { ### What We Match -| Feature | Codex CLI | This Plugin | Match? 
| -|---------|-----------|-------------|--------| -| **OAuth Flow** | ✅ PKCE + ChatGPT login | ✅ Same | ✅ | -| **store Parameter** | `false` (ChatGPT) | `false` | ✅ | -| **Message IDs** | Stripped in stateless | Stripped | ✅ | -| **reasoning.encrypted_content** | ✅ Included | ✅ Included | ✅ | -| **Model Normalization** | Canonical Codex slugs | Canonical Codex slugs | ✅ | -| **Reasoning Effort** | medium (default) | medium (default) | ✅ | -| **Text Verbosity** | Model/config dependent | Model/config dependent | ✅ | +| Feature | Codex CLI | This Plugin | Match? | +| ------------------------------- | ------------------------------------------------------------------ | --------------------------------------------- | ------ | +| **OAuth Flow** | ✅ PKCE + ChatGPT login | ✅ Same | ✅ | +| **store Parameter** | `false` (ChatGPT) | `false` | ✅ | +| **Message IDs** | Stripped in stateless | Stripped | ✅ | +| **reasoning.encrypted_content** | ✅ Included | ✅ Included | ✅ | +| **Model Normalization** | Canonical Codex slugs | Canonical Codex slugs | ✅ | +| **Reasoning Effort** | medium (default) | medium (default) | ✅ | +| **Text Verbosity** | Model/config dependent | Model/config dependent | ✅ | +| **Personality** | Model/config dependent - Pragmatic or Friendly (Default Pragmatic) | Model/config dependent - Default is Pragmatic | ✅ | ### What We Add -| Feature | Codex CLI | This Plugin | Why? 
| -|---------|-----------|-------------|------| -| **OpenCode Runtime Metadata Preservation** | Native runtime | ✅ Preserve env/AGENTS developer messages | Keep harness context intact without duplicating tool contracts | -| **Authoritative Model Catalog** | Native model manager | ✅ `/codex/models` → per-account cache (server-derived), fail closed if unavailable | Strict allowlist + runtime defaults | -| **Orphan Tool Output Handling** | ✅ Drop orphans | ✅ Convert to messages | Preserve context + avoid 400s | -| **Usage-limit messaging** | CLI prints status | ✅ Friendly error summary | Surface 5h/weekly windows in OpenCode | -| **Per-Model Options** | CLI flags | ✅ Config file | Better UX in OpenCode | -| **Custom Model Names** | No | ✅ Display names | UI convenience | +| Feature | Codex CLI | This Plugin | Why? | +| ------------------------------------------ | -------------------- | ----------------------------------------------------------------------------------- | -------------------------------------------------------------- | +| **OpenCode Runtime Metadata Preservation** | Native runtime | ✅ Preserve env/AGENTS developer messages | Keep harness context intact without duplicating tool contracts | +| **Authoritative Model Catalog** | Native model manager | ✅ `/codex/models` → per-account cache (server-derived), fail closed if unavailable | Strict allowlist + runtime defaults | +| **Orphan Tool Output Handling** | ✅ Drop orphans | ✅ Convert to messages | Preserve context + avoid 400s | +| **Usage-limit messaging** | CLI prints status | ✅ Friendly error summary | Surface 5h/weekly windows in OpenCode | +| **Per-Model Options** | CLI flags | ✅ Config file | Better UX in OpenCode | +| **Custom Model Names** | No | ✅ Display names | UI convenience | +| **Custom Personalities** | No | ✅ Want to turn GPT into Claude? Now you can! 
| Enables a better custom fit and experience for each user | --- @@ -355,11 +377,13 @@ let include: Vec = if reasoning.is_some() { ### Why Not store:true? **Pros of store:true**: + - ✅ No ID filtering needed - ✅ Server manages conversation - ✅ Potentially more robust **Cons of store:true**: + - ❌ Diverges from Codex CLI behavior - ❌ Requires conversation ID management - ❌ More complex error handling @@ -372,6 +396,7 @@ let include: Vec = if reasoning.is_some() { **Alternative**: Filter specific ID patterns (`rs_*`, `msg_*`, etc.) **Problem**: + - ID patterns may change - New ID types could be added - Partial filtering is brittle @@ -379,6 +404,7 @@ let include: Vec = if reasoning.is_some() { **Solution**: Remove **ALL** IDs **Rationale**: + - Matches Codex CLI behavior exactly - Future-proof against ID format changes - Simpler implementation (no pattern matching) @@ -389,11 +415,13 @@ let include: Vec = if reasoning.is_some() { **Problem**: The legacy bridge duplicated tool/runtime instructions and drifted out of sync. **Solution**: Remove bridge injection entirely and rely on: + - Codex `instructions` - OpenCode runtime metadata (environment + AGENTS/custom instructions) - Live tool schemas **Benefits**: + - ✅ No stale tool-contract prose - ✅ Fewer conflicting top-priority instruction layers - ✅ Better parity with real Codex runtime behavior @@ -403,6 +431,7 @@ let include: Vec = if reasoning.is_some() { **Alternative**: Single global config **Problem**: + - `gpt-5.3-codex` optimal settings differ from `gpt-5.3` - Users want quick switching between quality levels - No way to save "presets" @@ -410,6 +439,7 @@ let include: Vec = if reasoning.is_some() { **Solution**: Per-model options in config **Benefits**: + - ✅ Save multiple configurations - ✅ Quick switching (no CLI args) - ✅ Descriptive names ("Fast", "Balanced", "Max Quality") @@ -424,16 +454,19 @@ let include: Vec = if reasoning.is_some() { ### Common Errors #### 1. 
"Item with id 'X' not found" + **Cause**: Message ID leaked through filtering **Fix**: Improved `filterInput()` removes ALL IDs **Prevention**: Debug logging catches remaining IDs #### 2. Token Expiration + **Cause**: OAuth access token expired **Fix**: `shouldRefreshToken()` checks expiration **Prevention**: Auto-refresh before requests #### 3. "store: false" Validation Error (Azure) + **Cause**: Azure doesn't support stateless mode **Workaround**: Codex CLI uses `store: true` for Azure only **This Plugin**: Only supports ChatGPT OAuth (no Azure) @@ -441,14 +474,17 @@ let include: Vec = if reasoning.is_some() { ### Hard-Stop Error Handling **Unsupported model**: + - Trigger: model not in `/codex/models` (including custom IDs) - Response: synthetic error with `type: unsupported_model`, `param: model`, and attempted model ID **Catalog unavailable**: + - Trigger: `/codex/models` unavailable and no cached catalog - Response: synthetic `unsupported_model` error with catalog context in the message **All accounts unavailable**: + - Trigger: all accounts rate-limited beyond `hardStopMaxWaitMs` or all accounts in auth-failure cooldown - Response: synthetic errors `all_accounts_rate_limited` (HTTP 429) or `all_accounts_auth_failed` (HTTP 401) @@ -459,40 +495,22 @@ let include: Vec = if reasoning.is_some() { The plugin is designed to operate safely across multiple concurrent processes (e.g., the OpenCode proxy and CLI tools). ### Account Synchronization + - **Dirty Tracking**: The plugin tracks the `originalRefreshToken` to detect if the disk state has changed since the account was loaded. - **Lazy Fallback**: If a token refresh fails, the plugin re-loads the account from disk to ensure it's not using a stale refresh token that was already rotated by another process. - **Lock-Merge-Write**: All account updates use `proper-lockfile` to ensure atomic reads and writes. 
### Codex Status Snapshots + - **Disk Persistence**: Snapshots are stored in `~/.config/opencode/cache/codex-snapshots.json`. -- **Authoritative Fetching**: The plugin actively polls the official OpenAI `/wham/usage` (ChatGPT) and `/api/codex/usage` (API) endpoints during status tool calls to ensure 100% accurate telemetry. -- **Concurrency Strategy**: +- **Authoritative Fetching**: The plugin polls the official OpenAI `/wham/usage` (ChatGPT) and `/api/codex/usage` (API) endpoints during status tracking to ensure accurate telemetry. +- **Concurrency Strategy**: - **Async Locking**: Uses `proper-lockfile` to coordinate access between the proxy and CLI tools. - **Merge-on-Save**: When saving, the plugin re-loads snapshots from disk and merges them with in-memory state, using a timestamp-based (`updatedAt`) check to ensure the newest data always wins. - **Initialization Gate**: Uses a promise-based gate (`initPromise`) to ensure concurrent calls wait for the initial disk load, preventing data loss. --- -## Codex Status Tool Implementation - -The `codex-status` tool provides real-time visibility into OpenAI's backend rate limits, perfectly mimicking the behavior of `codex-rs v0.92.0`. - -### Data Capture -The plugin retrieves usage data from the authoritative `/wham/usage` endpoint: -- **Primary Window**: Maps to the "5 hour limit:". -- **Secondary Window**: Maps to the "Weekly limit:". -- **Credits**: Captures balance and unlimited status for Team/Enterprise accounts. -- **SSE Interception**: As a real-time fallback, the plugin also scans the live response stream for `token_count` events to update limits immediately during model usage. - -### Rendering -- **Inverted Logic**: Displays "% left" instead of "% used" (e.g., `100 - usedPercent`), matching the official CLI. -- **ASCII Bars**: Filled portion (`█`) represents remaining quota; empty portion (`░`) represents used quota. -- **Dynamic Labels**: Explicitly labeled as "5 hour limit:" and "Weekly limit:" for clarity. 
-- **Detailed Resets**: Resets more than 24 hours away include the full date (e.g., `resets 18:10 on 5 Feb`). -- **Vertical Stability**: Both Primary and Weekly bars are always rendered (showing `unknown` if no data) to maintain a consistent table layout. - ---- - ## Performance Considerations ### Request Optimization @@ -523,6 +541,7 @@ The plugin retrieves usage data from the authoritative `/wham/usage` endpoint: --- ## See Also + - [CONFIG_FLOW.md](./CONFIG_FLOW.md) - Configuration system guide - [Codex CLI Source](https://github.com/openai/codex) - Official implementation - [OpenCode Source](https://github.com/sst/opencode) - OpenCode implementation diff --git a/docs/development/CONFIG_FIELDS.md b/docs/development/CONFIG_FIELDS.md index 77ee553..af98ac2 100644 --- a/docs/development/CONFIG_FIELDS.md +++ b/docs/development/CONFIG_FIELDS.md @@ -30,6 +30,7 @@ Understanding the difference between config key, `id`, and `name` fields in Open **Example:** `"gpt-5.1-codex-low"` **Used For:** + - ✅ CLI `--model` flag: `--model=openai/gpt-5.3-codex-low` - ✅ OpenCode internal lookups: `provider.info.models["gpt-5.3-codex-low"]` - ✅ TUI persistence: Saved to `~/.config/opencode/tui` as `model_id = "gpt-5.3-codex-low"` @@ -47,16 +48,19 @@ Understanding the difference between config key, `id`, and `name` fields in Open **Example:** `"gpt-5.1-codex"` **What it's used for:** + - ⚠️ **Other providers**: Some providers use this for `sdk.languageModel(id)` - ⚠️ **Sorting**: Used for model priority sorting in OpenCode - ⚠️ **Documentation**: Indicates the "canonical" model ID **What it's NOT used for with OpenAI:** + - ❌ **NOT sent to AI SDK** (config key is sent instead) - ❌ **NOT used by plugin** (plugin receives config key) - ❌ **NOT required** (OpenCode defaults it to config key) **Code Reference:** (`tmp/opencode/packages/opencode/src/provider/provider.ts:252`) + ```typescript const parsedModel: ModelsDev.Model = { id: model.id ?? 
modelID, // ← Defaults to config key if omitted @@ -65,14 +69,15 @@ const parsedModel: ModelsDev.Model = { ``` **OpenAI Custom Loader:** (`tmp/opencode/packages/opencode/src/provider/provider.ts:58-65`) + ```typescript openai: async () => { return { async getModel(sdk: any, modelID: string) { - return sdk.responses(modelID) // ← Receives CONFIG KEY, not id field! - } - } -} + return sdk.responses(modelID); // ← Receives CONFIG KEY, not id field! + }, + }; +}; ``` **Our plugin receives:** `body.model = "gpt-5.3-codex-low"` (config key, NOT id field) @@ -86,10 +91,12 @@ openai: async () => { **Example:** `"GPT 5 Codex Low (Codex)"` **Used For:** + - ✅ **TUI Model Picker**: Display name shown in the model selection UI - ℹ️ **Documentation**: Human-friendly description **Code Reference:** (`tmp/opencode/packages/opencode/src/provider/provider.ts:253`) + ```typescript const parsedModel: ModelsDev.Model = { name: model.name ?? existing?.name ?? modelID, // Defaults to config key @@ -109,21 +116,21 @@ const parsedModel: ModelsDev.Model = { ├─────────────────────────────────────────────────────────────────┤ │ │ │ CLI Usage: │ -│ $ opencode run --model=openai/gpt-5.3-codex-low │ +│ $ opencode run --model=openai/gpt-5.3-codex-low │ │ └──────┬──────┘ │ │ CONFIG KEY │ │ │ │ TUI Display: │ -│ ┌──────────────────────────────────┐ │ -│ │ Select Model: │ │ -│ │ │ │ -│ │ ○ GPT 5 Codex Low (Codex) ←──────┼── name field │ -│ │ ○ GPT 5 Codex Medium (Codex) │ │ -│ │ ○ GPT 5 Codex High (Codex) │ │ -│ └──────────────────────────────────┘ │ +│ ┌──────────────────────────────────┐ │ +│ │ Select Model: │ │ +│ │ │ │ +│ │ ○ GPT 5 Codex Low (Codex) ←──────┼── name field │ +│ │ ○ GPT 5 Codex Medium (Codex) │ │ +│ │ ○ GPT 5 Codex High (Codex) │ │ +│ └──────────────────────────────────┘ │ │ │ -│ Config Lookup (Plugin): │ -│ userConfig.models["gpt-5.3-codex-low"].options │ +│ Config Lookup (Plugin): │ +│ userConfig.models["gpt-5.3-codex-low"].options │ │ └──────┬──────┘ │ │ CONFIG KEY │ │ │ @@ 
-134,44 +141,44 @@ const parsedModel: ModelsDev.Model = { ├─────────────────────────────────────────────────────────────────┤ │ │ │ 1. User Selection │ -│ opencode run --model=openai/gpt-5.3-codex-low │ +│ opencode run --model=openai/gpt-5.3-codex-low │ │ OpenCode parses: providerID="openai" │ -│ modelID="gpt-5.3-codex-low" ← CONFIG KEY │ +│ modelID="gpt-5.3-codex-low" ← CONFIG KEY │ │ │ │ 2. OpenCode Provider Lookup │ -│ provider.info.models["gpt-5.3-codex-low"] │ +│ provider.info.models["gpt-5.3-codex-low"] │ │ └──────┬──────┘ │ │ CONFIG KEY │ │ │ │ 3. Custom Loader Call (OpenAI) │ -│ getModel(sdk, "gpt-5.3-codex-low") │ +│ getModel(sdk, "gpt-5.3-codex-low") │ │ └──────┬──────┘ │ │ CONFIG KEY │ │ │ │ 4. AI SDK Request Creation │ -│ { model: "gpt-5.3-codex-low", ... } │ +│ { model: "gpt-5.3-codex-low", ... } │ │ └──────┬──────┘ │ │ CONFIG KEY │ │ │ │ 5. Custom fetch() (Our Plugin) │ -│ body.model = "gpt-5.3-codex-low" │ +│ body.model = "gpt-5.3-codex-low" │ │ └──────┬──────┘ │ │ CONFIG KEY │ │ │ │ 6. Plugin Config Lookup │ -│ userConfig.models["gpt-5.3-codex-low"].options │ +│ userConfig.models["gpt-5.3-codex-low"].options │ │ └──────┬──────┘ │ │ CONFIG KEY │ -│ Result: { reasoningEffort: "low", ... } ✅ FOUND │ +│ Result: { reasoningEffort: "low", ... } ✅ FOUND │ │ │ │ 7. Plugin Normalization │ -│ normalizeModel("gpt-5.3-codex-low") │ -│ Returns: "gpt-5.3-codex" ← SENT TO CODEX API │ +│ normalizeModel("gpt-5.3-codex-low") │ +│ Returns: "gpt-5.3-codex" ← SENT TO CODEX API │ │ │ │ 8. 
TUI Persistence │ │ ~/.config/opencode/tui: │ │ provider_id = "openai" │ -│ model_id = "gpt-5.3-codex-low" ← CONFIG KEY persisted │ +│ model_id = "gpt-5.3-codex-low" ← CONFIG KEY persisted │ │ │ └─────────────────────────────────────────────────────────────────┘ ``` @@ -189,6 +196,7 @@ const parsedModel: ModelsDev.Model = { ``` **Purpose:** + - 🎯 **PRIMARY identifier** - used everywhere in OpenCode - 🎯 **Plugin receives this** - what our plugin sees in `body.model` - 🎯 **Config lookup key** - how plugin finds per-model options @@ -207,6 +215,7 @@ const parsedModel: ModelsDev.Model = { ``` **Purpose:** + - 📝 **Documents** what base model this variant uses - 📝 **Helps sorting** in model lists - 📝 **Clarity** - shows relationship between variants @@ -226,6 +235,7 @@ const parsedModel: ModelsDev.Model = { ``` **Purpose:** + - 🎨 **TUI display** - what users see in model picker - 🎨 **User-friendly** - can be descriptive - 🎨 **Differentiation** - helps distinguish from API key models @@ -249,6 +259,7 @@ const parsedModel: ModelsDev.Model = { ``` **When user selects `openai/gpt-5.3-codex-low`:** + - CLI: Uses `"gpt-5.3-codex-low"` (config key) - TUI: Shows `"GPT 5 Codex Low (Codex)"` (name field) - Plugin receives: `body.model = "gpt-5.3-codex-low"` (config key) @@ -275,6 +286,7 @@ const parsedModel: ModelsDev.Model = { ``` **Why this works:** + - Config keys are different: `"gpt-5.3-codex-low"` vs `"gpt-5.3-codex-high"` ✅ - Same `id` is fine - it's just metadata - Different `name` values help distinguish in TUI @@ -292,7 +304,8 @@ const parsedModel: ModelsDev.Model = { "name": "GPT 5 Codex Low (Codex)", "options": { "reasoningEffort": "low" } }, - "gpt-5.3-codex": { // ❌ DUPLICATE KEY ERROR! + "gpt-5.3-codex": { + // ❌ DUPLICATE KEY ERROR! 
"id": "gpt-5.3-codex", "name": "GPT 5 Codex High (Codex)", "options": { "reasoningEffort": "high" } @@ -314,22 +327,26 @@ const parsedModel: ModelsDev.Model = { ```json { - "gpt-5.3-codex-low": { // ← Unique config key #1 - "id": "gpt-5.3-codex", // ← Same base model + "gpt-5.3-codex-low": { + // ← Unique config key #1 + "id": "gpt-5.3-codex", // ← Same base model "options": { "reasoningEffort": "low" } }, - "gpt-5.3-codex-medium": { // ← Unique config key #2 - "id": "gpt-5.3-codex", // ← Same base model + "gpt-5.3-codex-medium": { + // ← Unique config key #2 + "id": "gpt-5.3-codex", // ← Same base model "options": { "reasoningEffort": "medium" } }, - "gpt-5.3-codex-high": { // ← Unique config key #3 - "id": "gpt-5.3-codex", // ← Same base model + "gpt-5.3-codex-high": { + // ← Unique config key #3 + "id": "gpt-5.3-codex", // ← Same base model "options": { "reasoningEffort": "high" } } } ``` **Result:** + - 3 selectable variants in TUI ✅ - Same API model (`gpt-5.3-codex`) ✅ - Different reasoning settings ✅ @@ -342,24 +359,29 @@ const parsedModel: ModelsDev.Model = { ### Config Changes are Safe ✅ **Old Plugin + Old Config:** + ```json -"GPT 5 Codex Low (ChatGPT Subscription)": { +"GPT 5 Codex Low (Codex)": { "id": "gpt-5.3-codex", "options": { "reasoningEffort": "low" } } ``` + **Result:** ❌ Per-model options broken (existing bug in old plugin) **New Plugin + Old Config:** + ```json -"GPT 5 Codex Low (ChatGPT Subscription)": { +"GPT 5 Codex Low (Codex)": { "id": "gpt-5.3-codex", "options": { "reasoningEffort": "low" } } ``` + **Result:** ✅ Per-model options work! (bug fixed) **New Plugin + New Config:** + ```json "gpt-5.3-codex-low": { "id": "gpt-5.3-codex", @@ -367,9 +389,11 @@ const parsedModel: ModelsDev.Model = { "options": { "reasoningEffort": "low" } } ``` + **Result:** ✅ Per-model options work! 
(bug fixed + cleaner naming) **Conclusion:** + - ✅ Existing configs continue to work - ✅ New configs work better - ✅ Users can migrate at their own pace @@ -395,6 +419,7 @@ const parsedModel: ModelsDev.Model = { ``` **What it does:** + - `false` (required): Prevents AI SDK from using `item_reference` for conversation history - `true` (default): Uses server-side storage with references (incompatible with Codex API) @@ -404,50 +429,52 @@ const parsedModel: ModelsDev.Model = { Plugin config is stored in `~/.config/opencode/openai-codex-auth-config.json`. -| Field | Type | Default | Description | -| :--- | :--- | :--- | :--- | -| `accountSelectionStrategy` | `string` | `"sticky"` | Account selection (`sticky`, `round-robin`, `hybrid`). | -| `pidOffsetEnabled` | `boolean` | `true` | PID-based starting offset for parallel agents. | -| `perProjectAccounts` | `boolean` | `false` | Use `.opencode/openai-codex-accounts.json` when present. | -| `quietMode` | `boolean` | `false` | Reduce background toasts. | -| `rateLimitToastDebounceMs` | `number` | `60000` | Debounce toast notifications. | -| `tokenRefreshSkewMs` | `number` | `60000` | Refresh tokens this early before expiry. | -| `proactiveTokenRefresh` | `boolean` | `false` | Enable proactive refresh queue when available. | -| `authDebug` | `boolean` | `false` | Debug logging (see env aliases). | -| `retryAllAccountsRateLimited` | `boolean` | `false` | Enable global wait-and-retry when all accounts rate-limited. | -| `retryAllAccountsMaxWaitMs` | `number` | `30000` | Max wait time for global retry (0 disables limit). | -| `retryAllAccountsMaxRetries` | `number` | `1` | Max retry cycles for global wait loop. | -| `hardStopMaxWaitMs` | `number` | `10000` | Hard-stop wait threshold for all-accounts rate limits. | -| `hardStopOnUnknownModel` | `boolean` | `true` | Hard-stop when model not in server catalog. | -| `hardStopOnAllAuthFailed` | `boolean` | `true` | Hard-stop when all accounts in auth-failure cooldown. 
| -| `hardStopMaxConsecutiveFailures` | `number` | `5` | Hard-stop after consecutive failures. | -| `schedulingMode` | `string` | `"cache_first"` | Scheduling strategy (`cache_first`, `balance`, `performance_first`). | -| `maxCacheFirstWaitSeconds` | `number` | `60` | Cache-first wait before switching. | -| `switchOnFirstRateLimit` | `boolean` | `true` | Switch accounts immediately on rate limit. | -| `rateLimitDedupWindowMs` | `number` | `2000` | Deduplicate rate-limit events. | -| `rateLimitStateResetMs` | `number` | `120000` | Reset rate-limit state after idle window. | -| `defaultRetryAfterMs` | `number` | `60000` | Fallback retry-after when headers missing. | -| `maxBackoffMs` | `number` | `120000` | Cap exponential backoff. | -| `requestJitterMaxMs` | `number` | `1000` | Random jitter added to retry delays. | +| Field | Type | Default | Description | +| :------------------------------- | :-------- | :-------------- | :------------------------------------------------------------------- | +| `accountSelectionStrategy` | `string` | `"sticky"` | Account selection (`sticky`, `round-robin`, `hybrid`). | +| `pidOffsetEnabled` | `boolean` | `true` | PID-based starting offset for parallel agents. | +| `perProjectAccounts` | `boolean` | `false` | Use `.opencode/openai-codex-accounts.json` when present. | +| `quietMode` | `boolean` | `false` | Reduce background toasts. | +| `rateLimitToastDebounceMs` | `number` | `60000` | Debounce toast notifications. | +| `tokenRefreshSkewMs` | `number` | `60000` | Refresh tokens this early before expiry. | +| `proactiveTokenRefresh` | `boolean` | `false` | Enable proactive refresh queue when available. | +| `authDebug` | `boolean` | `false` | Debug logging (see env aliases). | +| `retryAllAccountsRateLimited` | `boolean` | `false` | Enable global wait-and-retry when all accounts rate-limited. | +| `retryAllAccountsMaxWaitMs` | `number` | `30000` | Max wait time for global retry (0 disables limit). 
| +| `retryAllAccountsMaxRetries` | `number` | `1` | Max retry cycles for global wait loop. | +| `hardStopMaxWaitMs` | `number` | `10000` | Hard-stop wait threshold for all-accounts rate limits. | +| `hardStopOnUnknownModel` | `boolean` | `true` | Hard-stop when model not in server catalog. | +| `hardStopOnAllAuthFailed` | `boolean` | `true` | Hard-stop when all accounts in auth-failure cooldown. | +| `hardStopMaxConsecutiveFailures` | `number` | `5` | Hard-stop after consecutive failures. | +| `schedulingMode` | `string` | `"cache_first"` | Scheduling strategy (`cache_first`, `balance`, `performance_first`). | +| `maxCacheFirstWaitSeconds` | `number` | `60` | Cache-first wait before switching. | +| `switchOnFirstRateLimit` | `boolean` | `true` | Switch accounts immediately on rate limit. | +| `rateLimitDedupWindowMs` | `number` | `2000` | Deduplicate rate-limit events. | +| `rateLimitStateResetMs` | `number` | `120000` | Reset rate-limit state after idle window. | +| `defaultRetryAfterMs` | `number` | `60000` | Fallback retry-after when headers missing. | +| `maxBackoffMs` | `number` | `120000` | Cap exponential backoff. | +| `requestJitterMaxMs` | `number` | `1000` | Random jitter added to retry delays. | **Why required:** AI SDK 2.0.50 introduced automatic use of `item_reference` items to reduce payload size when `store: true`. 
However: + - Codex API requires `store: false` (stateless mode) - `item_reference` items cannot be resolved without server-side storage - Without this setting, multi-turn conversations fail with: `"Item with id 'fc_xxx' not found"` **Where to set:** + ```json { "provider": { "openai": { "options": { - "store": false // ← Global: applies to all models + "store": false // ← Global: applies to all models }, "models": { "gpt-5.3-codex-low": { "options": { - "store": false // ← Per-model: redundant but explicit + "store": false // ← Per-model: redundant but explicit } } } @@ -476,12 +503,14 @@ AI SDK 2.0.50 introduced automatic use of `item_reference` items to reduce paylo ``` **Benefits:** + - ✅ Clean config key: `gpt-5.3-codex-low` (matches Codex CLI presets) - ✅ Friendly display: `"GPT 5 Codex Low (Codex)"` (UX) - ✅ No redundant fields - ✅ OpenCode auto-sets `id` to config key **Why no `id` field?** + - For OpenAI provider, the `id` field is NOT used (custom loader receives config key) - OpenCode defaults `id` to config key if omitted - Including it is redundant and creates confusion @@ -499,6 +528,7 @@ AI SDK 2.0.50 introduced automatic use of `item_reference` items to reduce paylo ``` **What happens:** + - `id` defaults to: `"gpt-5.3-codex-low"` (config key) - `name` defaults to: `"gpt-5.3-codex-low"` (config key) - TUI shows: `"gpt-5.3-codex-low"` (less friendly) @@ -520,6 +550,7 @@ AI SDK 2.0.50 introduced automatic use of `item_reference` items to reduce paylo ``` **What happens:** + - `id` field is stored but NOT used by OpenAI custom loader - Adds documentation value but is technically redundant - Works fine, just verbose @@ -528,18 +559,18 @@ AI SDK 2.0.50 introduced automatic use of `item_reference` items to reduce paylo ## Summary Table -| Use Case | Which Field? 
| Example Value | -|----------|-------------|---------------| -| **CLI `--model` flag** | Config Key | `openai/gpt-5.3-codex-low` | -| **Custom commands** | Config Key | `model: openai/gpt-5.3-codex-low` | -| **Agent config** | Config Key | `"model": "openai/gpt-5.3-codex-low"` | -| **TUI display** | `name` field | `"GPT 5 Codex Low (Codex)"` | -| **Plugin config lookup** | Config Key | `models["gpt-5.3-codex-low"]` | -| **AI SDK receives** | Config Key | `body.model = "gpt-5.3-codex-low"` | -| **Plugin normalizes** | Transformed | `"gpt-5.3-codex"` (sent to API) | -| **TUI persistence** | Config Key | `model_id = "gpt-5.3-codex-low"` | -| **Documentation** | `id` field | `"gpt-5.3-codex"` (base model) | -| **Model sorting** | `id` field | Used for priority ranking | +| Use Case | Which Field? | Example Value | +| ------------------------ | ------------ | ------------------------------------- | +| **CLI `--model` flag** | Config Key | `openai/gpt-5.3-codex-low` | +| **Custom commands** | Config Key | `model: openai/gpt-5.3-codex-low` | +| **Agent config** | Config Key | `"model": "openai/gpt-5.3-codex-low"` | +| **TUI display** | `name` field | `"GPT 5 Codex Low (Codex)"` | +| **Plugin config lookup** | Config Key | `models["gpt-5.3-codex-low"]` | +| **AI SDK receives** | Config Key | `body.model = "gpt-5.3-codex-low"` | +| **Plugin normalizes** | Transformed | `"gpt-5.3-codex"` (sent to API) | +| **TUI persistence** | Config Key | `model_id = "gpt-5.3-codex-low"` | +| **Documentation** | `id` field | `"gpt-5.3-codex"` (base model) | +| **Model sorting** | `id` field | Used for priority ranking | --- @@ -567,12 +598,14 @@ name field is UI sugar 🎨 ## Why The Bug Happened **Old Plugin Logic (Broken):** + ```typescript -const normalizedModel = normalizeModel(body.model); // "gpt-5.2-codex-high" → "gpt-5.2-codex" -const modelConfig = getModelConfig(normalizedModel, userConfig); // Lookup "gpt-5.2-codex" +const normalizedModel = normalizeModel(body.model); // 
"gpt-5.2-codex-high" → "gpt-5.2-codex" +const modelConfig = getModelConfig(normalizedModel, userConfig); // Lookup "gpt-5.2-codex" ``` **Problem:** + - Plugin received: `"gpt-5.2-codex-high"` (config key) - Plugin normalized first: `"gpt-5.2-codex"` - Plugin looked up config: `models["gpt-5.2-codex"]` ❌ NOT FOUND @@ -580,13 +613,15 @@ const modelConfig = getModelConfig(normalizedModel, userConfig); // Lookup "gpt - **Result:** Per-model options ignored! **New Plugin Logic (Fixed):** + ```typescript -const originalModel = body.model; // "gpt-5.2-codex-high" (config key) -const normalizedModel = normalizeModel(body.model); // "gpt-5.2-codex" (for API) -const modelConfig = getModelConfig(originalModel, userConfig); // Lookup "gpt-5.2-codex-high" ✅ +const originalModel = body.model; // "gpt-5.2-codex-high" (config key) +const normalizedModel = normalizeModel(body.model); // "gpt-5.2-codex" (for API) +const modelConfig = getModelConfig(originalModel, userConfig); // Lookup "gpt-5.2-codex-high" ✅ ``` **Fix:** + - Use original value (config key) for config lookup ✅ - Normalize separately for API call ✅ - **Result:** Per-model options applied correctly! @@ -598,6 +633,7 @@ const modelConfig = getModelConfig(originalModel, userConfig); // Lookup "gpt-5 ### Test Case 1: Which model does plugin send to API? **Config:** + ```json { "my-custom-name": { @@ -613,6 +649,7 @@ const modelConfig = getModelConfig(originalModel, userConfig); // Lookup "gpt-5 **Question:** What model does plugin send to Codex API? **Answer:** + 1. Plugin receives: `body.model = "my-custom-name"` 2. Plugin normalizes: `"my-custom-name"` → `"my-custom-name"` (preserved) 3. Plugin sends to API: `"my-custom-name"` ✅ @@ -624,6 +661,7 @@ const modelConfig = getModelConfig(originalModel, userConfig); // Lookup "gpt-5 ### Test Case 2: How does TUI know what to display? 
**Config:** + ```json { "ugly-key-123": { @@ -644,6 +682,7 @@ const modelConfig = getModelConfig(originalModel, userConfig); // Lookup "gpt-5 ### Test Case 3: How does plugin find config? **Config:** + ```json { "gpt-5.3-codex-low": { @@ -658,6 +697,7 @@ const modelConfig = getModelConfig(originalModel, userConfig); // Lookup "gpt-5 **Question:** How does plugin find the options? **Answer:** + 1. Plugin receives: `body.model = "gpt-5.3-codex-low"` 2. Plugin looks up: `userConfig.models["gpt-5.3-codex-low"]` ✅ 3. Plugin finds: `{ reasoningEffort: "low" }` ✅ @@ -672,7 +712,8 @@ const modelConfig = getModelConfig(originalModel, userConfig); // Lookup "gpt-5 ```json { - "gpt-5.3-codex": { // ❌ Can't have multiple variants + "gpt-5.3-codex": { + // ❌ Can't have multiple variants "id": "gpt-5.3-codex" } } diff --git a/docs/development/CONFIG_FLOW.md b/docs/development/CONFIG_FLOW.md index 8c2305f..bfbc0ea 100644 --- a/docs/development/CONFIG_FLOW.md +++ b/docs/development/CONFIG_FLOW.md @@ -5,6 +5,7 @@ This document explains how OpenCode configuration flows from user files through > Note: Some examples use legacy model aliases for compatibility demonstrations. Runtime normalization maps known gpt-5.x variants (like `gpt-5.3-codex-low`) to base slugs (`gpt-5.3-codex`) before API submission; unknown/legacy IDs are lowercased and preserved without substring coercion. ## Table of Contents + - [Config Loading Order](#config-loading-order) - [Provider Options Flow](#provider-options-flow) - [Model Selection & Persistence](#model-selection--persistence) @@ -19,12 +20,14 @@ This document explains how OpenCode configuration flows from user files through OpenCode loads and merges configuration from multiple sources in this order (**last wins**): ### 1. Global Config + ``` ~/.config/opencode/opencode.jsonc ~/.config/opencode/opencode.json ``` ### 2. 
Project Configs (traversed upward from cwd) + ``` /.opencode/opencode.jsonc /.opencode/opencode.json @@ -34,6 +37,7 @@ OpenCode loads and merges configuration from multiple sources in this order (**l ``` ### 3. Custom Config (via flags) + ```bash OPENCODE_CONFIG=/path/to/config.json opencode # or @@ -41,6 +45,7 @@ OPENCODE_CONFIG_CONTENT='{"model":"openai/gpt-5"}' opencode ``` ### 4. Auth Configs + ``` # From .well-known/opencode endpoints (for OAuth providers) https://auth.example.com/.well-known/opencode @@ -55,17 +60,21 @@ https://auth.example.com/.well-known/opencode Options are merged at multiple stages before reaching the plugin: ### Stage 1: Database Defaults + Models.dev provides baseline capabilities for each provider/model. ### Stage 2: Environment Variables + ```bash export OPENAI_API_KEY="sk-..." ``` ### Stage 3: Custom Loaders + Plugins can inject options via the `loader()` function. ### Stage 4: User Config (HIGHEST PRIORITY) + ```json { "provider": { @@ -90,6 +99,7 @@ Plugins can inject options via the `loader()` function. ### Display Names vs Internal IDs **Your Config** (`config/opencode-legacy.json`): + ```json { "provider": { @@ -105,9 +115,7 @@ Plugins can inject options via the `loader()` function. "reasoningEffort": "medium", "reasoningSummary": "auto", "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], + "include": ["reasoning.encrypted_content"], "store": false } } @@ -118,7 +126,8 @@ Plugins can inject options via the `loader()` function. 
``` **What OpenCode Uses**: -- **UI Display**: "GPT 5.3 Codex Medium (Codex)" ✅ + +- **UI Display**: "GPT 5.3 Codex Medium (Codex)" ✅ - **Persistence**: `provider_id: "openai"` + `model_id: "gpt-5.3-codex-medium"` ✅ - **Plugin lookup**: `models["gpt-5.3-codex-medium"]` → used to build Codex request ✅ @@ -144,6 +153,7 @@ last_used = 2025-10-12T10:30:00Z ### How This Plugin Receives Config **Plugin Entry Point** (`index.ts:64-86`): + ```typescript async loader(getAuth: () => Promise, provider: unknown) { const providerConfig = provider as { @@ -195,6 +205,7 @@ type UserConfig = { ### Option Precedence For a given model, options are merged: + 1. **Global options** (`provider.openai.options`) 2. **Model-specific options** (`provider.openai.models[modelName].options`) ← WINS @@ -205,6 +216,7 @@ For a given model, options are merged: ## Examples ### Example 1: Global Options Only + ```json { "plugin": ["opencode-openai-codex-multi-auth"], @@ -223,6 +235,7 @@ For a given model, options are merged: **Result**: All OpenAI models use these options. ### Example 2: Per-Model Override + ```json { "plugin": ["opencode-openai-codex-multi-auth"], @@ -254,10 +267,12 @@ For a given model, options are merged: ``` **Result**: + - `gpt-5.3-codex-high` uses `reasoningEffort: "high"` (overridden) + `textVerbosity: "medium"` (from global) - `gpt-5-nano` uses `reasoningEffort: "minimal"` + `textVerbosity: "low"` (both overridden) ### Example 3: Full Configuration + ```json { "$schema": "https://opencode.ai/config.json", @@ -296,9 +311,11 @@ For a given model, options are merged: ## Best Practices ### 1. Use Per-Model Options for Variants + Instead of duplicating global options, override only what's different: ❌ **Bad**: + ```json { "models": { @@ -323,6 +340,7 @@ Instead of duplicating global options: ``` ✅ **Good**: + ```json { "options": { @@ -348,6 +366,7 @@ Instead of duplicating global options: ``` ### 2.
Keep Display Names Meaningful + Custom model names help you remember what each variant does: ```json @@ -370,6 +389,7 @@ Custom model names help you remember what each variant does: ``` ### 3. Set Defaults at Global Level + Most common settings should be global: ```json @@ -384,6 +404,7 @@ Most common settings should be global: ``` ### 4. Prefer Config Files for Plugin Settings + Use plugin config files for persistent behavior. --- @@ -391,17 +412,20 @@ Use plugin config files for persistent behavior. ## Troubleshooting ### Config Not Being Applied + 1. Check config file syntax with `jq . < config.json` 2. Verify config file location (use absolute paths) 3. Check OpenCode logs for config load errors 4. Use `OPENCODE_CONFIG_CONTENT` to test minimal configs ### Model Not Persisting + 1. TUI remembers the `id` field, not the display name 2. Check `~/.config/opencode/tui` for recently used models 3. Verify your config has the correct `id` field ### Options Not Taking Effect + 1. Model-specific options override global options 2. Plugin receives merged config from OpenCode 3. Add debug logging to verify what plugin receives @@ -409,6 +433,7 @@ Use plugin config files for persistent behavior. 
--- ## See Also + - [ARCHITECTURE.md](./ARCHITECTURE.md) - Plugin architecture and design decisions - [OpenCode Config Schema](https://opencode.ai/config.json) - Official schema - [Models.dev](https://models.dev) - Model capability database diff --git a/docs/development/TESTING.md b/docs/development/TESTING.md index 1fb1c25..1875af5 100644 --- a/docs/development/TESTING.md +++ b/docs/development/TESTING.md @@ -9,6 +9,7 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit ### Scenario 1: Default OpenCode Models (No Custom Config) **Config:** + ```json { "plugin": ["opencode-openai-codex-multi-auth"] @@ -16,6 +17,7 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit ``` **Available Models:** (from OpenCode's models.dev database) + - `gpt-5` - `gpt-5.3-codex` - `gpt-5-mini` @@ -23,14 +25,15 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit **Test Cases:** -| User Selects | Plugin Receives | Normalizes To | Config Lookup | API Receives | Result | -|--------------|-----------------|---------------|---------------|--------------|--------| -| `openai/gpt-5` | `"gpt-5"` | `"gpt-5.1"` | `models["gpt-5"]` → undefined | `"gpt-5.1"` | ✅ Uses global options | +| User Selects | Plugin Receives | Normalizes To | Config Lookup | API Receives | Result | +| ---------------------- | ----------------- | ----------------- | ------------------------------------- | ----------------- | ---------------------- | +| `openai/gpt-5` | `"gpt-5"` | `"gpt-5.1"` | `models["gpt-5"]` → undefined | `"gpt-5.1"` | ✅ Uses global options | | `openai/gpt-5.3-codex` | `"gpt-5.3-codex"` | `"gpt-5.3-codex"` | `models["gpt-5.3-codex"]` → undefined | `"gpt-5.3-codex"` | ✅ Uses global options | -| `openai/gpt-5-mini` | `"gpt-5-mini"` | `"gpt-5-mini"` | `models["gpt-5-mini"]` → undefined | `"gpt-5-mini"` | ✅ Uses global options | -| `openai/gpt-5-nano` | `"gpt-5-nano"` | `"gpt-5-nano"` | `models["gpt-5-nano"]` → 
undefined | `"gpt-5-nano"` | ✅ Uses global options | +| `openai/gpt-5-mini` | `"gpt-5-mini"` | `"gpt-5-mini"` | `models["gpt-5-mini"]` → undefined | `"gpt-5-mini"` | ✅ Uses global options | +| `openai/gpt-5-nano` | `"gpt-5-nano"` | `"gpt-5-nano"` | `models["gpt-5-nano"]` → undefined | `"gpt-5-nano"` | ✅ Uses global options | **Expected Behavior:** + - ✅ All models work with global options - ✅ Normalized correctly for API - ✅ No errors @@ -40,6 +43,7 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit ### Scenario 2: Custom Config with Preset Names (New Style) **Config:** + ```json { "plugin": ["opencode-openai-codex-multi-auth"], @@ -65,13 +69,14 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit **Test Cases:** -| User Selects | Plugin Receives | Config Lookup | Resolved Options | API Receives | Result | -|--------------|-----------------|---------------|------------------|--------------|--------| -| `openai/gpt-5.3-codex-low` | `"gpt-5.3-codex-low"` | Found ✅ | `{ reasoningEffort: "low" }` | `"gpt-5.3-codex"` | ✅ Per-model | -| `openai/gpt-5.3-codex-high` | `"gpt-5.3-codex-high"` | Found ✅ | `{ reasoningEffort: "high" }` | `"gpt-5.3-codex"` | ✅ Per-model | -| `openai/gpt-5.3-codex` | `"gpt-5.3-codex"` | Not found | `{ reasoningEffort: "medium" }` | `"gpt-5.3-codex"` | ✅ Global | +| User Selects | Plugin Receives | Config Lookup | Resolved Options | API Receives | Result | +| --------------------------- | ---------------------- | ------------- | ------------------------------- | ----------------- | ------------ | +| `openai/gpt-5.3-codex-low` | `"gpt-5.3-codex-low"` | Found ✅ | `{ reasoningEffort: "low" }` | `"gpt-5.3-codex"` | ✅ Per-model | +| `openai/gpt-5.3-codex-high` | `"gpt-5.3-codex-high"` | Found ✅ | `{ reasoningEffort: "high" }` | `"gpt-5.3-codex"` | ✅ Per-model | +| `openai/gpt-5.3-codex` | `"gpt-5.3-codex"` | Not found | `{ reasoningEffort: "medium" }` | `"gpt-5.3-codex"` | ✅ Global | 
**Expected Behavior:** + - ✅ Custom variants use per-model options - ✅ Default `gpt-5.3-codex` uses global options - ✅ Known variants normalize to base slug for API; default `gpt-5.3-codex` stays `gpt-5.3-codex` @@ -81,6 +86,7 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit ### Scenario 3: Old Config (Backwards Compatibility) **Config:** + ```json { "plugin": ["opencode-openai-codex-multi-auth"], @@ -90,7 +96,7 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit "reasoningEffort": "medium" }, "models": { - "GPT 5 Codex Low (ChatGPT Subscription)": { + "GPT 5 Codex Low (Codex)": { "id": "gpt-5.3-codex", "options": { "reasoningEffort": "low" } } @@ -102,11 +108,12 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit **Test Cases:** -| User Selects | Plugin Receives | Config Lookup | Resolved Options | API Receives | Result | -|--------------|-----------------|---------------|------------------|--------------|--------| -| `openai/GPT 5 Codex Low (ChatGPT Subscription)` | `"GPT 5 Codex Low (ChatGPT Subscription)"` | Found ✅ | `{ reasoningEffort: "low" }` | `"gpt 5 codex low (chatgpt subscription)"` | ✅ Per-model | +| User Selects | Plugin Receives | Config Lookup | Resolved Options | API Receives | Result | +| -------------------------------- | --------------------------- | ------------- | ---------------------------- | --------------------------- | ------------ | +| `openai/GPT 5 Codex Low (Codex)` | `"GPT 5 Codex Low (Codex)"` | Found ✅ | `{ reasoningEffort: "low" }` | `"gpt 5 codex low (codex)"` | ✅ Per-model | **Expected Behavior:** + - ✅ Old config keys still work - ✅ Per-model options applied correctly - ✅ Normalizes correctly for API @@ -116,6 +123,7 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit ### Scenario 4: Mixed Config (Default + Custom) **Config:** + ```json { "plugin": ["opencode-openai-codex-multi-auth"], @@ -133,19
+141,21 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit ``` **Available Models:** + - `gpt-5.3-codex-low` (custom) - `gpt-5.3-codex` (default from models.dev) - `gpt-5` (default from models.dev) **Test Cases:** -| User Selects | Config Lookup | Uses Options | Result | -|--------------|---------------|--------------|--------| -| `openai/gpt-5.3-codex-low` | Found ✅ | Per-model | ✅ Custom config | -| `openai/gpt-5.3-codex` | Not found | Global | ✅ Default model | -| `openai/gpt-5` | Not found | Global | ✅ Default model | +| User Selects | Config Lookup | Uses Options | Result | +| -------------------------- | ------------- | ------------ | ---------------- | +| `openai/gpt-5.3-codex-low` | Found ✅ | Per-model | ✅ Custom config | +| `openai/gpt-5.3-codex` | Not found | Global | ✅ Default model | +| `openai/gpt-5` | Not found | Global | ✅ Default model | **Expected Behavior:** + - ✅ Custom variants use per-model options - ✅ Default models use global options - ✅ Both types coexist peacefully @@ -157,6 +167,7 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit #### 5a: Model Name with Uppercase **Config:** + ```json { "models": { @@ -168,6 +179,7 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit ``` **Test:** + ``` User selects: openai/GPT-5-CODEX-HIGH Plugin receives: "GPT-5-CODEX-HIGH" @@ -183,6 +195,7 @@ API receives: "gpt-5.3-codex" ✅ #### 5b: Model Name with Special Characters **Config:** + ```json { "models": { @@ -194,6 +207,7 @@ API receives: "gpt-5.3-codex" ✅ ``` **Test:** + ``` User selects: openai/my-gpt5-codex-variant Plugin receives: "my-gpt5-codex-variant" @@ -209,6 +223,7 @@ API receives: "my-gpt5-codex-variant" ✅ #### 5c: No Config, No Model Specified **Config:** + ```json { "plugin": ["opencode-openai-codex-multi-auth"] @@ -216,6 +231,7 @@ API receives: "my-gpt5-codex-variant" ✅ ``` **Test:** + ``` User selects: (none - uses OpenCode default) Plugin 
receives: undefined or default from OpenCode @@ -231,6 +247,7 @@ API receives: "gpt-5.1" ✅ #### 5d: Only `gpt-5` in Name (No `codex`) **Config:** + ```json { "models": { @@ -242,6 +259,7 @@ API receives: "gpt-5.1" ✅ ``` **Test:** + ``` User selects: openai/my-gpt-5-variant Plugin receives: "my-gpt-5-variant" @@ -259,6 +277,7 @@ API receives: "my-gpt-5-variant" ✅ **Config:** Any **Test Sequence:** + ``` Turn 1: > write hello to test.txt Turn 2: > read the file @@ -268,14 +287,15 @@ Turn 4: > now delete it **What Plugin Should Do:** -| Turn | Input Has IDs? | Filter Result | Encrypted Content | Result | -|------|---------------|---------------|-------------------|--------| -| 1 | No | No filtering needed | Received in response | ✅ Works | -| 2 | Yes (from Turn 1) | ALL removed ✅ | Sent back in request | ✅ Works | -| 3 | Yes (from Turn 1-2) | ALL removed ✅ | Sent back in request | ✅ Works | -| 4 | Yes (from Turn 1-3) | ALL removed ✅ | Sent back in request | ✅ Works | +| Turn | Input Has IDs? 
| Filter Result | Encrypted Content | Result | +| ---- | ------------------- | ------------------- | -------------------- | -------- | +| 1 | No | No filtering needed | Received in response | ✅ Works | +| 2 | Yes (from Turn 1) | ALL removed ✅ | Sent back in request | ✅ Works | +| 3 | Yes (from Turn 1-2) | ALL removed ✅ | Sent back in request | ✅ Works | +| 4 | Yes (from Turn 1-3) | ALL removed ✅ | Sent back in request | ✅ Works | **Expected Behavior:** + - ✅ No "item not found" errors on any turn - ✅ Context preserved via encrypted reasoning - ✅ Debug log shows: "Successfully removed all X message IDs" @@ -286,23 +306,24 @@ Turn 4: > now delete it ### Test Matrix -| Plugin Version | Config Format | Expected Result | -|----------------|--------------|-----------------| -| **Old (<2.1.2)** | Long names + id | ❌ Per-model options broken, ID errors | -| **Old (<2.1.2)** | Short names | ❌ Per-model options broken, ID errors | -| **New (2.1.2+)** | Long names + id | ✅ **ALL FIXED** | -| **New (2.1.2+)** | Short names | ✅ **ALL FIXED** | -| **New (2.1.2+)** | Short names (no id) | ✅ **OPTIMAL** | +| Plugin Version | Config Format | Expected Result | +| ---------------- | ------------------- | -------------------------------------- | +| **Old (<2.1.2)** | Long names + id | ❌ Per-model options broken, ID errors | +| **Old (<2.1.2)** | Short names | ❌ Per-model options broken, ID errors | +| **New (2.1.2+)** | Long names + id | ✅ **ALL FIXED** | +| **New (2.1.2+)** | Short names | ✅ **ALL FIXED** | +| **New (2.1.2+)** | Short names (no id) | ✅ **OPTIMAL** | ### Backwards Compatibility Tests #### Test 1: Old Plugin User Upgrades **Before (Plugin v2.1.1):** + ```json { "models": { - "GPT 5 Codex Low (ChatGPT Subscription)": { + "GPT 5 Codex Low (Codex)": { "id": "gpt-5.3-codex", "options": { "reasoningEffort": "low" } } @@ -311,6 +332,7 @@ Turn 4: > now delete it ``` **After (Plugin v2.1.2):** + - Keep same config - Plugin now finds per-model options ✅ - No "item not found" 
errors ✅ @@ -322,6 +344,7 @@ Turn 4: > now delete it #### Test 2: New User with Recommended Config **Config:** + ```json { "models": { @@ -334,6 +357,7 @@ Turn 4: > now delete it ``` **Expected:** + - CLI: `--model=openai/gpt-5.3-codex-low` ✅ - TUI: Shows "GPT 5 Codex Low (Codex)" ✅ - Plugin: Finds and applies per-model options ✅ @@ -346,6 +370,7 @@ Turn 4: > now delete it #### Test 3: Minimal Config (No Custom Models) **Config:** + ```json { "plugin": ["opencode-openai-codex-multi-auth"], @@ -354,6 +379,7 @@ Turn 4: > now delete it ``` **Expected:** + - Uses default OpenCode model: `gpt-5.3-codex` - Plugin applies: Global options + Codex defaults - No errors ✅ @@ -460,6 +486,7 @@ DEBUG_CODEX_PLUGIN=1 opencode run "write hello world to test.txt" ``` **Verify:** + - ✅ Plugin installs automatically - ✅ Auth works - ✅ Debug log shows: `hasModelSpecificConfig: false` @@ -498,6 +525,7 @@ DEBUG_CODEX_PLUGIN=1 opencode run "test high" --model=openai/gpt-5.3-codex-high ``` **Verify:** + - ✅ Debug log shows: `hasModelSpecificConfig: true` for both - ✅ Different `reasoningEffort` values in logs - ✅ TUI shows friendly names @@ -519,6 +547,7 @@ DEBUG_CODEX_PLUGIN=1 opencode --model=openai/gpt-5.3-codex-medium ``` **Verify:** + - ✅ No "item not found" errors on ANY turn - ✅ Debug shows IDs removed on turns 2+ - ✅ Context is maintained across turns @@ -540,6 +569,7 @@ DEBUG_CODEX_PLUGIN=1 opencode ``` **Verify:** + - ✅ Different reasoning efforts logged for each model - ✅ Per-model options applied correctly - ✅ No errors when switching @@ -565,6 +595,7 @@ opencode ``` **Verify:** + - ✅ Last used model is `gpt-5.3-codex-high` - ✅ Model is auto-selected on restart - ✅ TUI shows correct model highlighted @@ -578,24 +609,25 @@ opencode `normalizeModel()` lowercases unknown slugs for diagnostics only; requests still reject unknown models. 
```typescript -normalizeModel("gpt-5.3-codex") // → "gpt-5.3-codex" ✅ -normalizeModel("gpt-5.2-codex-high") // → "gpt-5.2-codex" ✅ -normalizeModel("gpt-5.2-xhigh") // → "gpt-5.2" ✅ -normalizeModel("gpt-5.1-codex-max-xhigh") // → "gpt-5.1-codex-max" ✅ -normalizeModel("gpt-5.1-codex-mini-high") // → "gpt-5.1-codex-mini" ✅ -normalizeModel("codex-mini-latest") // → "gpt-5.1-codex-mini" ✅ -normalizeModel("gpt-5.1-codex") // → "gpt-5.1-codex" ✅ -normalizeModel("gpt-5.1") // → "gpt-5.1" ✅ -normalizeModel("my-codex-model") // → "my-codex-model" ✅ -normalizeModel("gpt-5") // → "gpt-5.1" ✅ -normalizeModel("gpt-5-mini") // → "gpt-5-mini" ✅ -normalizeModel("gpt-5-nano") // → "gpt-5-nano" ✅ -normalizeModel("GPT 5 High") // → "gpt 5 high" ✅ -normalizeModel(undefined) // → "gpt-5.1" ✅ -normalizeModel("random-model") // → "random-model" ✅ +normalizeModel("gpt-5.3-codex"); // → "gpt-5.3-codex" ✅ +normalizeModel("gpt-5.2-codex-high"); // → "gpt-5.2-codex" ✅ +normalizeModel("gpt-5.2-xhigh"); // → "gpt-5.2" ✅ +normalizeModel("gpt-5.1-codex-max-xhigh"); // → "gpt-5.1-codex-max" ✅ +normalizeModel("gpt-5.1-codex-mini-high"); // → "gpt-5.1-codex-mini" ✅ +normalizeModel("codex-mini-latest"); // → "gpt-5.1-codex-mini" ✅ +normalizeModel("gpt-5.1-codex"); // → "gpt-5.1-codex" ✅ +normalizeModel("gpt-5.1"); // → "gpt-5.1" ✅ +normalizeModel("my-codex-model"); // → "my-codex-model" ✅ +normalizeModel("gpt-5"); // → "gpt-5.1" ✅ +normalizeModel("gpt-5-mini"); // → "gpt-5-mini" ✅ +normalizeModel("gpt-5-nano"); // → "gpt-5-nano" ✅ +normalizeModel("GPT 5 High"); // → "gpt 5 high" ✅ +normalizeModel(undefined); // → "gpt-5.1" ✅ +normalizeModel("random-model"); // → "random-model" ✅ ``` **Implementation:** + ```typescript export function normalizeModel(model: string | undefined): string { if (!model) return "gpt-5.1"; @@ -609,6 +641,7 @@ export function normalizeModel(model: string | undefined): string { ``` **Why this works:** + - ✅ Explicit model map + strict dynamic regex for known slugs - ✅ 
Case-insensitive normalization for unknown/legacy slugs - ✅ No substring-based coercion (reduces false positives) @@ -665,65 +698,69 @@ opencode run "test" --model=openai/gpt-5.3-codex ### Unit Tests (Future) ```typescript -describe('normalizeModel', () => { - test('handles all default models', () => { - expect(normalizeModel('gpt-5')).toBe('gpt-5.1') - expect(normalizeModel('gpt-5.3-codex')).toBe('gpt-5.3-codex') - expect(normalizeModel('gpt-5.3-codex-mini')).toBe('gpt-5.1-codex-mini') - expect(normalizeModel('gpt-5-mini')).toBe('gpt-5-mini') - expect(normalizeModel('gpt-5-nano')).toBe('gpt-5-nano') - }) - - test('handles custom preset names', () => { - expect(normalizeModel('gpt-5.3-codex-low')).toBe('gpt-5.3-codex') - expect(normalizeModel('openai/gpt-5.3-codex-mini-high')).toBe('gpt-5.1-codex-mini') - expect(normalizeModel('gpt-5-high')).toBe('gpt-5.1') - }) - - test('handles legacy names', () => { - expect(normalizeModel('GPT 5 Codex Low (ChatGPT Subscription)')).toBe('gpt 5 codex low (chatgpt subscription)') - }) - - test('handles edge cases', () => { - expect(normalizeModel(undefined)).toBe('gpt-5.1') - expect(normalizeModel('codex-mini-latest')).toBe('codex-mini-latest') - expect(normalizeModel('random')).toBe('random') - }) -}) - -describe('getModelConfig', () => { - test('returns per-model options when found', () => { - const config = getModelConfig('gpt-5.3-codex-low', { - global: { reasoningEffort: 'medium' }, +describe("normalizeModel", () => { + test("handles all default models", () => { + expect(normalizeModel("gpt-5")).toBe("gpt-5.1"); + expect(normalizeModel("gpt-5.3-codex")).toBe("gpt-5.3-codex"); + expect(normalizeModel("gpt-5.3-codex-mini")).toBe("gpt-5.1-codex-mini"); + expect(normalizeModel("gpt-5-mini")).toBe("gpt-5-mini"); + expect(normalizeModel("gpt-5-nano")).toBe("gpt-5-nano"); + }); + + test("handles custom preset names", () => { + expect(normalizeModel("gpt-5.3-codex-low")).toBe("gpt-5.3-codex"); + 
expect(normalizeModel("openai/gpt-5.3-codex-mini-high")).toBe( + "gpt-5.1-codex-mini", + ); + expect(normalizeModel("gpt-5-high")).toBe("gpt-5.1"); + }); + + test("handles legacy names", () => { + expect(normalizeModel("GPT 5 Codex Low (Codex)")).toBe( + "gpt 5 codex low (codex)", + ); + }); + + test("handles edge cases", () => { + expect(normalizeModel(undefined)).toBe("gpt-5.1"); + expect(normalizeModel("codex-mini-latest")).toBe("gpt-5.1-codex-mini"); + expect(normalizeModel("random")).toBe("random"); + }); +}); + +describe("getModelConfig", () => { + test("returns per-model options when found", () => { + const config = getModelConfig("gpt-5.3-codex-low", { + global: { reasoningEffort: "medium" }, models: { - 'gpt-5.3-codex-low': { - options: { reasoningEffort: 'low' } - } - } - }) - expect(config.reasoningEffort).toBe('low') - }) - - test('returns global options when model not in config', () => { - const config = getModelConfig('gpt-5.3-codex', { - global: { reasoningEffort: 'medium' }, - models: {} - }) - expect(config.reasoningEffort).toBe('medium') - }) -}) - -describe('filterInput', () => { - test('removes all message IDs', () => { + "gpt-5.3-codex-low": { + options: { reasoningEffort: "low" }, + }, + }, + }); + expect(config.reasoningEffort).toBe("low"); + }); + + test("returns global options when model not in config", () => { + const config = getModelConfig("gpt-5.3-codex", { + global: { reasoningEffort: "medium" }, + models: {}, + }); + expect(config.reasoningEffort).toBe("medium"); + }); +}); + +describe("filterInput", () => { + test("removes all message IDs", () => { const input = [ - { id: 'msg_123', role: 'user', content: [] }, - { id: 'rs_456', role: 'assistant', content: [] }, - { role: 'user', content: [] } // No ID - ] - const result = filterInput(input) - expect(result.every(item => !item.id)).toBe(true) - }) -}) + { id: "msg_123", role: "user", content: [] }, + { id: "rs_456", role: "assistant", content: [] }, + { role: "user", content: [] }, //
No ID + ]; + const result = filterInput(input); + expect(result.every((item) => !item.id)).toBe(true); + }); +}); ``` --- diff --git a/docs/getting-started.md b/docs/getting-started.md index 75819ba..c86cbd5 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -45,11 +45,12 @@ OpenCode automatically installs plugins - no `npm install` needed! **Choose your configuration style:** -#### ⚠️ REQUIRED: Full Configuration (Only Supported Setup) +#### ⚠️ RECOMMENDED: Full Configuration (Now Handled Automatically) -**IMPORTANT**: You MUST use the full configuration. This is the ONLY officially supported setup for GPT 5.x models. +**IMPORTANT**: You should use the full configuration. This is the recommended setup for GPT 5.x models. + +**Why the full config is recommended:** -**Why the full config is required:** - GPT 5 models can be temperamental and need proper configuration - Minimal configs are NOT supported and will fail unpredictably - OpenCode features require proper model metadata @@ -57,12 +58,12 @@ OpenCode automatically installs plugins - no `npm install` needed! Add this to `~/.config/opencode/opencode.jsonc` (or `.json`): -**Tip**: The snippet below is a truncated excerpt. For the complete legacy list, copy `config/opencode-legacy.json`. For the modern variants config (OpenCode v1.0.210+), use `config/opencode-modern.json`. +**Tip**: The snippet below is a truncated excerpt. For the complete modern variants config (OpenCode v1.0.210+), copy `config/opencode-modern.json`. For the complete legacy list, copy `config/opencode-legacy.json`.
```json { "$schema": "https://opencode.ai/config.json", - "plugin": ["opencode-openai-codex-multi-auth"], + "plugin": ["opencode-openai-codex-multi-auth@latest"], "provider": { "openai": { "options": { @@ -72,75 +73,226 @@ Add this to `~/.config/opencode/opencode.jsonc` (or `.json`): "include": ["reasoning.encrypted_content"], "store": false }, + "gpt-5.3-codex": { + "name": "GPT 5.3 Codex (Codex)", + "limit": { + "context": 272000, + "output": 128000 + }, + "modalities": { + "input": ["text", "image"], + "output": ["text"] + }, + "variants": { + "low": { + "reasoningEffort": "low", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + }, + "xhigh": { + "reasoningEffort": "xhigh", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + } + } + }, + "gpt-5.2-codex": { + "name": "GPT 5.2 Codex (Codex)", + "limit": { + "context": 272000, + "output": 128000 + }, + "modalities": { + "input": ["text", "image"], + "output": ["text"] + }, + "variants": { + "low": { + "reasoningEffort": "low", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + }, + "xhigh": { + "reasoningEffort": "xhigh", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + } + } + }, "models": { - "gpt-5.3-codex": { - "name": "GPT 5.3 Codex (Codex)", + "gpt-5.2": { + "name": "GPT 5.2 (Codex)", "limit": { "context": 272000, "output": 128000 }, - "options": { - "reasoningEffort": "medium", - "reasoningSummary": "auto", - "textVerbosity": "medium", - "include": ["reasoning.encrypted_content"], - "store": false + "modalities": { + "input": ["text", 
"image"], + "output": ["text"] + }, + "variants": { + "none": { + "reasoningEffort": "none", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "low": { + "reasoningEffort": "low", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + }, + "xhigh": { + "reasoningEffort": "xhigh", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + } } }, - "gpt-5.3-codex-low": { - "name": "GPT 5.3 Codex Low (Codex)", + "gpt-5.1-codex-max": { + "name": "GPT 5.1 Codex Max (Codex)", "limit": { "context": 272000, "output": 128000 }, - "options": { - "reasoningEffort": "low", - "reasoningSummary": "auto", - "textVerbosity": "medium", - "include": ["reasoning.encrypted_content"], - "store": false + "modalities": { + "input": ["text", "image"], + "output": ["text"] + }, + "variants": { + "low": { + "reasoningEffort": "low", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + }, + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + }, + "xhigh": { + "reasoningEffort": "xhigh", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + } } }, - "gpt-5.3-codex-high": { - "name": "GPT 5.3 Codex High (Codex)", + "gpt-5.1-codex": { + "name": "GPT 5.1 Codex (Codex)", "limit": { "context": 272000, "output": 128000 }, - "options": { - "reasoningEffort": "high", - "reasoningSummary": "detailed", - "textVerbosity": "medium", - "include": ["reasoning.encrypted_content"], - "store": false + "modalities": { + "input": ["text", "image"], + "output": ["text"] + }, + "variants": { + "low": { + "reasoningEffort": "low", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + 
"medium": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + } } }, - "gpt-5.2-codex": { - "name": "GPT 5.2 Codex (Codex)", + "gpt-5.1-codex-mini": { + "name": "GPT 5.1 Codex Mini (Codex)", "limit": { "context": 272000, "output": 128000 }, - "options": { - "reasoningEffort": "medium", - "reasoningSummary": "auto", - "textVerbosity": "medium", - "include": ["reasoning.encrypted_content"], - "store": false + "modalities": { + "input": ["text", "image"], + "output": ["text"] + }, + "variants": { + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + } } }, - "gpt-5.2": { - "name": "GPT 5.2 (Codex)", + "gpt-5.1": { + "name": "GPT 5.1 (Codex)", "limit": { "context": 272000, "output": 128000 }, - "options": { - "reasoningEffort": "medium", - "reasoningSummary": "auto", - "textVerbosity": "medium", - "include": ["reasoning.encrypted_content"], - "store": false + "modalities": { + "input": ["text", "image"], + "output": ["text"] + }, + "variants": { + "none": { + "reasoningEffort": "none", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "low": { + "reasoningEffort": "low", + "reasoningSummary": "auto", + "textVerbosity": "low" + }, + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "high" + } } } } @@ -149,17 +301,18 @@ Add this to `~/.config/opencode/opencode.jsonc` (or `.json`): } ``` - **What you get:** - - ✅ GPT 5.3 Codex (low/medium/high/xhigh reasoning) - - ✅ GPT 5.2 (none/low/medium/high/xhigh reasoning) - - ✅ GPT 5.2 Codex (low/medium/high/xhigh reasoning) - - ✅ GPT 5.1 Codex Max 
(low/medium/high/xhigh reasoning presets) - - ✅ GPT 5.1 Codex (low/medium/high reasoning) - - ✅ GPT 5.1 Codex Mini (medium/high reasoning) - - ✅ GPT 5.1 (none/low/medium/high reasoning) - - ✅ 272k context + 128k output window for all GPT 5.x presets. - - ✅ All visible in OpenCode model selector - - ✅ Optimal settings for each reasoning level +**What you get:** + +- ✅ GPT 5.3 Codex (low/medium/high/xhigh reasoning) +- ✅ GPT 5.2 (none/low/medium/high/xhigh reasoning) +- ✅ GPT 5.2 Codex (low/medium/high/xhigh reasoning) +- ✅ GPT 5.1 Codex Max (low/medium/high/xhigh reasoning presets) +- ✅ GPT 5.1 Codex (low/medium/high reasoning) +- ✅ GPT 5.1 Codex Mini (medium/high reasoning) +- ✅ GPT 5.1 (none/low/medium/high reasoning) +- ✅ 272k context + 128k output window for all GPT 5.x presets. +- ✅ All visible in OpenCode model selector +- ✅ Optimal settings for each reasoning level **Optional: Personality configuration** @@ -183,6 +336,7 @@ Personality settings live in the plugin config file: `~/.config/opencode/openai- ``` Personality descriptions are loaded from: + - Project-local `.opencode/Personalities/*.md` - Global `~/.config/opencode/Personalities/*.md` @@ -214,6 +368,7 @@ Prompt caching is enabled out of the box: when OpenCode sends its session identi ``` **Why this doesn't work:** + - GPT 5 models need proper configuration to work reliably - Missing model metadata breaks OpenCode features - Cannot guarantee stable operation @@ -228,6 +383,7 @@ opencode auth login 2. Choose **"ChatGPT Pro/Plus (Codex Multi Auth)"** If you see other OpenAI auth options, they are OpenCode's built-in methods. This plugin's flow is the one labeled **"(Codex Multi Auth)"**. + 3. Browser opens automatically for OAuth flow 4. Log in with your ChatGPT account 5. Done! 
Accounts saved to `~/.config/opencode/openai-codex-accounts.json` @@ -286,6 +442,7 @@ npx -y opencode-openai-codex-multi-auth@latest --uninstall --all ``` **When to update:** + - New features released - Bug fixes available - Security updates @@ -309,6 +466,7 @@ For plugin development or testing unreleased changes: **Note**: Must point to `dist/` folder (built output), not root. **Build the plugin:** + ```bash cd opencode-openai-codex-multi-auth npm install diff --git a/docs/index.md b/docs/index.md index eeac880..944d996 100644 --- a/docs/index.md +++ b/docs/index.md @@ -21,6 +21,7 @@ Users are responsible for compliance with [OpenAI's Terms of Use](https://openai ## Quick Links ### For Users + - [Getting Started](getting-started.md) - Complete installation and setup guide - [Configuration Guide](configuration.md) - Advanced config options and patterns - [Multi-Account](multi-account.md) - Multiple accounts, rotation behavior, and account tools @@ -29,7 +30,9 @@ Users are responsible for compliance with [OpenAI's Terms of Use](https://openai - [Release Notes](https://github.com/iam-brain/opencode-openai-codex-multi-auth/releases) - Version history and updates ### For Developers + Explore the engineering depth behind this plugin: + - [Architecture](development/ARCHITECTURE.md) - Technical design, AI SDK compatibility, store:false explained - [Config System](development/CONFIG_FLOW.md) - How configuration loading and merging works - [Config Fields](development/CONFIG_FIELDS.md) - Understanding config keys, `id`, and `name` fields @@ -132,6 +135,7 @@ This plugin represents significant engineering effort to bridge OpenCode and the - **Comprehensive test coverage** with actual API verification **Explore the development docs** to see the depth of implementation: + - [Architecture Deep Dive](development/ARCHITECTURE.md) - [Configuration System Internals](development/CONFIG_FLOW.md) - [Testing & Verification](development/TESTING.md) diff --git a/docs/multi-account.md 
b/docs/multi-account.md index 535511b..8e09645 100644 --- a/docs/multi-account.md +++ b/docs/multi-account.md @@ -59,19 +59,9 @@ When OpenCode is running with the TUI available, the plugin shows toasts for: You can disable most toasts with `quietMode: true`. -## Account Management Tools +## Account Management -The plugin exposes a few OpenCode tools to inspect or switch accounts: - -- `codex-status` - list accounts and status -- `codex-switch-accounts` - switch active account by index (1-based) -- `codex-toggle-account` - enable/disable account by index (1-based) -- `codex-remove-account` - remove account by index (1-based) - -The remove tool requires `confirm: true` when called directly; the TUI slash command template includes confirmation automatically. - -These are primarily useful in the OpenCode TUI. -To enable or disable accounts, re-run `opencode auth login` and choose **manage**. +To inspect, enable/disable, or remove accounts, re-run `opencode auth login` and choose **manage**. 
## Storage @@ -118,19 +108,19 @@ Legacy records that lack full identity are preserved but skipped for selection u ### Fields -| Field | Description | -|-------|-------------| -| `email` | Best-effort email extracted from the OAuth JWT (may be missing) | -| `accountId` | ChatGPT account ID extracted from the OAuth JWT (may be missing) | -| `plan` | ChatGPT plan name extracted from the OAuth JWT (may be missing) | -| `refreshToken` | OAuth refresh token (auto-managed) | -| `enabled` | Whether the account can be selected (defaults to true) | -| `addedAt` | Timestamp when the account was first stored | -| `lastUsed` | Timestamp when the account was last selected | -| `activeIndex` | Active account index (used by the account switch tool) | -| `activeIndexByFamily` | Per-model-family active index | -| `rateLimitResetTimes` | Optional per-family/model rate limit reset times | -| `coolingDownUntil` | Optional cooldown timestamp for failing accounts | +| Field | Description | +| --------------------- | ---------------------------------------------------------------- | +| `email` | Best-effort email extracted from the OAuth JWT (may be missing) | +| `accountId` | ChatGPT account ID extracted from the OAuth JWT (may be missing) | +| `plan` | ChatGPT plan name extracted from the OAuth JWT (may be missing) | +| `refreshToken` | OAuth refresh token (auto-managed) | +| `enabled` | Whether the account can be selected (defaults to true) | +| `addedAt` | Timestamp when the account was first stored | +| `lastUsed` | Timestamp when the account was last selected | +| `activeIndex` | Active account index (used by the account switch tool) | +| `activeIndexByFamily` | Per-model-family active index | +| `rateLimitResetTimes` | Optional per-family/model rate limit reset times | +| `coolingDownUntil` | Optional cooldown timestamp for failing accounts | Security note: this file contains OAuth refresh tokens. Treat it like a password file. 
@@ -163,11 +153,11 @@ Configure in `~/.config/opencode/openai-codex-auth-config.json`: } ``` -| Strategy | Behavior | Best For | -|----------|----------|----------| -| `sticky` | Same account until rate-limited | Prompt cache preservation | -| `round-robin` | Rotate to next account on every request | Maximum throughput | -| `hybrid` | Deterministic selection using health score + token bucket + LRU bias | Best overall distribution | +| Strategy | Behavior | Best For | +| ------------- | -------------------------------------------------------------------- | ------------------------- | +| `sticky` | Same account until rate-limited | Prompt cache preservation | +| `round-robin` | Rotate to next account on every request | Maximum throughput | +| `hybrid` | Deterministic selection using health score + token bucket + LRU bias | Best overall distribution | ### Hybrid Strategy Details diff --git a/docs/privacy.md b/docs/privacy.md index 8ba1de2..a8ea77b 100644 --- a/docs/privacy.md +++ b/docs/privacy.md @@ -26,12 +26,14 @@ This plugin prioritizes user privacy and data security. We believe in transparen All data is stored **locally on your machine**: ### OAuth Tokens + - **Location:** `~/.config/opencode/openai-codex-accounts.json` (plus any project-local storage seeded by OpenCode) - **Contents:** Refresh tokens, access tokens, expiration timestamps, account identity metadata - **Managed by:** This plugin's account storage (with file locking + atomic writes) - **Security:** File permissions restrict access to your user account ### Cache Files + - **Location:** `~/.config/opencode/cache/` - **Contents:** - `gpt-5.1-codex-instructions.md`, `gpt-5.3-codex-instructions.md`, `gpt-5.3-codex-instructions-v2.md`, etc. 
(Codex system instructions) @@ -40,11 +42,13 @@ All data is stored **locally on your machine**: - **Purpose:** Reduce GitHub API calls, preserve offline fallbacks, and improve startup/runtime performance ### Personality Cache Files + - **Location:** `~/.config/opencode/Personalities/` - **Contents:** `Friendly.md`, `Pragmatic.md` (server-derived personality fallbacks) - **Purpose:** Durable fallback when runtime defaults cannot be fetched; user-managed files are not overwritten ### Debug Logs + - **Location:** `~/.config/opencode/logs/codex-plugin/` - **Contents:** Request/response logs (only when `ENABLE_PLUGIN_REQUEST_LOGGING=1` is set) - **Includes:** @@ -59,14 +63,18 @@ All data is stored **locally on your machine**: ## Data Transmission ### Direct to OpenAI + All API requests go **directly from your machine to OpenAI's servers**: + - ✅ No intermediary proxies - ✅ No third-party data collection - ✅ HTTPS encrypted communication - ✅ OAuth-secured authentication ### What Gets Sent to OpenAI + When you use the plugin, the following is transmitted to OpenAI: + - Your prompts and conversation history - OAuth access token (for authentication) - ChatGPT account ID (from token JWT) @@ -76,6 +84,7 @@ When you use the plugin, the following is transmitted to OpenAI: **Note:** This is identical to what the official OpenAI Codex CLI sends. 
### What Does NOT Get Sent + - ❌ Your filesystem contents (unless explicitly requested via tools) - ❌ Personal information beyond what's in your prompts - ❌ Usage statistics or analytics @@ -86,7 +95,9 @@ When you use the plugin, the following is transmitted to OpenAI: ## Third-Party Services ### GitHub API + The plugin fetches Codex instructions from GitHub: + - **URL:** `https://api.github.com/repos/openai/codex/releases/latest` - **Purpose:** Resolve latest release tag, then fetch prompt/model metadata files from `raw.githubusercontent.com` - **Frequency:** Instructions use ETag + 15-minute staleness checks; runtime model metadata also has local cache fallback @@ -94,7 +105,9 @@ The plugin fetches Codex instructions from GitHub: - **Rate limiting:** 60 requests/hour (unauthenticated) ### OpenAI Services + All interactions with OpenAI go through: + - **OAuth:** `https://chatgpt.com/oauth` - **API:** `https://chatgpt.com/backend-api/codex/responses` and `https://chatgpt.com/backend-api/codex/models` @@ -107,6 +120,7 @@ See [OpenAI Privacy Policy](https://openai.com/policies/privacy-policy/) for how You have complete control over your data: ### Delete OAuth Tokens + ```bash opencode auth logout # Or manually: @@ -114,16 +128,19 @@ rm ~/.config/opencode/openai-codex-accounts.json ``` ### Delete Cache Files + ```bash rm -rf ~/.config/opencode/cache/ ``` ### Delete Logs + ```bash rm -rf ~/.config/opencode/logs/codex-plugin/ ``` ### Revoke OAuth Access + 1. Visit [ChatGPT Settings → Authorized Apps](https://chatgpt.com/settings/apps) 2. Find "OpenCode" or "Codex CLI" 3. Click "Revoke" @@ -135,19 +152,24 @@ This immediately invalidates all access tokens. 
## Security Measures ### Token Protection + - **Local storage only:** Tokens never leave your machine except when sent to OpenAI for authentication - **File permissions:** Auth files are readable only by your user account - **No logging:** OAuth tokens are never written to debug logs - **Automatic refresh:** Expired tokens are refreshed automatically ### PKCE Flow + The plugin uses **PKCE (Proof Key for Code Exchange)** for OAuth: + - Prevents authorization code interception attacks - Industry-standard security for OAuth 2.0 - Same method used by OpenAI's official Codex CLI ### HTTPS Encryption + All network communication uses HTTPS: + - OAuth authorization: Encrypted - API requests: Encrypted - Token refresh: Encrypted @@ -157,14 +179,18 @@ All network communication uses HTTPS: ## Compliance ### OpenAI's Privacy Policy + When using this plugin, you are subject to: + - [OpenAI Privacy Policy](https://openai.com/policies/privacy-policy/) - [OpenAI Terms of Use](https://openai.com/policies/terms-of-use/) **Your responsibility:** Ensure your usage complies with OpenAI's policies. ### GDPR Considerations + This plugin: + - ✅ Does not collect personal data - ✅ Does not process data on behalf of third parties - ✅ Stores data locally under your control @@ -177,16 +203,20 @@ However, data sent to OpenAI is subject to OpenAI's privacy practices. ## Transparency ### Open Source + The entire plugin source code is available at: + - **GitHub:** [https://github.com/iam-brain/opencode-openai-codex-multi-auth](https://github.com/iam-brain/opencode-openai-codex-multi-auth) You can: + - Review all code - Audit data handling - Verify no hidden telemetry - Inspect network requests ### No Hidden Behavior + - No obfuscated code - No minified dependencies - All network requests are documented @@ -197,6 +227,7 @@ You can: ## Questions? 
For privacy-related questions: + - **Plugin-specific:** [GitHub Issues](https://github.com/iam-brain/opencode-openai-codex-multi-auth/issues) - **OpenAI data handling:** [OpenAI Support](https://help.openai.com/) - **Security concerns:** See [SECURITY.md](../SECURITY.md) diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 2fc9c81..0a0d67c 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -7,12 +7,14 @@ Common issues and debugging techniques for the OpenCode OpenAI Codex Auth Plugin ### "401 Unauthorized" Error **Symptoms:** + ``` Error: 401 Unauthorized Failed to access Codex API ``` **Causes:** + 1. Token expired 2. Not authenticated yet 3. Invalid credentials @@ -20,17 +22,20 @@ Failed to access Codex API **Solutions:** **1. Re-authenticate:** + ```bash opencode auth login ``` **2. Check auth file exists:** + ```bash cat ~/.config/opencode/openai-codex-accounts.json # Should show stored accounts with OAuth credentials ``` **3. Check token expiration:** + ```bash # Token has "expires" timestamp cat ~/.config/opencode/openai-codex-accounts.json | jq '.accounts[]?.expires' @@ -42,37 +47,44 @@ date +%s000 # Current timestamp in milliseconds ### Browser Doesn't Open for OAuth **Symptoms:** + - `opencode auth login` succeeds but no browser window - OAuth callback times out **Solutions:** **1. Manual browser open:** + ```bash # The auth URL is shown in console - copy and paste to browser manually ``` **1a. Manual URL Paste login:** + - Re-run `opencode auth login` - Select **"ChatGPT Plus/Pro (Manual URL Paste)"** - Paste the full redirect URL after login **2. Check port 1455 availability:** + ```bash # See if something is using the OAuth callback port lsof -i :1455 ``` **3. 
Official Codex CLI conflict:** + - Stop Codex CLI if running - Both use port 1455 for OAuth ### "Invalid Session" or "Authorization session expired" **Symptoms:** + - Browser shows: `Your authorization session was not initialized or has expired` **Solutions:** + - Re-run `opencode auth login` to generate a fresh URL - Open the **"Go to"** URL directly in your browser (don’t use a stale link) - If you’re on SSH/WSL/remote, choose **"ChatGPT Plus/Pro (Manual URL Paste)"** @@ -82,6 +94,7 @@ lsof -i :1455 **Cause**: ChatGPT subscription issue **Check:** + 1. Active ChatGPT Plus or Pro subscription 2. Subscription not expired 3. Billing is current @@ -95,23 +108,29 @@ lsof -i :1455 ### Legacy .opencode cache/logs **Symptoms:** + - Plugin logs or cache files still show under `~/.opencode` **Cause:** + - Older versions stored cache/logs in `~/.opencode` **Fix:** + - The plugin now migrates cache/logs to `~/.config/opencode` on first use. You can also remove the legacy files manually once confirmed. ### Multiple plans overwritten **Symptoms:** + - Accounts with the same accountId but different emails or plans collapse into one entry **Cause:** + - Older versions matched on accountId (or accountId + plan) without email **Fix:** + ```bash rm ~/.config/opencode/openai-codex-accounts.json opencode auth login @@ -120,57 +139,70 @@ opencode auth login ### Accounts quarantined during repair **Symptoms:** + - Login prompt mentions repair and quarantine - Toast or CLI output references a `.quarantine-.json` file stored next to `~/.config/opencode/openai-codex-accounts.json` **Cause:** + - The accounts file is corrupt, or legacy entries could not be repaired **Fix:** + 1. Review the quarantine file to inspect the removed records 2. Re-run `opencode auth login` to rebuild storage 3. If you need to restore a record, copy it from the quarantine file and re-authenticate **Notes:** + - Quarantine files contain refresh tokens (treat them like passwords). 
- Older quarantine files may be pruned automatically to avoid unbounded buildup. ### "All account(s) are rate-limited" **Symptoms:** + - Requests fail with HTTP 429 - Error mentions all accounts being rate-limited **What this means:** + - The plugin tried every configured account and they are all currently in cooldown / rate-limit windows. **Solutions:** **1. Wait for the shortest reset window:** + - The plugin tracks rate-limit reset times per account in `~/.config/opencode/openai-codex-accounts.json` **2. Add another account:** + ```bash opencode auth login ``` **3. Check your selection strategy:** + - Default is `sticky` (best caching) - If you want maximum throughput with many accounts, set `accountSelectionStrategy: "round-robin"` in `~/.config/opencode/openai-codex-auth-config.json` **4. For parallel agents, keep PID offset enabled:** + - `pidOffsetEnabled: true` helps parallel OpenCode sessions start on different accounts ### Hard-stop: all accounts unavailable **Symptoms:** + - Requests fail with HTTP 429 and error type `all_accounts_rate_limited` **What this means:** + - All accounts are rate-limited beyond the hard-stop wait threshold. **Solutions:** + - Increase `hardStopMaxWaitMs` in `~/.config/opencode/openai-codex-auth-config.json` - Set `hardStopMaxWaitMs: 0` to disable the hard-stop and allow longer waits - Add another account (`opencode auth login`) @@ -178,12 +210,15 @@ opencode auth login ### Hard-stop: all accounts auth-failed **Symptoms:** + - Requests fail with HTTP 401 and error type `all_accounts_auth_failed` **What this means:** + - All accounts are in auth-failure cooldown. **Solutions:** + - Re-authenticate: `opencode auth login` ### Reset Accounts @@ -197,11 +232,7 @@ opencode auth login ### Inspect / Switch Accounts -In the OpenCode TUI, you can run: - -- `codex-status` -- `codex-switch-accounts` (pass a 1-based index) - +Use `opencode auth login` and choose **manage** to inspect accounts or change their enabled status. 
See [Multi-Account](multi-account.md) for details. ## Model Issues @@ -213,6 +244,7 @@ See [Multi-Account](multi-account.md) for details. **Cause 1: Config key mismatch** **Check your config:** + ```json { "models": { @@ -222,11 +254,13 @@ See [Multi-Account](multi-account.md) for details. ``` **CLI Usage (Modern):** + ```bash opencode run "test" --model=openai/gpt-5.3-codex --variant=low ``` **CLI Usage (Legacy Suffix):** + ```bash opencode run "test" --model=openai/gpt-5.3-codex-low # Must match config key ``` @@ -234,11 +268,13 @@ opencode run "test" --model=openai/gpt-5.3-codex-low # Must match config key **Cause 2: Missing provider prefix** **❌ Wrong:** + ```yaml model: gpt-5.3-codex-low ``` **✅ Correct:** + ```yaml model: openai/gpt-5.3-codex variant: low @@ -247,25 +283,31 @@ variant: low ### Hard-stop: unsupported model **Symptoms:** + - Requests fail with HTTP 400 and error type `unsupported_model` **What this means:** + - The requested model is not in the server catalog. Custom model IDs are rejected. **Solutions:** + - Use a model ID that appears in `/codex/models` - Update your config to match the catalog model IDs (see `config/opencode-modern.json`) ### Hard-stop: model catalog unavailable **Symptoms:** + - Requests fail with HTTP 400 and error type `unsupported_model` - Error message mentions the model catalog being unavailable **What this means:** + - The plugin cannot access `/codex/models` and has no cached catalog. **Solutions:** + - Run once with network access to seed the catalog cache - Retry after the catalog cache is available - Check for `codex-models-cache-.json` under `~/.config/opencode/cache/` (per-account hashed) @@ -275,11 +317,13 @@ variant: low **Symptom**: All models behave the same despite different `reasoningEffort` **Debug:** + ```bash DEBUG_CODEX_PLUGIN=1 opencode run "test" --model=openai/your-model ``` **Look for:** + ``` hasModelSpecificConfig: true ← Should be true resolvedConfig: { reasoningEffort: 'low', ... 
} ← Should show your options @@ -288,6 +332,7 @@ resolvedConfig: { reasoningEffort: 'low', ... } ← Should show your options **If `false`**: Config lookup failed **Common causes:** + 1. Model name in CLI doesn't match config key 2. Typo in config file 3. Wrong config file location @@ -299,6 +344,7 @@ resolvedConfig: { reasoningEffort: 'low', ... } ← Should show your options ### "Item not found" Errors **Error:** + ``` AI_APICallError: Item with id 'msg_abc123' not found. Items are not persisted when `store` is set to false. @@ -307,6 +353,7 @@ Items are not persisted when `store` is set to false. **Cause**: Older plugin version **Solution:** + ```bash # Update plugin npx -y opencode-openai-codex-multi-auth@latest @@ -316,6 +363,7 @@ opencode ``` **Verify fix:** + ```bash DEBUG_CODEX_PLUGIN=1 opencode > write test.txt @@ -330,6 +378,7 @@ Should see: `Successfully removed all X message IDs` **Symptom**: Model doesn't remember previous turns **Check logs:** + ```bash ENABLE_PLUGIN_REQUEST_LOGGING=1 opencode > first message @@ -337,6 +386,7 @@ ENABLE_PLUGIN_REQUEST_LOGGING=1 opencode ``` **Verify:** + ```bash # Turn 2 should have full history cat ~/.config/opencode/logs/codex-plugin/request-*-after-transform.json | jq '.body.input | length' @@ -344,6 +394,7 @@ cat ~/.config/opencode/logs/codex-plugin/request-*-after-transform.json | jq '.b ``` **What to check:** + 1. Full message history present (not just current turn) 2. No `item_reference` items (filtered out) 3. All IDs stripped (`jq '.body.input[].id'` should all be `null`) @@ -355,6 +406,7 @@ cat ~/.config/opencode/logs/codex-plugin/request-*-after-transform.json | jq '.b ### "400 Bad Request" **Check error details:** + ```bash ENABLE_PLUGIN_REQUEST_LOGGING=1 opencode run "test" @@ -363,6 +415,7 @@ cat ~/.config/opencode/logs/codex-plugin/request-*-error-response.json ``` **Common causes:** + 1. Invalid options for model (e.g., `minimal` for gpt-5.3-codex) 2. Malformed request body 3. 
Unsupported parameter @@ -370,6 +423,7 @@ cat ~/.config/opencode/logs/codex-plugin/request-*-error-response.json ### "Rate Limit Exceeded" **Error:** + ``` Rate limit reached for gpt-5.3-codex ``` @@ -378,11 +432,13 @@ Rate limit reached for gpt-5.3-codex **1. Wait for reset:** Check headers in response logs: + ```bash cat ~/.config/opencode/logs/codex-plugin/request-*-response.json | jq '.headers["x-codex-primary-reset-after-seconds"]' ``` **2. Use a specific model variant:** + ```bash # Explicitly use variant via flag opencode run "task" --model=openai/gpt-5.3-codex --variant=high @@ -391,6 +447,7 @@ opencode run "task" --model=openai/gpt-5.3-codex --variant=high ### "Context Window Exceeded" **Error:** + ``` Your input exceeds the context window ``` @@ -400,6 +457,7 @@ Your input exceeds the context window **Solutions:** **1. Start new conversation:** + ```bash # Exit and restart OpenCode (clears history) ``` @@ -407,6 +465,7 @@ Your input exceeds the context window **2. Use compact mode** (if OpenCode supports it) **3. 
Switch to model with larger context:** + - gpt-5.3-codex / gpt-5.2-codex / gpt-5.1-codex presets have larger context windows than lightweight presets --- @@ -416,6 +475,7 @@ Your input exceeds the context window ### Rate Limit Exhausted **Error:** + ``` Failed to fetch instructions from GitHub: Failed to fetch latest release: 403 Using cached instructions @@ -424,10 +484,12 @@ Using cached instructions **Cause**: GitHub API rate limit (60 req/hour for unauthenticated) **Current behavior**: + - Instructions are ETag-cached with periodic refresh checks - Runtime model metadata is online-first (`/codex/models`) with local cache + GitHub/static fallbacks **Verify fix:** + ```bash # Check instruction cache metadata files ls -lt ~/.config/opencode/cache/*-instructions-meta.json @@ -440,6 +502,7 @@ ls -lt ~/.config/opencode/cache/codex-models-cache-*.json ``` **Manual workaround** (if on old version): + - Wait 1 hour for rate limit to reset - Or use cached instructions (automatic fallback) @@ -455,10 +518,12 @@ DEBUG_CODEX_PLUGIN=1 ENABLE_PLUGIN_REQUEST_LOGGING=1 opencode run "test" ``` **What you get:** + - Console: Debug messages showing config resolution - Files: Complete request/response logs **Log locations:** + - `~/.config/opencode/logs/codex-plugin/request-*-before-transform.json` - `~/.config/opencode/logs/codex-plugin/request-*-after-transform.json` - `~/.config/opencode/logs/codex-plugin/request-*-response.json` @@ -480,6 +545,7 @@ cat ~/.config/opencode/logs/codex-plugin/request-*-after-transform.json | jq '{ ``` **Verify:** + - `model`: Normalized correctly? - `reasoning.effort`: Matches your config? - `text.verbosity`: Matches your config? @@ -497,11 +563,13 @@ See [development/TESTING.md](development/TESTING.md) for expected values matrix. ### Slow Responses **Possible causes:** + 1. `reasoningEffort: "high"` - Uses more computation 2. `textVerbosity: "high"` - Generates longer outputs 3. 
Network latency **Solutions:** + - Use lower reasoning effort for faster responses - Check network connection - Try different time of day (server load varies) @@ -509,12 +577,14 @@ See [development/TESTING.md](development/TESTING.md) for expected values matrix. ### High Token Usage **Monitor usage:** + ```bash # Tokens shown in logs cat ~/.config/opencode/logs/codex-plugin/request-*-stream-full.json | grep -o '"total_tokens":[0-9]*' ``` **Reduce tokens:** + 1. Lower `textVerbosity` 2. Lower `reasoningEffort` 3. Keep custom system prompts concise @@ -526,6 +596,7 @@ cat ~/.config/opencode/logs/codex-plugin/request-*-stream-full.json | grep -o '" ### Before Opening an Issue 1. **Enable logging:** + ```bash DEBUG_CODEX_PLUGIN=1 ENABLE_PLUGIN_REQUEST_LOGGING=1 opencode run "your command" ``` @@ -533,8 +604,9 @@ cat ~/.config/opencode/logs/codex-plugin/request-*-stream-full.json | grep -o '" 2. **Collect info:** - OpenCode version: `opencode --version` - Plugin version: Check `package.json` or npm + - Error logs from `~/.config/opencode/logs/codex-plugin/` - - Config file (redact sensitive info) + - Config file (redact sensitive info) 3. 
**Check existing issues:** - [GitHub Issues](https://github.com/iam-brain/opencode-openai-codex-multi-auth/issues) @@ -542,6 +614,7 @@ cat ~/.config/opencode/logs/codex-plugin/request-*-stream-full.json | grep -o '" ### Reporting Bugs Include: + - ✅ Error message - ✅ Steps to reproduce - ✅ Config file (redacted) diff --git a/index.ts b/index.ts index 325d8fd..1838950 100644 --- a/index.ts +++ b/index.ts @@ -2,7 +2,7 @@ * OpenAI ChatGPT (Codex) OAuth Plugin */ -import { tool, type Plugin, type PluginInput } from "@opencode-ai/plugin"; +import type { Plugin, PluginInput } from "@opencode-ai/plugin"; import type { Auth } from "@opencode-ai/sdk"; import { createAuthorizationFlow, @@ -29,7 +29,6 @@ import { CODEX_BASE_URL, DEFAULT_MODEL_FAMILY, DUMMY_API_KEY, - MODEL_FAMILIES, PLUGIN_NAME, PROVIDER_ID, } from "./lib/constants.js"; @@ -39,27 +38,20 @@ import { extractAccountEmail, extractAccountId, extractAccountPlan, - formatAccountLabel, isOAuthAuth, sanitizeEmail, } from "./lib/accounts.js"; import { - promptLoginMode, - promptManageAccounts, + promptRepairAccounts, } from "./lib/cli.js"; import { normalizePlanTypeOrDefault } from "./lib/plan-utils.js"; -import { - configureStorageForCurrentCwd, - configureStorageForPluginConfig, -} from "./lib/storage-scope.js"; +import { configureStorageForPluginConfig } from "./lib/storage-scope.js"; import { getStoragePath, - getStorageScope, autoQuarantineCorruptAccountsFile, loadAccounts, quarantineAccounts, replaceAccountsFile, - saveAccounts, saveAccountsWithLock, toggleAccountEnabled, } from "./lib/storage.js"; @@ -68,11 +60,10 @@ import { findAccountMatchIndex } from "./lib/account-matching.js"; import type { AccountStorageV3, OAuthAuthDetails, TokenResult, TokenSuccess, UserConfig } from "./lib/types.js"; import { getHealthTracker, getTokenTracker } from "./lib/rotation.js"; import { RateLimitTracker } from "./lib/rate-limit.js"; -import { codexStatus, type CodexRateLimitSnapshot } from "./lib/codex-status.js"; -import { 
renderObsidianDashboard } from "./lib/codex-status-ui.js"; +import { codexStatus } from "./lib/codex-status.js"; import { renderQuotaReport } from "./lib/ui/codex-quota-report.js"; import { runAuthMenuOnce } from "./lib/ui/auth-menu-runner.js"; -import type { AuthMenuAccount } from "./lib/ui/auth-menu.js"; +import type { AccountInfo } from "./lib/ui/auth-menu.js"; import { ProactiveRefreshQueue, createRefreshScheduler, @@ -99,6 +90,14 @@ const FALLBACK_MODEL_SLUGS = new Set([ "gpt-5.1-codex-mini", ]); +const LEGACY_CODEX_COMMAND_KEYS = new Set([ + "codex-auth", + "codex-status", + "codex-switch-accounts", + "codex-toggle-account", + "codex-remove-account", +]); + function parseGptVersion(slug: string): { major: number; minor: number } | null { const match = slug.toLowerCase().match(/^gpt-(\d+)\.(\d+)/); if (!match) return null; @@ -642,32 +641,47 @@ export const OpenAIAuthPlugin: Plugin = async ({ client }: PluginInput) => { return toggleAccountEnabled(storage, index) ?? storage; }; - const buildExistingAccountLabels = (storage: AccountStorageV3) => - storage.accounts.map((account, index) => ({ - index, - email: account.email, - plan: account.plan, - accountId: account.accountId, - refreshToken: account.refreshToken, - enabled: account.enabled, - })); + const hasActiveCooldown = ( + resetTimes: Record | undefined, + now: number, + ): boolean => { + if (!resetTimes) return false; + return Object.values(resetTimes).some( + (resetAt) => typeof resetAt === "number" && Number.isFinite(resetAt) && resetAt > now, + ); + }; const buildAuthMenuAccounts = ( accounts: ReturnType, activeIndex: number, - ): AuthMenuAccount[] => - accounts.map((account) => ({ - index: account.index, - accountId: account.accountId, - email: account.email, - plan: account.plan, - enabled: account.enabled, - lastUsed: account.lastUsed, - rateLimitResetTimes: account.rateLimitResetTimes, - coolingDownUntil: account.coolingDownUntil, - cooldownReason: account.cooldownReason, - isActive: account.index 
=== activeIndex, - })); + ): AccountInfo[] => { + const now = Date.now(); + return accounts.map((account) => { + const isCurrentAccount = account.index === activeIndex; + let status: AccountInfo["status"] = "unknown"; + if (account.cooldownReason === "auth-failure") { + status = "expired"; + } else if ( + (account.coolingDownUntil && account.coolingDownUntil > now) || + hasActiveCooldown(account.rateLimitResetTimes, now) + ) { + status = "rate-limited"; + } else if (isCurrentAccount) { + status = "active"; + } + return { + index: account.index, + accountId: account.accountId, + email: account.email, + plan: account.plan, + addedAt: account.addedAt, + lastUsed: account.lastUsed, + enabled: account.enabled, + status, + isCurrentAccount, + }; + }); + }; const runInteractiveAuthMenu = async (options: { allowExit: boolean }): Promise<"add" | "exit"> => { while (true) { @@ -675,11 +689,8 @@ export const OpenAIAuthPlugin: Plugin = async ({ client }: PluginInput) => { const accounts = accountManager.getAccountsSnapshot(); const activeIndex = accountManager.getActiveIndexForFamily(DEFAULT_MODEL_FAMILY); const menuAccounts = buildAuthMenuAccounts(accounts, activeIndex); - const now = Date.now(); - const result = await runAuthMenuOnce({ accounts: menuAccounts, - now, input: process.stdin, output: process.stdout, handlers: { @@ -755,46 +766,19 @@ export const OpenAIAuthPlugin: Plugin = async ({ client }: PluginInput) => { const oauthMethod = { label: AUTH_LABELS.OAUTH, type: "oauth" as const, - authorize: async (inputs?: Record) => { + authorize: async (_inputs?: Record) => { let replaceExisting = false; - if (inputs) { - let existingStorage = await loadAccounts(); - if (existingStorage?.accounts?.length) { - if (process.stdin.isTTY && process.stdout.isTTY) { - await runInteractiveAuthMenu({ allowExit: false }); - } else { - while (true) { - const existingLabels = buildExistingAccountLabels(existingStorage); - const mode = await promptLoginMode(existingLabels); - - if (mode 
=== "manage") { - const action = await promptManageAccounts(existingLabels); - if (!action) { - continue; - } - - if (action.action === "toggle") { - existingStorage = await updateStorageWithLock((current) => - toggleAccountFromStorage(current, action.target), - ); - } else { - existingStorage = await updateStorageWithLock((current) => - removeAccountFromStorage(current, action.target), - ); - } - - if (existingStorage.accounts.length === 0) { - replaceExisting = true; - break; - } - continue; - } - - replaceExisting = mode === "fresh"; - break; - } - } + const existingStorage = await loadAccounts(); + if (existingStorage?.accounts?.length && process.stdin.isTTY && process.stdout.isTTY) { + const menuResult = await runInteractiveAuthMenu({ allowExit: true }); + if (menuResult === "exit") { + return { + url: "about:blank", + method: "code" as const, + instructions: "Login cancelled.", + callback: async () => ({ type: "failed" as const }), + }; } } @@ -1026,153 +1010,18 @@ async fetch(input: Request | string | URL, init?: RequestInit): Promise - !new Set(["codex-status", "codex-switch-accounts", "codex-toggle-account", "codex-remove-account"]).has( - toolName, - ), - ); - if (!cfg.experimental.primary_tools.includes("codex-auth")) { - cfg.experimental.primary_tools.push("codex-auth"); + if (cfg.command && typeof cfg.command === "object") { + for (const key of LEGACY_CODEX_COMMAND_KEYS) { + if (key in cfg.command) delete cfg.command[key]; + } + } + if (cfg.experimental?.primary_tools) { + cfg.experimental.primary_tools = cfg.experimental.primary_tools.filter( + (toolName) => !LEGACY_CODEX_COMMAND_KEYS.has(toolName), + ); } }, - tool: { - "codex-auth": tool({ - description: "Open the interactive Codex auth menu.", - args: {}, - async execute() { - if (!process.stdin.isTTY || !process.stdout.isTTY) { - return "Interactive auth menu requires a TTY. 
Run `opencode auth login`."; - } - const result = await runInteractiveAuthMenu({ allowExit: true }); - if (result === "add") { - return "Add accounts with `opencode auth login`."; - } - return "Done."; - }, - }), - "codex-status": tool({ - description: "List all configured OpenAI Codex accounts and their current rate limits.", - args: {}, - async execute() { - configureStorageForCurrentCwd(); - const accountManager = await AccountManager.loadFromDisk(); - const accounts = accountManager.getAccountsSnapshot(); - const { scope, storagePath } = getStorageScope(); - if (accounts.length === 0) return [`OpenAI Codex Status`, ``, ` Scope: ${scope}`, ` Accounts: 0`, ``, `Add accounts:`, ` opencode auth login`, ``, `Storage: ${storagePath}`].join("\n"); - - await Promise.all(accounts.map(async (acc, index) => { - if (acc.enabled === false) return; - const live = accountManager.getAccountByIndex(index); - if (!live) return; - - try { - const auth = accountManager.toAuthDetails(live); - if (auth.access && auth.expires > Date.now()) { - await codexStatus.fetchFromBackend(live, auth.access); - } - } catch { - } - })); - - const enabledCount = accounts.filter(a => a.enabled !== false).length; - const activeIndex = accountManager.getActiveIndexForFamily(DEFAULT_MODEL_FAMILY); - const snapshots = await codexStatus.getAllSnapshots(); - - const lines: string[] = [ - `OpenAI Codex Status`, - ``, - ` Scope: ${scope}`, - ` Accounts: ${enabledCount}/${accounts.length} enabled`, - ``, - ...renderObsidianDashboard(accounts, activeIndex, snapshots) - ]; - - lines.push(``); - lines.push(`Storage: ${storagePath}`); - return lines.join("\n"); - }, - }), - "codex-switch-accounts": tool({ - description: "Switch active OpenAI account by index (1-based).", - args: { index: tool.schema.number().describe("Account number (1-based)") }, - async execute({ index }) { - configureStorageForCurrentCwd(); - const storage = await loadAccounts(); - if (!storage || storage.accounts.length === 0) return "No 
OpenAI accounts configured."; - const targetIndex = Math.floor((index ?? 0) - 1); - if (targetIndex < 0 || targetIndex >= storage.accounts.length) return `Invalid account number: ${index}. Valid range: 1-${storage.accounts.length}`; - storage.activeIndex = targetIndex; - storage.activeIndexByFamily = storage.activeIndexByFamily ?? {}; - for (const family of MODEL_FAMILIES) storage.activeIndexByFamily[family] = targetIndex; - await saveAccounts(storage, { preserveRefreshTokens: true }); - if (cachedAccountManager) { cachedAccountManager.setActiveIndex(targetIndex); await cachedAccountManager.saveToDisk(); } - return `Switched to ${formatAccountLabel(storage.accounts[targetIndex], targetIndex)}`; - }, - }), - "codex-toggle-account": tool({ - description: "Enable or disable an OpenAI account by index (1-based).", - args: { index: tool.schema.number().describe("Account number (1-based)") }, - async execute({ index }) { - configureStorageForCurrentCwd(); - const storage = await loadAccounts(); - if (!storage || storage.accounts.length === 0) return "No OpenAI accounts configured."; - const targetIndex = Math.floor((index ?? 0) - 1); - if (targetIndex < 0 || targetIndex >= storage.accounts.length) return `Invalid account number: ${index}. Valid range: 1-${storage.accounts.length}`; - const updated = toggleAccountEnabled(storage, targetIndex); - if (!updated) return `Failed to toggle account number: ${index}`; - await saveAccounts(updated, { preserveRefreshTokens: true }); - if (cachedAccountManager) { - const live = cachedAccountManager.getAccountByIndex(targetIndex); - if (live) { live.enabled = updated.accounts[targetIndex]?.enabled !== false; await cachedAccountManager.saveToDisk(); } - } - return `${updated.accounts[targetIndex]?.enabled !== false ? "Enabled" : "Disabled"} ${formatAccountLabel(updated.accounts[targetIndex], targetIndex)}`; - }, - }), - "codex-remove-account": tool({ - description: "Remove an OpenAI account by index (1-based). 
This is permanent.", - args: { - index: tool.schema.number().describe("Account number (1-based)"), - confirm: tool.schema.boolean().optional().describe("Confirm removal (required)"), - }, - async execute({ index, confirm }) { - if (!confirm) { - return "To remove account, call with confirm: true"; - } - configureStorageForCurrentCwd(); - const accountManager = cachedAccountManager ?? await AccountManager.loadFromDisk(); - const snapshot = accountManager.getAccountsSnapshot(); - if (snapshot.length === 0) return "No OpenAI accounts configured."; - - const targetIndex = Math.floor((index ?? 0) - 1); - if (targetIndex < 0 || targetIndex >= snapshot.length) { - return `Invalid account number: ${index}.`; - } - const account = accountManager.getAccountByIndex(targetIndex); - if (!account) return `Invalid account number: ${index}.`; - - const label = formatAccountLabel(account, targetIndex); - const success = await accountManager.removeAccountByIndex(targetIndex); - - if (!success) return `Failed to remove account ${index}.`; - - return `Removed ${label}.`; - }, - }), - }, + tool: {}, }; }; diff --git a/lib/catalog-defaults.ts b/lib/catalog-defaults.ts index ef2beb5..6d887c7 100644 --- a/lib/catalog-defaults.ts +++ b/lib/catalog-defaults.ts @@ -5,32 +5,32 @@ import { getOpencodeCacheDir } from "./paths.js"; import { logWarn } from "./logger.js"; type ModelConfig = Record & { - name?: string; - limit?: { context?: number; output?: number }; - modalities?: { input?: string[]; output?: string[] }; - description?: string; - visibility?: string; - priority?: number; - supportedInApi?: boolean; - minimalClientVersion?: string; + name?: string; + limit?: { context?: number; output?: number }; + modalities?: { input?: string[]; output?: string[] }; + description?: string; + visibility?: string; + priority?: number; + supportedInApi?: boolean; + minimalClientVersion?: string; }; type ModelsCache = { - models?: Array; + models?: Array; }; type CatalogModel = { - slug?: string; - 
display_name?: string; - description?: string; - visibility?: string; - priority?: number; - supported_in_api?: boolean; - minimal_client_version?: string; - context_window?: number; - truncation_policy?: { mode?: string; limit?: number }; - input_modalities?: string[]; - output_modalities?: string[]; + slug?: string; + display_name?: string; + description?: string; + visibility?: string; + priority?: number; + supported_in_api?: boolean; + minimal_client_version?: string; + context_window?: number; + truncation_policy?: { mode?: string; limit?: number }; + input_modalities?: string[]; + output_modalities?: string[]; }; const EFFORT_SUFFIX_REGEX = /-(none|minimal|low|medium|high|xhigh)$/i; @@ -40,265 +40,289 @@ const __filename = fileURLToPath(import.meta.url); const __dirname = dirname(__filename); function isObjectRecord(value: unknown): value is Record { - return typeof value === "object" && value !== null && !Array.isArray(value); + return typeof value === "object" && value !== null && !Array.isArray(value); } function normalizeBaseId(modelId: string): string { - return modelId.toLowerCase().trim().replace(EFFORT_SUFFIX_REGEX, ""); + return modelId.toLowerCase().trim().replace(EFFORT_SUFFIX_REGEX, ""); } function resolveStaticTemplateFiles(moduleDir: string = __dirname): string[] { - const candidateDirs = [ - join(moduleDir, "..", "config"), - join(moduleDir, "..", "..", "config"), - join(moduleDir, "..", "..", "..", "config"), - ]; - const files: string[] = []; - const seen = new Set(); + const candidateDirs = [ + join(moduleDir, "..", "config"), + join(moduleDir, "..", "..", "config"), + join(moduleDir, "..", "..", "..", "config"), + ]; + const files: string[] = []; + const seen = new Set(); - for (const configDir of candidateDirs) { - for (const fileName of STATIC_TEMPLATE_FILES) { - const filePath = join(configDir, fileName); - if (seen.has(filePath)) continue; - seen.add(filePath); - files.push(filePath); - } - } + for (const configDir of candidateDirs) { 
+ for (const fileName of STATIC_TEMPLATE_FILES) { + const filePath = join(configDir, fileName); + if (seen.has(filePath)) continue; + seen.add(filePath); + files.push(filePath); + } + } - return files; + return files; } -function readStaticTemplateModels(moduleDir: string = __dirname): Map { - const models = new Map(); - const templateFiles = resolveStaticTemplateFiles(moduleDir); +function readStaticTemplateModels( + moduleDir: string = __dirname, +): Map { + const models = new Map(); + const templateFiles = resolveStaticTemplateFiles(moduleDir); - for (const filePath of templateFiles) { - try { - if (!existsSync(filePath)) continue; - const parsed = JSON.parse(readFileSync(filePath, "utf8")) as { - provider?: { openai?: { models?: Record } }; - }; - const templateModels = parsed.provider?.openai?.models ?? {}; - for (const [modelId, config] of Object.entries(templateModels)) { + for (const filePath of templateFiles) { + try { + if (!existsSync(filePath)) continue; + const parsed = JSON.parse(readFileSync(filePath, "utf8")) as { + provider?: { openai?: { models?: Record } }; + }; + const providerOpenAI = parsed.provider?.openai; + const templateModels = providerOpenAI?.models ?? 
{}; + for (const [modelId, config] of Object.entries(templateModels)) { + if (!isObjectRecord(config)) continue; + const baseId = normalizeBaseId(modelId); + if (models.has(baseId)) continue; + models.set(baseId, JSON.parse(JSON.stringify(config)) as ModelConfig); + } + if (providerOpenAI && isObjectRecord(providerOpenAI)) { + for (const [modelId, config] of Object.entries(providerOpenAI)) { + if (modelId === "models" || modelId === "options") continue; if (!isObjectRecord(config)) continue; const baseId = normalizeBaseId(modelId); if (models.has(baseId)) continue; models.set(baseId, JSON.parse(JSON.stringify(config)) as ModelConfig); } - } catch (error) { - logWarn(`Failed to parse static template file: ${filePath}`, error); } - } + } catch (error) { + logWarn(`Failed to parse static template file: ${filePath}`, error); + } + } - return models; + return models; } function readCachedCatalogSlugs(cacheFile: string): string[] { - try { - if (!existsSync(cacheFile)) return []; - const parsed = JSON.parse(readFileSync(cacheFile, "utf8")) as ModelsCache; - const slugs = parsed.models?.map((model) => model.slug).filter(Boolean) ?? []; - return Array.from( - new Set(slugs.map((slug) => normalizeBaseId(slug as string))), - ); - } catch (error) { - logWarn("Failed to read codex model cache", error); - return []; - } + try { + if (!existsSync(cacheFile)) return []; + const parsed = JSON.parse(readFileSync(cacheFile, "utf8")) as ModelsCache; + const slugs = + parsed.models?.map((model) => model.slug).filter(Boolean) ?? []; + return Array.from( + new Set(slugs.map((slug) => normalizeBaseId(slug as string))), + ); + } catch (error) { + logWarn("Failed to read codex model cache", error); + return []; + } } function readCachedCatalogModels(cacheFile: string): CatalogModel[] { - try { - if (!existsSync(cacheFile)) return []; - const parsed = JSON.parse(readFileSync(cacheFile, "utf8")) as ModelsCache; - return parsed.models?.filter((model) => model?.slug) ?? 
[]; - } catch (error) { - logWarn("Failed to read codex model cache", error); - return []; - } + try { + if (!existsSync(cacheFile)) return []; + const parsed = JSON.parse(readFileSync(cacheFile, "utf8")) as ModelsCache; + return parsed.models?.filter((model) => model?.slug) ?? []; + } catch (error) { + logWarn("Failed to read codex model cache", error); + return []; + } } /** * Find the appropriate template ID for a model slug. - * + * * Rules: * - Codex models (contain "-codex") → fall back to codex templates * - Non-codex GPT models → fall back to non-codex templates * - Never mix: don't apply codex defaults to non-codex models */ -function pickTemplateId(baseId: string, defaults: Map): string | null { - // Direct match first - if (defaults.has(baseId)) return baseId; - - const isCodexModel = baseId.includes("-codex"); - - if (isCodexModel) { - // Codex model fallbacks (most specific to least specific) - if (baseId.includes("-codex-max") && defaults.has("gpt-5.1-codex-max")) { - return "gpt-5.1-codex-max"; - } - if (baseId.includes("-codex-mini") && defaults.has("gpt-5.1-codex-mini")) { - return "gpt-5.1-codex-mini"; - } - // Generic codex fallback - newest available - if (defaults.has("gpt-5.3-codex")) return "gpt-5.3-codex"; - if (defaults.has("gpt-5.2-codex")) return "gpt-5.2-codex"; - if (defaults.has("gpt-5.1-codex")) return "gpt-5.1-codex"; - } else if (baseId.startsWith("gpt-5.")) { - // Non-codex GPT model fallbacks (e.g., gpt-5.2-pro, gpt-5.3) - if (defaults.has("gpt-5.2")) return "gpt-5.2"; - if (defaults.has("gpt-5.1")) return "gpt-5.1"; - } - - return null; +function pickTemplateId( + baseId: string, + defaults: Map, +): string | null { + // Direct match first + if (defaults.has(baseId)) return baseId; + + const isCodexModel = baseId.includes("-codex"); + + if (isCodexModel) { + // Codex model fallbacks (most specific to least specific) + if (baseId.includes("-codex-max") && defaults.has("gpt-5.1-codex-max")) { + return "gpt-5.1-codex-max"; + } + if 
(baseId.includes("-codex-mini") && defaults.has("gpt-5.1-codex-mini")) { + return "gpt-5.1-codex-mini"; + } + // Generic codex fallback - newest available + if (defaults.has("gpt-5.3-codex")) return "gpt-5.3-codex"; + if (defaults.has("gpt-5.2-codex")) return "gpt-5.2-codex"; + if (defaults.has("gpt-5.1-codex")) return "gpt-5.1-codex"; + } else if (baseId.startsWith("gpt-5.")) { + // Non-codex GPT model fallbacks (e.g., gpt-5.2-pro, gpt-5.3) + if (defaults.has("gpt-5.2")) return "gpt-5.2"; + if (defaults.has("gpt-5.1")) return "gpt-5.1"; + } + + return null; } function formatModelDisplayName(baseId: string): string { - const parts = baseId.split("-").filter(Boolean); - if (parts.length === 0) return `${baseId} (Codex)`; - let label = ""; - if (parts[0] === "gpt" && parts[1]) { - label = `GPT ${parts[1]}`; - for (const part of parts.slice(2)) { - label += ` ${part.charAt(0).toUpperCase()}${part.slice(1)}`; - } - } else { - label = parts - .map((part) => `${part.charAt(0).toUpperCase()}${part.slice(1)}`) - .join(" "); - } - return `${label} (Codex)`; + const parts = baseId.split("-").filter(Boolean); + if (parts.length === 0) return `${baseId} (Codex)`; + let label = ""; + if (parts[0] === "gpt" && parts[1]) { + label = `GPT ${parts[1]}`; + for (const part of parts.slice(2)) { + label += ` ${part.charAt(0).toUpperCase()}${part.slice(1)}`; + } + } else { + label = parts + .map((part) => `${part.charAt(0).toUpperCase()}${part.slice(1)}`) + .join(" "); + } + return `${label} (Codex)`; } function applyCatalogMetadata( - config: ModelConfig, - model: CatalogModel, + config: ModelConfig, + model: CatalogModel, ): ModelConfig { - const next: ModelConfig = { ...config }; - if (typeof model.display_name === "string" && model.display_name.trim()) { - next.name = model.display_name.trim(); - } - if (Number.isFinite(model.context_window)) { - next.limit = { - ...next.limit, - context: model.context_window, - }; - } - const truncationLimit = model.truncation_policy?.limit; - 
const truncationMode = model.truncation_policy?.mode; - if (Number.isFinite(truncationLimit) && truncationMode === "tokens") { - next.limit = { - ...next.limit, - output: truncationLimit, - }; - } - if (Array.isArray(model.input_modalities) && model.input_modalities.length > 0) { - next.modalities = { - ...next.modalities, - input: [...model.input_modalities], - }; - } - if (Array.isArray(model.output_modalities) && model.output_modalities.length > 0) { - next.modalities = { - ...next.modalities, - output: [...model.output_modalities], - }; - } - if (typeof model.description === "string" && model.description.trim()) { - next.description = model.description.trim(); - } - if (typeof model.visibility === "string" && model.visibility.trim()) { - next.visibility = model.visibility.trim(); - } - if (typeof model.priority === "number" && Number.isFinite(model.priority)) { - next.priority = model.priority; - } - if (typeof model.supported_in_api === "boolean") { - next.supportedInApi = model.supported_in_api; - } - if ( - typeof model.minimal_client_version === "string" && - model.minimal_client_version.trim() - ) { - next.minimalClientVersion = model.minimal_client_version.trim(); - } - return next; + const next: ModelConfig = { ...config }; + if (typeof model.display_name === "string" && model.display_name.trim()) { + next.name = model.display_name.trim(); + } + if (Number.isFinite(model.context_window)) { + next.limit = { + ...next.limit, + context: model.context_window, + }; + } + const truncationLimit = model.truncation_policy?.limit; + const truncationMode = model.truncation_policy?.mode; + if (Number.isFinite(truncationLimit) && truncationMode === "tokens") { + next.limit = { + ...next.limit, + output: truncationLimit, + }; + } + if ( + Array.isArray(model.input_modalities) && + model.input_modalities.length > 0 + ) { + next.modalities = { + ...next.modalities, + input: [...model.input_modalities], + }; + } + if ( + Array.isArray(model.output_modalities) && + 
model.output_modalities.length > 0 + ) { + next.modalities = { + ...next.modalities, + output: [...model.output_modalities], + }; + } + if (typeof model.description === "string" && model.description.trim()) { + next.description = model.description.trim(); + } + if (typeof model.visibility === "string" && model.visibility.trim()) { + next.visibility = model.visibility.trim(); + } + if (typeof model.priority === "number" && Number.isFinite(model.priority)) { + next.priority = model.priority; + } + if (typeof model.supported_in_api === "boolean") { + next.supportedInApi = model.supported_in_api; + } + if ( + typeof model.minimal_client_version === "string" && + model.minimal_client_version.trim() + ) { + next.minimalClientVersion = model.minimal_client_version.trim(); + } + return next; } export function buildInternalModelDefaults(options?: { - cacheFile?: string; - moduleDir?: string; + cacheFile?: string; + moduleDir?: string; }): Record { - const moduleDir = options?.moduleDir ?? __dirname; - const defaults = readStaticTemplateModels(moduleDir); - const cacheFile = options?.cacheFile ?? join(getOpencodeCacheDir(), "codex-models-cache.json"); - const catalogModels = readCachedCatalogModels(cacheFile); - const catalogSlugs = readCachedCatalogSlugs(cacheFile); + const moduleDir = options?.moduleDir ?? __dirname; + const defaults = readStaticTemplateModels(moduleDir); + const cacheFile = + options?.cacheFile ?? 
+ join(getOpencodeCacheDir(), "codex-models-cache.json"); + const catalogModels = readCachedCatalogModels(cacheFile); + const catalogSlugs = readCachedCatalogSlugs(cacheFile); - for (const slug of catalogSlugs) { - if (!defaults.has(slug)) { - const templateId = pickTemplateId(slug, defaults); - if (!templateId) continue; - const template = defaults.get(templateId); - if (!template) continue; - const cloned = JSON.parse(JSON.stringify(template)) as ModelConfig; - cloned.name = formatModelDisplayName(slug); - defaults.set(slug, cloned); - } - } + for (const slug of catalogSlugs) { + if (!defaults.has(slug)) { + const templateId = pickTemplateId(slug, defaults); + if (!templateId) continue; + const template = defaults.get(templateId); + if (!template) continue; + const cloned = JSON.parse(JSON.stringify(template)) as ModelConfig; + cloned.name = formatModelDisplayName(slug); + defaults.set(slug, cloned); + } + } - for (const model of catalogModels) { - const slug = model.slug ? normalizeBaseId(model.slug) : undefined; - if (!slug) continue; - const existing = defaults.get(slug); - if (!existing) continue; - const updated = applyCatalogMetadata(existing, model); - if (!updated.name) { - updated.name = formatModelDisplayName(slug); - } - defaults.set(slug, updated); - } + for (const model of catalogModels) { + const slug = model.slug ? 
normalizeBaseId(model.slug) : undefined; + if (!slug) continue; + const existing = defaults.get(slug); + if (!existing) continue; + const updated = applyCatalogMetadata(existing, model); + if (!updated.name) { + updated.name = formatModelDisplayName(slug); + } + defaults.set(slug, updated); + } - return Object.fromEntries(defaults); + return Object.fromEntries(defaults); } export function mergeModelDefaults( - userModels: unknown, - defaults: Record, + userModels: unknown, + defaults: Record, ): Record { - const merged: Record = { ...defaults }; - if (!isObjectRecord(userModels)) return merged; - for (const [modelId, override] of Object.entries(userModels)) { - const base = isObjectRecord(merged[modelId]) ? merged[modelId] : {}; - if (!isObjectRecord(override)) { - merged[modelId] = override as ModelConfig; - continue; - } - const next: ModelConfig = { ...base, ...override }; - if (isObjectRecord(base.limit) || isObjectRecord(override.limit)) { - next.limit = { - ...(base.limit as Record | undefined), - ...(override.limit as Record | undefined), - } as ModelConfig["limit"]; - } - if (isObjectRecord(base.options) || isObjectRecord(override.options)) { - next.options = { - ...(base.options as Record | undefined), - ...(override.options as Record | undefined), - }; - } - if (isObjectRecord(base.variants) || isObjectRecord(override.variants)) { - next.variants = { - ...(base.variants as Record | undefined), - ...(override.variants as Record | undefined), - }; - } - merged[modelId] = next; - } - return merged; + const merged: Record = { ...defaults }; + if (!isObjectRecord(userModels)) return merged; + for (const [modelId, override] of Object.entries(userModels)) { + const base = isObjectRecord(merged[modelId]) ? 
merged[modelId] : {}; + if (!isObjectRecord(override)) { + merged[modelId] = override as ModelConfig; + continue; + } + const next: ModelConfig = { ...base, ...override }; + if (isObjectRecord(base.limit) || isObjectRecord(override.limit)) { + next.limit = { + ...(base.limit as Record | undefined), + ...(override.limit as Record | undefined), + } as ModelConfig["limit"]; + } + if (isObjectRecord(base.options) || isObjectRecord(override.options)) { + next.options = { + ...(base.options as Record | undefined), + ...(override.options as Record | undefined), + }; + } + if (isObjectRecord(base.variants) || isObjectRecord(override.variants)) { + next.variants = { + ...(base.variants as Record | undefined), + ...(override.variants as Record | undefined), + }; + } + merged[modelId] = next; + } + return merged; } export const __internal = { - readStaticTemplateModels, - resolveStaticTemplateFiles, + readStaticTemplateModels, + resolveStaticTemplateFiles, }; diff --git a/lib/ui/auth-menu-flow.ts b/lib/ui/auth-menu-flow.ts deleted file mode 100644 index e618475..0000000 --- a/lib/ui/auth-menu-flow.ts +++ /dev/null @@ -1,61 +0,0 @@ -import type { AuthMenuAction, AuthMenuAccount, AccountAction } from "./auth-menu.js"; -import { buildAccountActionItems, buildAuthMenuItems, buildAccountSelectItems } from "./auth-menu.js"; -import { runSelect } from "./tty/select.js"; - -type SelectContext = { - input?: NodeJS.ReadStream; - output?: NodeJS.WriteStream; -}; - -export async function chooseAuthMenuAction( - args: SelectContext & { - accounts: AuthMenuAccount[]; - now?: number; - }, -): Promise { - const items = buildAuthMenuItems(args.accounts, args.now); - const selected = await runSelect({ - title: "Manage accounts", - subtitle: "Select account", - items, - input: args.input, - output: args.output, - useColor: false, - }); - return selected?.value ?? 
null; -} - -export async function chooseAccountAction( - args: SelectContext & { - account: AuthMenuAccount; - }, -): Promise { - const items = buildAccountActionItems(args.account); - const selected = await runSelect({ - title: "Account options", - subtitle: "Select action", - items, - input: args.input, - output: args.output, - useColor: false, - }); - return selected?.value ?? null; -} - -export async function chooseAccountFromList( - args: SelectContext & { - accounts: AuthMenuAccount[]; - now?: number; - }, -): Promise { - const items = buildAccountSelectItems(args.accounts, args.now); - const selected = await runSelect({ - title: "Manage accounts", - subtitle: "Select account", - items, - input: args.input, - output: args.output, - useColor: false, - }); - return selected?.value ?? null; -} diff --git a/lib/ui/auth-menu-runner.ts b/lib/ui/auth-menu-runner.ts index 98539c0..1cfe24c 100644 --- a/lib/ui/auth-menu-runner.ts +++ b/lib/ui/auth-menu-runner.ts @@ -1,36 +1,31 @@ -import type { AuthMenuAccount } from "./auth-menu.js"; -import { chooseAccountAction, chooseAccountFromList, chooseAuthMenuAction } from "./auth-menu-flow.js"; -import { runConfirm } from "./tty/confirm.js"; +import type { AccountInfo } from "./auth-menu.js"; +import { showAccountDetails, showAuthMenu, selectAccount } from "./auth-menu.js"; export type AuthMenuHandlers = { onCheckQuotas: () => Promise; onConfigureModels: () => Promise; onDeleteAll: () => Promise; - onToggleAccount: (account: AuthMenuAccount) => Promise; - onRefreshAccount: (account: AuthMenuAccount) => Promise; - onDeleteAccount: (account: AuthMenuAccount) => Promise; + onToggleAccount: (account: AccountInfo) => Promise; + onRefreshAccount: (account: AccountInfo) => Promise; + onDeleteAccount: (account: AccountInfo) => Promise; }; export type AuthMenuResult = "add" | "continue" | "exit"; export async function runAuthMenuOnce(args: { - accounts: AuthMenuAccount[]; + accounts: AccountInfo[]; handlers: AuthMenuHandlers; input?: 
NodeJS.ReadStream; output?: NodeJS.WriteStream; - now?: number; }): Promise { - const action = await chooseAuthMenuAction({ - accounts: args.accounts, + const action = await showAuthMenu(args.accounts, { input: args.input, output: args.output, - now: args.now, }); - if (!action) return "exit"; - + if (action.type === "cancel") return "exit"; if (action.type === "add") return "add"; - if (action.type === "check-quotas") { + if (action.type === "check") { await args.handlers.onCheckQuotas(); return "continue"; } @@ -39,36 +34,24 @@ export async function runAuthMenuOnce(args: { return "continue"; } if (action.type === "delete-all") { - const confirm = await runConfirm({ - title: "Delete accounts", - message: "Delete all accounts?", - input: args.input, - output: args.output, - useColor: false, - }); - if (confirm) { - await args.handlers.onDeleteAll(); - } + await args.handlers.onDeleteAll(); return "continue"; } const account = action.type === "select-account" ? action.account - : await chooseAccountFromList({ - accounts: args.accounts, + : await selectAccount(args.accounts, { input: args.input, output: args.output, - now: args.now, }); if (!account) return "continue"; - const accountAction = await chooseAccountAction({ - account, + const accountAction = await showAccountDetails(account, { input: args.input, output: args.output, }); - if (!accountAction || accountAction === "back") return "continue"; + if (accountAction === "toggle") { await args.handlers.onToggleAccount(account); return "continue"; @@ -80,16 +63,7 @@ export async function runAuthMenuOnce(args: { return "continue"; } if (accountAction === "delete") { - const confirm = await runConfirm({ - title: "Delete account", - message: `Delete ${account.email ?? 
"this account"}?`, - input: args.input, - output: args.output, - useColor: false, - }); - if (confirm) { - await args.handlers.onDeleteAccount(account); - } + await args.handlers.onDeleteAccount(account); return "continue"; } diff --git a/lib/ui/auth-menu.ts b/lib/ui/auth-menu.ts index 27d27d0..daf8136 100644 --- a/lib/ui/auth-menu.ts +++ b/lib/ui/auth-menu.ts @@ -1,122 +1,227 @@ -import { formatAccountLabel } from "../accounts.js"; -import type { RateLimitStateV3 } from "../types.js"; -import type { SelectItem } from "./tty/select.js"; +import { ANSI, shouldUseColor } from "./tty/ansi.js"; +import { confirm } from "./tty/confirm.js"; +import { select, type MenuItem } from "./tty/select.js"; + +export type AccountStatus = "active" | "rate-limited" | "expired" | "unknown"; + +export interface AccountInfo { + email?: string; + plan?: string; + accountId?: string; + index: number; + addedAt?: number; + lastUsed?: number; + status?: AccountStatus; + isCurrentAccount?: boolean; + enabled?: boolean; +} export type AuthMenuAction = | { type: "add" } - | { type: "check-quotas" } + | { type: "select-account"; account: AccountInfo } + | { type: "delete-all" } + | { type: "check" } | { type: "manage" } | { type: "configure-models" } - | { type: "select-account"; account: AuthMenuAccount } - | { type: "delete-all" }; + | { type: "cancel" }; -export type AccountAction = "back" | "toggle" | "refresh" | "delete"; +export type AccountAction = "back" | "delete" | "refresh" | "toggle" | "cancel"; -export type AuthMenuAccount = { - index: number; - email?: string; - plan?: string; - accountId?: string; - enabled?: boolean; - lastUsed?: number; - rateLimitResetTimes?: RateLimitStateV3; - coolingDownUntil?: number; - cooldownReason?: "auth-failure"; - isActive?: boolean; -}; - -export function formatLastUsedHint(lastUsed: number | undefined, now = Date.now()): string { - if (!lastUsed || !Number.isFinite(lastUsed) || lastUsed <= 0) return ""; - const diff = Math.max(0, now - lastUsed); 
- const dayMs = 24 * 60 * 60 * 1000; - if (diff < dayMs) return "used today"; - if (diff < 2 * dayMs) return "used yesterday"; - const days = Math.floor(diff / dayMs); - return `used ${days}d ago`; +export function formatRelativeTime(timestamp: number | undefined, now = Date.now()): string { + if (!timestamp) return "never"; + const days = Math.floor((now - timestamp) / 86_400_000); + if (days <= 0) return "today"; + if (days === 1) return "yesterday"; + if (days < 7) return `${days}d ago`; + if (days < 30) return `${Math.floor(days / 7)}w ago`; + return new Date(timestamp).toLocaleDateString(); +} + +function formatDate(timestamp: number | undefined): string { + if (!timestamp) return "unknown"; + return new Date(timestamp).toLocaleDateString(); } -function isRateLimited(rateLimitResetTimes: RateLimitStateV3 | undefined, now: number): boolean { - if (!rateLimitResetTimes) return false; - return Object.values(rateLimitResetTimes).some((resetAt) => - typeof resetAt === "number" && Number.isFinite(resetAt) && resetAt > now, - ); +function colorize(text: string, color: string, useColor: boolean): string { + return useColor ? 
`${color}${text}${ANSI.reset}` : text; } -export function getAccountBadge(account: AuthMenuAccount, now = Date.now()): string { - if (account.enabled === false) return "[disabled]"; - if (isRateLimited(account.rateLimitResetTimes, now)) return "[rate-limited]"; - if (account.isActive) return "[active]"; - return ""; +function getStatusBadge(status: AccountStatus | undefined, useColor: boolean): string { + switch (status) { + case "rate-limited": + return colorize("[rate-limited]", ANSI.yellow, useColor); + case "expired": + return colorize("[expired]", ANSI.red, useColor); + default: + return ""; + } +} + +export function formatStatusBadges( + account: Pick, + useColor = shouldUseColor(), +): string { + const badges: string[] = []; + if (account.enabled === false) { + badges.push(colorize("[disabled]", ANSI.red, useColor)); + } else { + badges.push(colorize("[enabled]", ANSI.green, useColor)); + } + const statusBadge = getStatusBadge(account.status, useColor); + if (statusBadge) badges.push(statusBadge); + if (account.isCurrentAccount) { + badges.push(colorize("[last active]", ANSI.cyan, useColor)); + } + return badges.join(" "); +} + +function buildAccountLabel(account: AccountInfo, useColor: boolean): string { + const baseLabel = account.email || `Account ${account.index + 1}`; + const badges = formatStatusBadges(account, useColor); + return badges ? 
`${baseLabel} ${badges}` : baseLabel; } export function buildAuthMenuItems( - accounts: AuthMenuAccount[], - now = Date.now(), -): Array> { - const items: Array> = [ + accounts: AccountInfo[], + useColor = shouldUseColor(), +): MenuItem[] { + const items: MenuItem[] = [ { label: "Add new account", value: { type: "add" } }, - { label: "Check quotas", value: { type: "check-quotas" } }, + { label: "Check quotas", value: { type: "check" } }, { label: "Manage accounts (enable/disable)", value: { type: "manage" } }, { label: "Configure models in opencode.json", value: { type: "configure-models" } }, - ]; - - for (const account of accounts) { - const baseLabel = formatAccountLabel( - { email: account.email, plan: account.plan, accountId: account.accountId }, - account.index, - ); - const badge = getAccountBadge(account, now); - const label = badge ? `${baseLabel} ${badge}` : baseLabel; - const hint = formatLastUsedHint(account.lastUsed, now); - items.push({ - label, - hint: hint || undefined, - value: { type: "select-account", account }, - }); - } + ...accounts.map((account) => { + const label = buildAccountLabel(account, useColor); + return { + label, + hint: account.lastUsed ? `used ${formatRelativeTime(account.lastUsed)}` : "", + value: { type: "select-account" as const, account }, + }; + }), + ]; if (accounts.length > 0) { - items.push({ label: "Delete all accounts", value: { type: "delete-all" } }); + items.push({ label: "Delete all accounts", value: { type: "delete-all" }, color: "red" }); } return items; } export function buildAccountActionItems( - account: AuthMenuAccount, -): Array> { - const items: Array> = [ + account: AccountInfo, +): MenuItem[] { + return [ { label: "Back", value: "back" }, { label: account.enabled === false ? "Enable account" : "Disable account", value: "toggle", + color: account.enabled === false ? 
"green" : "yellow", + }, + { + label: "Refresh token", + value: "refresh", + color: "cyan", + disabled: account.enabled === false, }, + { label: "Delete this account", value: "delete", color: "red" }, ]; - - if (account.enabled !== false) { - items.push({ label: "Refresh token", value: "refresh" }); - } - - items.push({ label: "Delete this account", value: "delete" }); - return items; } export function buildAccountSelectItems( - accounts: AuthMenuAccount[], - now = Date.now(), -): Array> { - return accounts.map((account) => { - const baseLabel = formatAccountLabel( - { email: account.email, plan: account.plan, accountId: account.accountId }, - account.index, - ); - const badge = getAccountBadge(account, now); - const label = badge ? `${baseLabel} ${badge}` : baseLabel; - const hint = formatLastUsedHint(account.lastUsed, now); - return { - label, - hint: hint || undefined, - value: account, - }; + accounts: AccountInfo[], + useColor = shouldUseColor(), +): MenuItem[] { + return accounts.map((account) => ({ + label: buildAccountLabel(account, useColor), + hint: account.lastUsed ? `used ${formatRelativeTime(account.lastUsed)}` : "", + value: account, + })); +} + +export async function selectAccount( + accounts: AccountInfo[], + options: { input?: NodeJS.ReadStream; output?: NodeJS.WriteStream; useColor?: boolean } = {}, +): Promise { + const useColor = options.useColor ?? shouldUseColor(); + const items = buildAccountSelectItems(accounts, useColor); + const result = await select(items, { + message: "Manage accounts", + subtitle: "Select account", + input: options.input, + output: options.output, + useColor, }); + return result ?? null; +} + +export async function showAuthMenu( + accounts: AccountInfo[], + options: { input?: NodeJS.ReadStream; output?: NodeJS.WriteStream; useColor?: boolean } = {}, +): Promise { + const useColor = options.useColor ?? 
shouldUseColor(); + const items = buildAuthMenuItems(accounts, useColor); + + while (true) { + const result = await select(items, { + message: "Manage accounts", + subtitle: "Select account", + input: options.input, + output: options.output, + useColor, + }); + + if (!result) return { type: "cancel" }; + if (result.type === "delete-all") { + const confirmed = await confirm( + "Delete ALL accounts? This cannot be undone.", + false, + options, + ); + if (!confirmed) continue; + } + + return result; + } +} + +export async function showAccountDetails( + account: AccountInfo, + options: { input?: NodeJS.ReadStream; output?: NodeJS.WriteStream; useColor?: boolean } = {}, +): Promise { + const useColor = options.useColor ?? shouldUseColor(); + const output = options.output ?? process.stdout; + const label = account.email || `Account ${account.index + 1}`; + const badges = formatStatusBadges(account, useColor); + + const bold = useColor ? ANSI.bold : ""; + const dim = useColor ? ANSI.dim : ""; + const reset = useColor ? ANSI.reset : ""; + + output.write("\n"); + output.write(`${bold}Account: ${label}${badges ? ` ${badges}` : ""}${reset}\n`); + output.write(`${dim}Added: ${formatDate(account.addedAt)}${reset}\n`); + output.write(`${dim}Last used: ${formatRelativeTime(account.lastUsed)}${reset}\n`); + output.write("\n"); + + while (true) { + const result = await select(buildAccountActionItems(account), { + message: "Account options", + subtitle: "Select action", + input: options.input, + output: options.output, + useColor, + }); + + if (result === "delete") { + const confirmed = await confirm(`Delete ${label}?`, false, options); + if (!confirmed) continue; + } + + if (result === "refresh") { + const confirmed = await confirm(`Re-authenticate ${label}?`, false, options); + if (!confirmed) continue; + } + + return result ?? 
"cancel"; + } } diff --git a/lib/ui/tty/ansi.ts b/lib/ui/tty/ansi.ts new file mode 100644 index 0000000..708d4f2 --- /dev/null +++ b/lib/ui/tty/ansi.ts @@ -0,0 +1,43 @@ +export const ANSI = { + hide: "\x1b[?25l", + show: "\x1b[?25h", + up: (n = 1) => `\x1b[${n}A`, + down: (n = 1) => `\x1b[${n}B`, + clearLine: "\x1b[2K", + clearScreen: "\x1b[2J", + moveTo: (row: number, col: number) => `\x1b[${row};${col}H`, + + cyan: "\x1b[36m", + green: "\x1b[32m", + red: "\x1b[31m", + yellow: "\x1b[33m", + dim: "\x1b[2m", + bold: "\x1b[1m", + reset: "\x1b[0m", + inverse: "\x1b[7m", +} as const; + +export type KeyAction = "up" | "down" | "enter" | "escape" | "escape-start" | null; + +export function parseKey(data: Buffer): KeyAction { + const s = data.toString(); + if (s === "\x1b[A" || s === "\x1bOA") return "up"; + if (s === "\x1b[B" || s === "\x1bOB") return "down"; + if (s === "\r" || s === "\n") return "enter"; + if (s === "\x03") return "escape"; + if (s === "\x1b") return "escape-start"; + return null; +} + +export function isTTY( + input: NodeJS.ReadStream = process.stdin, + output: NodeJS.WriteStream = process.stdout, +): boolean { + return Boolean(input.isTTY && output.isTTY); +} + +export function shouldUseColor(): boolean { + const noColor = process.env.NO_COLOR; + if (noColor && noColor !== "0") return false; + return true; +} diff --git a/lib/ui/tty/confirm.ts b/lib/ui/tty/confirm.ts index f6be25c..d26041f 100644 --- a/lib/ui/tty/confirm.ts +++ b/lib/ui/tty/confirm.ts @@ -1,27 +1,31 @@ -import { runSelect } from "./select.js"; +import { select } from "./select.js"; -export type ConfirmArgs = { - title: string; - message: string; +export type ConfirmOptions = { input?: NodeJS.ReadStream; output?: NodeJS.WriteStream; useColor?: boolean; }; -export async function runConfirm(args: ConfirmArgs): Promise { - const result = await runSelect({ - title: args.title, - subtitle: args.message, - items: [ - { label: "Yes", value: true }, - { label: "No", value: false }, - ], - 
input: args.input, - output: args.output, - initialIndex: 0, - useColor: args.useColor, - }); +export async function confirm( + message: string, + defaultYes = false, + options: ConfirmOptions = {}, +): Promise { + const items = defaultYes + ? [ + { label: "Yes", value: true }, + { label: "No", value: false }, + ] + : [ + { label: "No", value: false }, + { label: "Yes", value: true }, + ]; - if (!result) return null; - return Boolean(result.value); + const result = await select(items, { + message, + input: options.input, + output: options.output, + useColor: options.useColor, + }); + return result ?? false; } diff --git a/lib/ui/tty/select.ts b/lib/ui/tty/select.ts index c801d67..67aae54 100644 --- a/lib/ui/tty/select.ts +++ b/lib/ui/tty/select.ts @@ -1,155 +1,271 @@ -export type SelectKeyAction = "up" | "down" | "enter" | "cancel" | "unknown"; +import { ANSI, isTTY, parseKey, shouldUseColor } from "./ansi.js"; -export type SelectItem = { +export interface MenuItem { label: string; - value?: T; + value: T; hint?: string; -}; - -export type RenderSelectFrameArgs = { - title: string; - subtitle?: string; - items: Array>; - selectedIndex: number; - useColor?: boolean; -}; + disabled?: boolean; + separator?: boolean; + color?: "red" | "green" | "yellow" | "cyan"; +} -export type RunSelectArgs = { - title: string; +export interface SelectOptions { + message: string; subtitle?: string; - items: Array>; input?: NodeJS.ReadStream; output?: NodeJS.WriteStream; - initialIndex?: number; useColor?: boolean; -}; +} -const ANSI = { - reset: "\x1b[0m", - dim: "\x1b[2m", - cyan: "\x1b[36m", - green: "\x1b[32m", -}; +const ESCAPE_TIMEOUT_MS = 50; +const ANSI_PATTERN = /^\x1b\[[0-9;?]*[A-Za-z]/; -function colorize(text: string, code: string, useColor?: boolean): string { - return useColor ? 
`${code}${text}${ANSI.reset}` : text; +function getColorCode(color: MenuItem["color"]): string { + switch (color) { + case "red": + return ANSI.red; + case "green": + return ANSI.green; + case "yellow": + return ANSI.yellow; + case "cyan": + return ANSI.cyan; + default: + return ""; + } } -export function renderSelectFrame(args: RenderSelectFrameArgs): string[] { - const lines: string[] = []; - const top = colorize("+", ANSI.dim, args.useColor); - const pipe = colorize("|", ANSI.dim, args.useColor); - lines.push(`${top} ${args.title}`); - lines.push(pipe); - if (args.subtitle) { - lines.push(`${pipe} ${args.subtitle}`); - } - if (args.items.length > 0) { - lines.push(pipe); - } +function visibleLength(text: string): number { + return text.replace(/\x1b\[[0-9;?]*[A-Za-z]/g, "").length; +} - args.items.forEach((item, index) => { - const marker = index === args.selectedIndex ? colorize(">", ANSI.green, args.useColor) : " "; - const hint = item.hint ? ` ${item.hint}` : ""; - lines.push(`${pipe} ${marker} ${item.label}${hint}`); - }); +function truncateAnsi(text: string, maxVisible: number): string { + if (maxVisible <= 0) return ""; + if (visibleLength(text) <= maxVisible) return text; + const ellipsis = "…"; + const limit = Math.max(1, maxVisible - 1); + let visible = 0; + let i = 0; + let out = ""; + let hasAnsi = false; + + while (i < text.length && visible < limit) { + if (text[i] === "\x1b") { + const match = text.slice(i).match(ANSI_PATTERN); + if (match) { + out += match[0]; + i += match[0].length; + hasAnsi = true; + continue; + } + } + out += text[i]; + visible += 1; + i += 1; + } - lines.push(`${pipe} ^/v to select, Enter: confirm`); - lines.push(top); - return lines; + out += ellipsis; + if (hasAnsi) out += ANSI.reset; + return out; } -export function parseSelectKey(input: string): SelectKeyAction { - if (input === "\r" || input === "\n") return "enter"; - if (input === "\u001b") return "cancel"; - if (input === "\u0003") return "cancel"; - if (input === 
"\u001b[A" || input === "\u001bOA") return "up"; - if (input === "\u001b[B" || input === "\u001bOB") return "down"; +function formatItemLabel( + item: MenuItem, + selected: boolean, + useColor: boolean, + maxWidth: number, +): string { + const colorCode = useColor ? getColorCode(item.color) : ""; + let labelText: string; - if (input === "k" || input === "K") return "up"; - if (input === "j" || input === "J") return "down"; + if (item.disabled) { + labelText = useColor ? `${ANSI.dim}${item.label} (unavailable)${ANSI.reset}` : `${item.label} (unavailable)`; + } else if (selected) { + labelText = colorCode ? `${colorCode}${item.label}${ANSI.reset}` : item.label; + if (item.hint) { + labelText += useColor ? ` ${ANSI.dim}${item.hint}${ANSI.reset}` : ` ${item.hint}`; + } + } else { + if (useColor) { + labelText = colorCode + ? `${ANSI.dim}${colorCode}${item.label}${ANSI.reset}` + : `${ANSI.dim}${item.label}${ANSI.reset}`; + } else { + labelText = item.label; + } + if (item.hint) { + labelText += useColor ? ` ${ANSI.dim}${item.hint}${ANSI.reset}` : ` ${item.hint}`; + } + } - return "unknown"; + return truncateAnsi(labelText, maxWidth); } -export function moveSelectIndex(current: number, delta: number, size: number): number { - if (size <= 0) return 0; - const next = (current + delta) % size; - return next < 0 ? next + size : next; -} +export async function select( + items: MenuItem[], + options: SelectOptions, +): Promise { + const input = options.input ?? process.stdin; + const output = options.output ?? process.stdout; + const useColor = options.useColor ?? shouldUseColor(); -export async function runSelect(args: RunSelectArgs): Promise | null> { - const input = args.input ?? process.stdin; - const output = args.output ?? process.stdout; - if (!input.isTTY || !output.isTTY || args.items.length === 0) return null; + if (!isTTY(input, output)) return null; + if (items.length === 0) return null; - let selectedIndex = Math.min( - Math.max(args.initialIndex ?? 
0, 0), - Math.max(args.items.length - 1, 0), - ); - let resolved = false; + const enabledItems = items.filter((item) => !item.disabled && !item.separator); + if (enabledItems.length === 0) return null; + if (enabledItems.length === 1) return enabledItems[0]!.value; - const render = () => { - const lines = renderSelectFrame({ - title: args.title, - subtitle: args.subtitle, - items: args.items, - selectedIndex, - useColor: args.useColor, - }); - output.write("\x1b[2J\x1b[H"); - output.write(lines.join("\n") + "\n"); + const { message, subtitle } = options; + let cursor = items.findIndex((item) => !item.disabled && !item.separator); + if (cursor === -1) cursor = 0; + let escapeTimeout: ReturnType | null = null; + let isCleanedUp = false; + let isFirstRender = true; + + const getTotalLines = (): number => { + const subtitleLines = subtitle ? 3 : 0; + return 1 + subtitleLines + items.length + 1 + 1; }; - const cleanup = () => { - if (resolved) return; - resolved = true; - input.off("data", onData); - input.pause(); - if (typeof input.setRawMode === "function") { - input.setRawMode(false); + const render = () => { + const totalLines = getTotalLines(); + const columns = output.columns ?? 80; + const contentWidth = Math.max(10, columns - 6); + + if (!isFirstRender) { + output.write(ANSI.up(totalLines) + "\r"); } - }; + isFirstRender = false; - const onData = (chunk: Buffer | string) => { - const text = typeof chunk === "string" ? chunk : chunk.toString("utf8"); - const action = parseSelectKey(text); - if (action === "up") { - selectedIndex = moveSelectIndex(selectedIndex, -1, args.items.length); - render(); - return; + output.write(`${ANSI.clearLine}${useColor ? ANSI.dim : ""}┌ ${useColor ? ANSI.reset : ""}${message}\n`); + + if (subtitle) { + output.write(`${ANSI.clearLine}${useColor ? ANSI.dim : ""}│${useColor ? ANSI.reset : ""}\n`); + output.write(`${ANSI.clearLine}${useColor ? ANSI.cyan : ""}◆${useColor ? 
ANSI.reset : ""} ${subtitle}\n`); + output.write(`${ANSI.clearLine}\n`); } - if (action === "down") { - selectedIndex = moveSelectIndex(selectedIndex, 1, args.items.length); - render(); - return; + + for (let i = 0; i < items.length; i += 1) { + const item = items[i]; + if (!item) continue; + + if (item.separator) { + output.write(`${ANSI.clearLine}${useColor ? ANSI.dim : ""}│${useColor ? ANSI.reset : ""}\n`); + continue; + } + + const isSelected = i === cursor; + const labelText = formatItemLabel(item, isSelected, useColor, contentWidth); + if (isSelected) { + output.write( + `${ANSI.clearLine}${useColor ? ANSI.cyan : ""}│${useColor ? ANSI.reset : ""} ${useColor ? ANSI.green : ""}●${useColor ? ANSI.reset : ""} ${labelText}\n`, + ); + } else { + output.write( + `${ANSI.clearLine}${useColor ? ANSI.cyan : ""}│${useColor ? ANSI.reset : ""} ${useColor ? ANSI.dim : ""}○${useColor ? ANSI.reset : ""} ${labelText}\n`, + ); + } } - if (action === "enter") { - const selected = args.items[selectedIndex] ?? null; + + output.write( + `${ANSI.clearLine}${useColor ? ANSI.cyan : ""}│${useColor ? ANSI.reset : ""} ${useColor ? ANSI.dim : ""}↑/↓ to select • Enter: confirm${useColor ? ANSI.reset : ""}\n`, + ); + output.write(`${ANSI.clearLine}${useColor ? ANSI.cyan : ""}└${useColor ? ANSI.reset : ""}\n`); + }; + + return new Promise((resolve) => { + const wasRaw = typeof input.isRaw === "boolean" ? 
input.isRaw : false; + + const cleanup = () => { + if (isCleanedUp) return; + isCleanedUp = true; + + if (escapeTimeout) { + clearTimeout(escapeTimeout); + escapeTimeout = null; + } + + try { + input.removeListener("data", onKey); + if (typeof input.setRawMode === "function") { + input.setRawMode(wasRaw); + } + input.pause(); + output.write(ANSI.show); + } catch { + // best effort + } + + process.removeListener("SIGINT", onSignal); + process.removeListener("SIGTERM", onSignal); + }; + + const onSignal = () => { cleanup(); - resolvePromise(selected); - return; - } - if (action === "cancel") { + resolve(null); + }; + + const finishWithValue = (value: T | null) => { cleanup(); - resolvePromise(null); - } - }; + resolve(value); + }; - let resolvePromise: (value: SelectItem | null) => void = () => undefined; - const promise = new Promise | null>((resolve) => { - resolvePromise = resolve; - }); + const findNextSelectable = (from: number, direction: 1 | -1): number => { + if (items.length === 0) return from; + let next = from; + do { + next = (next + direction + items.length) % items.length; + } while (items[next]?.disabled || items[next]?.separator); + return next; + }; - if (typeof input.setRawMode === "function") { - input.setRawMode(true); - } - input.resume(); - input.on("data", onData); - input.setEncoding?.("utf8"); - output.write("\x1b[?25l"); - render(); - const result = await promise; - output.write("\x1b[?25h"); - return result; + const onKey = (data: Buffer) => { + if (escapeTimeout) { + clearTimeout(escapeTimeout); + escapeTimeout = null; + } + + const action = parseKey(data); + switch (action) { + case "up": + cursor = findNextSelectable(cursor, -1); + render(); + return; + case "down": + cursor = findNextSelectable(cursor, 1); + render(); + return; + case "enter": + finishWithValue(items[cursor]?.value ?? 
null); + return; + case "escape": + finishWithValue(null); + return; + case "escape-start": + escapeTimeout = setTimeout(() => { + finishWithValue(null); + }, ESCAPE_TIMEOUT_MS); + return; + default: + return; + } + }; + + process.once("SIGINT", onSignal); + process.once("SIGTERM", onSignal); + + try { + if (typeof input.setRawMode === "function") { + input.setRawMode(true); + } + input.resume(); + output.write(ANSI.hide); + render(); + input.on("data", onKey); + } catch { + cleanup(); + resolve(null); + } + }); } diff --git a/test/auth-menu-flow.test.ts b/test/auth-menu-flow.test.ts index 5fc3533..6342380 100644 --- a/test/auth-menu-flow.test.ts +++ b/test/auth-menu-flow.test.ts @@ -3,61 +3,62 @@ import { PassThrough } from "node:stream"; import { describe, it, expect, vi } from "vitest"; -import { - chooseAuthMenuAction, - chooseAccountAction, - chooseAccountFromList, -} from "../lib/ui/auth-menu-flow.js"; +import { showAuthMenu, showAccountDetails } from "../lib/ui/auth-menu.js"; const fixture = JSON.parse( readFileSync(new URL("./fixtures/openai-codex-accounts.json", import.meta.url), "utf-8"), ) as { accounts: Array<{ accountId: string; email: string; plan: string; lastUsed: number }> }; -describe("auth menu flow", () => { - function makeTty() { - const input = new PassThrough(); - const output = new PassThrough(); - (input as unknown as { isTTY: boolean }).isTTY = true; - (output as unknown as { isTTY: boolean }).isTTY = true; - (input as unknown as { setRawMode: (val: boolean) => void }).setRawMode = vi.fn(); - return { input, output }; - } +function makeTty() { + const input = new PassThrough(); + const output = new PassThrough(); + (input as unknown as { isTTY: boolean }).isTTY = true; + (output as unknown as { isTTY: boolean }).isTTY = true; + (input as unknown as { setRawMode: (val: boolean) => void }).setRawMode = vi.fn(); + return { input, output }; +} +describe("auth menu flow", () => { it("selects a top-level action", async () => { const { input, 
output } = makeTty(); - const resultPromise = chooseAuthMenuAction({ - accounts: [ + const resultPromise = showAuthMenu( + [ { index: 0, email: fixture.accounts[0]!.email, - plan: fixture.accounts[0]!.plan, - accountId: fixture.accounts[0]!.accountId, lastUsed: fixture.accounts[0]!.lastUsed, + enabled: true, }, ], - input, - output, - }); + { + input: input as unknown as NodeJS.ReadStream, + output: output as unknown as NodeJS.WriteStream, + }, + ); input.write("\u001b[B"); input.write("\r"); const result = await resultPromise; - expect(result?.type).toBe("check-quotas"); + expect(result?.type).toBe("check"); }); it("selects an account action", async () => { const { input, output } = makeTty(); - const resultPromise = chooseAccountAction({ - account: { + const resultPromise = showAccountDetails( + { index: 0, email: fixture.accounts[0]!.email, - plan: fixture.accounts[0]!.plan, + lastUsed: fixture.accounts[0]!.lastUsed, enabled: true, + status: "active", + isCurrentAccount: true, + }, + { + input: input as unknown as NodeJS.ReadStream, + output: output as unknown as NodeJS.WriteStream, }, - input, - output, - }); + ); input.write("\u001b[B"); input.write("\r"); @@ -65,25 +66,4 @@ describe("auth menu flow", () => { const result = await resultPromise; expect(result).toBe("toggle"); }); - - it("selects an account from list", async () => { - const { input, output } = makeTty(); - const resultPromise = chooseAccountFromList({ - accounts: [ - { - index: 0, - email: fixture.accounts[0]!.email, - plan: fixture.accounts[0]!.plan, - accountId: fixture.accounts[0]!.accountId, - lastUsed: fixture.accounts[0]!.lastUsed, - }, - ], - input, - output, - }); - - input.write("\r"); - const result = await resultPromise; - expect(result?.email).toBe(fixture.accounts[0]!.email); - }); }); diff --git a/test/auth-menu-runner.test.ts b/test/auth-menu-runner.test.ts index 58c771e..d87036e 100644 --- a/test/auth-menu-runner.test.ts +++ b/test/auth-menu-runner.test.ts @@ -43,8 +43,8 @@ 
describe("auth menu runner", () => { onRefreshAccount: vi.fn(), onDeleteAccount: vi.fn(), }, - input, - output, + input: input as unknown as NodeJS.ReadStream, + output: output as unknown as NodeJS.WriteStream, }); await tick(); @@ -75,8 +75,8 @@ describe("auth menu runner", () => { onRefreshAccount: vi.fn(), onDeleteAccount: vi.fn(), }, - input, - output, + input: input as unknown as NodeJS.ReadStream, + output: output as unknown as NodeJS.WriteStream, }); await tick(); @@ -110,8 +110,8 @@ describe("auth menu runner", () => { onRefreshAccount: vi.fn(), onDeleteAccount: vi.fn(), }, - input, - output, + input: input as unknown as NodeJS.ReadStream, + output: output as unknown as NodeJS.WriteStream, }); await tick(); @@ -119,9 +119,6 @@ describe("auth menu runner", () => { input.write("\u001b[B"); input.write("\r"); - await tick(); - input.write("\r"); - await tick(); input.write("\u001b[B"); input.write("\r"); diff --git a/test/auth-menu.test.ts b/test/auth-menu.test.ts index 7afe688..8d7ce17 100644 --- a/test/auth-menu.test.ts +++ b/test/auth-menu.test.ts @@ -3,11 +3,10 @@ import { readFileSync } from "node:fs"; import { describe, it, expect } from "vitest"; import { - buildAuthMenuItems, buildAccountActionItems, - buildAccountSelectItems, - formatLastUsedHint, - getAccountBadge, + buildAuthMenuItems, + formatRelativeTime, + formatStatusBadges, } from "../lib/ui/auth-menu.js"; const fixture = JSON.parse( @@ -20,99 +19,82 @@ const fixture = JSON.parse( refreshToken: string; lastUsed: number; enabled?: boolean; - rateLimitResetTimes?: Record; }>; }; describe("auth menu helpers", () => { - it("formats last-used hints", () => { + it("formats relative time labels", () => { const now = Date.now(); - expect(formatLastUsedHint(now, now)).toBe("used today"); - expect(formatLastUsedHint(now - 24 * 60 * 60 * 1000, now)).toBe("used yesterday"); - expect(formatLastUsedHint(now - 3 * 24 * 60 * 60 * 1000, now)).toBe("used 3d ago"); - expect(formatLastUsedHint(0, now)).toBe(""); + 
expect(formatRelativeTime(now, now)).toBe("today"); + expect(formatRelativeTime(now - 24 * 60 * 60 * 1000, now)).toBe("yesterday"); + expect(formatRelativeTime(now - 3 * 24 * 60 * 60 * 1000, now)).toBe("3d ago"); }); - it("builds badges for status", () => { - const now = fixture.accounts[0]!.lastUsed; - expect(getAccountBadge({ enabled: false }, now)).toBe("[disabled]"); - expect(getAccountBadge({ enabled: true, isActive: true }, now)).toBe("[active]"); - expect( - getAccountBadge( - { - enabled: true, - rateLimitResetTimes: { codex: now + 60_000 }, - }, - now, - ), - ).toBe("[rate-limited]"); + it("builds status badges for enabled and active accounts", () => { + const badges = formatStatusBadges( + { + enabled: true, + status: "rate-limited", + isCurrentAccount: true, + }, + false, + ); + expect(badges).toContain("[enabled]"); + expect(badges).toContain("[rate-limited]"); + expect(badges).toContain("[last active]"); + }); + + it("builds status badges for disabled expired accounts", () => { + const badges = formatStatusBadges( + { + enabled: false, + status: "expired", + }, + false, + ); + expect(badges).toContain("[disabled]"); + expect(badges).toContain("[expired]"); }); it("builds auth menu items with account labels", () => { const account = fixture.accounts[0]!; - const now = account.lastUsed; const items = buildAuthMenuItems( [ - { - index: 0, - email: account.email, - plan: account.plan, - accountId: account.accountId, - lastUsed: account.lastUsed, - isActive: true, - }, - ], - now, - ); + { + index: 0, + email: account.email, + plan: account.plan, + accountId: account.accountId, + lastUsed: account.lastUsed, + enabled: true, + status: "active", + isCurrentAccount: true, + }, + ], + false, + ); - expect(items[0]?.label).toBe("Add new account"); - expect(items[1]?.label).toBe("Check quotas"); const accountItem = items.find((item) => item.label.includes(account.email)); expect(accountItem).toBeTruthy(); - expect(accountItem!.label).toContain("[active]"); - 
expect(accountItem!.hint).toBe("used today"); + expect(accountItem!.label).toContain("[enabled]"); + expect(accountItem!.label).toContain("[last active]"); + expect(accountItem!.hint).toContain("used"); }); - it("builds account actions and hides refresh when disabled", () => { - const account = fixture.accounts[0]!; - const enabled = buildAccountActionItems({ - index: 0, - email: account.email, - plan: account.plan, - enabled: true, - }); - expect(enabled.map((item) => item.label)).toContain("Disable account"); - expect(enabled.map((item) => item.label)).toContain("Refresh token"); + it("does not show delete-all action when there are no accounts", () => { + const items = buildAuthMenuItems([], false); + expect(items.some((item) => item.value.type === "delete-all")).toBe(false); + }); - const disabled = buildAccountActionItems({ + it("disables refresh when account is disabled", () => { + const account = fixture.accounts[0]!; + const items = buildAccountActionItems({ index: 0, email: account.email, plan: account.plan, enabled: false, }); - expect(disabled.map((item) => item.label)).toContain("Enable account"); - expect(disabled.map((item) => item.label)).not.toContain("Refresh token"); - }); - - it("builds account-only select items", () => { - const account = fixture.accounts[0]!; - const now = account.lastUsed; - const items = buildAccountSelectItems( - [ - { - index: 0, - email: account.email, - plan: account.plan, - accountId: account.accountId, - lastUsed: account.lastUsed, - isActive: true, - }, - ], - now, - ); - expect(items).toHaveLength(1); - expect(items[0]?.label).toContain(account.email); - expect(items[0]?.label).toContain("[active]"); - expect(items[0]?.hint).toBe("used today"); + const refresh = items.find((item) => item.value === "refresh"); + expect(refresh?.disabled).toBe(true); }); }); diff --git a/test/catalog-defaults.test.ts b/test/catalog-defaults.test.ts index 197e07e..7fa1047 100644 --- a/test/catalog-defaults.test.ts +++ 
b/test/catalog-defaults.test.ts @@ -4,135 +4,131 @@ import { tmpdir } from "node:os"; import { join } from "node:path"; import { - buildInternalModelDefaults, - mergeModelDefaults, + buildInternalModelDefaults, + mergeModelDefaults, } from "../lib/catalog-defaults.js"; describe("catalog internal defaults", () => { - const originalXdg = process.env.XDG_CONFIG_HOME; + const originalXdg = process.env.XDG_CONFIG_HOME; - afterEach(() => { - if (originalXdg === undefined) { - delete process.env.XDG_CONFIG_HOME; - } else { - process.env.XDG_CONFIG_HOME = originalXdg; - } - }); + afterEach(() => { + if (originalXdg === undefined) { + delete process.env.XDG_CONFIG_HOME; + } else { + process.env.XDG_CONFIG_HOME = originalXdg; + } + }); - it("adds catalog models using template defaults", () => { - const root = mkdtempSync(join(tmpdir(), "catalog-defaults-")); - process.env.XDG_CONFIG_HOME = root; - try { - const cacheDir = join(root, "opencode", "cache"); - mkdirSync(cacheDir, { recursive: true }); - writeFileSync( - join(cacheDir, "codex-models-cache.json"), - JSON.stringify({ - fetchedAt: Date.now(), - source: "server", - models: [{ slug: "gpt-5.3-codex" }], - }), - "utf8", - ); + it("adds catalog models using template defaults", () => { + const root = mkdtempSync(join(tmpdir(), "catalog-defaults-")); + process.env.XDG_CONFIG_HOME = root; + try { + const cacheDir = join(root, "opencode", "cache"); + mkdirSync(cacheDir, { recursive: true }); + writeFileSync( + join(cacheDir, "codex-models-cache.json"), + JSON.stringify({ + fetchedAt: Date.now(), + source: "server", + models: [{ slug: "gpt-5.3-codex" }], + }), + "utf8", + ); - const defaults = buildInternalModelDefaults(); + const defaults = buildInternalModelDefaults(); - expect(defaults["gpt-5.3-codex"]).toBeDefined(); - expect(defaults["gpt-5.3-codex"].name).toBe( - "GPT 5.3 Codex (Codex)", - ); - expect(defaults["gpt-5.3-codex"].limit?.context).toBe( - defaults["gpt-5.2-codex"].limit?.context, - ); - } finally { - 
rmSync(root, { recursive: true, force: true }); - } - }); + expect(defaults["gpt-5.3-codex"]).toBeDefined(); + expect(defaults["gpt-5.3-codex"].name).toBe("GPT 5.3 Codex (Codex)"); + expect(defaults["gpt-5.3-codex"].limit?.context).toBe( + defaults["gpt-5.2-codex"].limit?.context, + ); + } finally { + rmSync(root, { recursive: true, force: true }); + } + }); - it("uses gpt-5.3-codex as template for unknown codex models", () => { - const root = mkdtempSync(join(tmpdir(), "catalog-defaults-unknown-")); - process.env.XDG_CONFIG_HOME = root; - try { - const cacheDir = join(root, "opencode", "cache"); - mkdirSync(cacheDir, { recursive: true }); - writeFileSync( - join(cacheDir, "codex-models-cache.json"), - JSON.stringify({ - fetchedAt: Date.now(), - source: "server", - models: [{ slug: "gpt-5.9-codex" }], - }), - "utf8", - ); + it("uses gpt-5.3-codex as template for unknown codex models", () => { + const root = mkdtempSync(join(tmpdir(), "catalog-defaults-unknown-")); + process.env.XDG_CONFIG_HOME = root; + try { + const cacheDir = join(root, "opencode", "cache"); + mkdirSync(cacheDir, { recursive: true }); + writeFileSync( + join(cacheDir, "codex-models-cache.json"), + JSON.stringify({ + fetchedAt: Date.now(), + source: "server", + models: [{ slug: "gpt-5.9-codex" }], + }), + "utf8", + ); - const defaults = buildInternalModelDefaults(); + const defaults = buildInternalModelDefaults(); - expect(defaults["gpt-5.9-codex"]).toBeDefined(); - expect(defaults["gpt-5.9-codex"].name).toBe( - "GPT 5.9 Codex (Codex)", - ); - // Should have variants from gpt-5.3-codex, not gpt-5.2-codex - // (They are currently identical in opencode-modern.json, but 5.3 is the better template) - expect(defaults["gpt-5.9-codex"].limit?.context).toBe( - defaults["gpt-5.3-codex"].limit?.context, - ); - } finally { - rmSync(root, { recursive: true, force: true }); - } - }); + expect(defaults["gpt-5.9-codex"]).toBeDefined(); + expect(defaults["gpt-5.9-codex"].name).toBe("GPT 5.9 Codex (Codex)"); + // 
Should have variants from gpt-5.3-codex, not gpt-5.2-codex + // (They are currently identical in opencode-modern.json, but 5.3 is the better template) + expect(defaults["gpt-5.9-codex"].limit?.context).toBe( + defaults["gpt-5.3-codex"].limit?.context, + ); + } finally { + rmSync(root, { recursive: true, force: true }); + } + }); - it("overrides template defaults with live metadata", () => { - const root = mkdtempSync(join(tmpdir(), "catalog-defaults-live-")); - process.env.XDG_CONFIG_HOME = root; - try { - const cacheDir = join(root, "opencode", "cache"); - mkdirSync(cacheDir, { recursive: true }); - writeFileSync( - join(cacheDir, "codex-models-cache.json"), - JSON.stringify({ - fetchedAt: Date.now(), - source: "server", - models: [ - { - slug: "gpt-5.3-codex", - display_name: "Codex 5.3", - context_window: 123456, - truncation_policy: { - mode: "tokens", - limit: 4242, - }, - input_modalities: ["text"], - }, - ], - }), - "utf8", - ); + it("overrides template defaults with live metadata", () => { + const root = mkdtempSync(join(tmpdir(), "catalog-defaults-live-")); + process.env.XDG_CONFIG_HOME = root; + try { + const cacheDir = join(root, "opencode", "cache"); + mkdirSync(cacheDir, { recursive: true }); + writeFileSync( + join(cacheDir, "codex-models-cache.json"), + JSON.stringify({ + fetchedAt: Date.now(), + source: "server", + models: [ + { + slug: "gpt-5.3-codex", + display_name: "Codex 5.3", + context_window: 123456, + truncation_policy: { + mode: "tokens", + limit: 4242, + }, + input_modalities: ["text"], + }, + ], + }), + "utf8", + ); - const defaults = buildInternalModelDefaults(); + const defaults = buildInternalModelDefaults(); - expect(defaults["gpt-5.3-codex"].name).toBe("Codex 5.3"); - expect(defaults["gpt-5.3-codex"].limit?.context).toBe(123456); - expect(defaults["gpt-5.3-codex"].limit?.output).toBe(4242); - expect(defaults["gpt-5.3-codex"].modalities?.input).toEqual(["text"]); - } finally { - rmSync(root, { recursive: true, force: true }); - } - 
}); + expect(defaults["gpt-5.3-codex"].name).toBe("Codex 5.3"); + expect(defaults["gpt-5.3-codex"].limit?.context).toBe(123456); + expect(defaults["gpt-5.3-codex"].limit?.output).toBe(4242); + expect(defaults["gpt-5.3-codex"].modalities?.input).toEqual(["text"]); + } finally { + rmSync(root, { recursive: true, force: true }); + } + }); - it("merges config overrides above internal defaults", () => { - const defaults = { - "gpt-5.2-codex": { name: "Default" }, - "gpt-5.1": { name: "Default 5.1" }, - }; - const overrides = { - "gpt-5.2-codex": { name: "Custom" }, - "custom-model": { name: "Custom" }, - }; + it("merges config overrides above internal defaults", () => { + const defaults = { + "gpt-5.2-codex": { name: "Default" }, + "gpt-5.1": { name: "Default 5.1" }, + }; + const overrides = { + "gpt-5.2-codex": { name: "Custom" }, + "custom-model": { name: "Custom" }, + }; - const merged = mergeModelDefaults(overrides, defaults); + const merged = mergeModelDefaults(overrides, defaults); - expect(merged["gpt-5.2-codex"].name).toBe("Custom"); - expect(merged["gpt-5.1"].name).toBe("Default 5.1"); - expect(merged["custom-model"].name).toBe("Custom"); - }); + expect(merged["gpt-5.2-codex"].name).toBe("Custom"); + expect(merged["gpt-5.1"].name).toBe("Default 5.1"); + expect(merged["custom-model"].name).toBe("Custom"); + }); }); diff --git a/test/install-script.test.ts b/test/install-script.test.ts index 5a3ad45..4cddc37 100644 --- a/test/install-script.test.ts +++ b/test/install-script.test.ts @@ -1,47 +1,57 @@ -import { describe, it, expect } from 'vitest'; -import { execFileSync } from 'node:child_process'; -import { mkdtempSync, writeFileSync, readFileSync, mkdirSync, existsSync } from 'node:fs'; -import { tmpdir } from 'node:os'; -import { join, resolve } from 'node:path'; -import { parse } from 'jsonc-parser'; - -const SCRIPT_PATH = resolve(process.cwd(), 'scripts', 'install-opencode-codex-auth.js'); -const EXPECTED_PLUGIN_LATEST = 
'opencode-openai-codex-multi-auth@latest'; +import { describe, it, expect } from "vitest"; +import { execFileSync } from "node:child_process"; +import { + mkdtempSync, + writeFileSync, + readFileSync, + mkdirSync, + existsSync, +} from "node:fs"; +import { tmpdir } from "node:os"; +import { join, resolve } from "node:path"; +import { parse } from "jsonc-parser"; + +const SCRIPT_PATH = resolve( + process.cwd(), + "scripts", + "install-opencode-codex-auth.js", +); +const EXPECTED_PLUGIN_LATEST = "opencode-openai-codex-multi-auth@latest"; const runInstaller = ( - args: string[], - homeDir: string, - envOverrides: Record = {}, + args: string[], + homeDir: string, + envOverrides: Record = {}, ) => { - return execFileSync(process.execPath, [SCRIPT_PATH, ...args], { - env: { ...process.env, HOME: homeDir, ...envOverrides }, - stdio: 'pipe', - encoding: 'utf8', - }); + return execFileSync(process.execPath, [SCRIPT_PATH, ...args], { + env: { ...process.env, HOME: homeDir, ...envOverrides }, + stdio: "pipe", + encoding: "utf8", + }); }; const readJsoncFile = (path: string) => { - const content = readFileSync(path, 'utf-8'); - return { content, data: parse(content) as Record }; + const content = readFileSync(path, "utf-8"); + return { content, data: parse(content) as Record }; }; -const makeHome = () => mkdtempSync(join(tmpdir(), 'opencode-install-')); +const makeHome = () => mkdtempSync(join(tmpdir(), "opencode-install-")); const writeConfig = (homeDir: string, file: string, content: string) => { - const configDir = join(homeDir, '.config', 'opencode'); - mkdirSync(configDir, { recursive: true }); - const path = join(configDir, file); - writeFileSync(path, content); - return path; + const configDir = join(homeDir, ".config", "opencode"); + mkdirSync(configDir, { recursive: true }); + const path = join(configDir, file); + writeFileSync(path, content); + return path; }; -describe('Install script', () => { - it('updates existing JSONC and preserves comments', () => { - const 
homeDir = makeHome(); - const configPath = writeConfig( - homeDir, - 'opencode.jsonc', - `{ +describe("Install script", () => { + it("updates existing JSONC and preserves comments", () => { + const homeDir = makeHome(); + const configPath = writeConfig( + homeDir, + "opencode.jsonc", + `{ // My existing config "plugin": ["some-other-plugin@1.2.3", "opencode-openai-codex-auth@4.2.0"], "provider": { @@ -51,262 +61,284 @@ describe('Install script', () => { } } }`, - ); - - runInstaller(['--no-cache-clear'], homeDir); - - const { content, data } = readJsoncFile(configPath); - expect(content).toContain('// My existing config'); - expect(data.plugin).toContain(EXPECTED_PLUGIN_LATEST); - expect(data.plugin).toContain('some-other-plugin@1.2.3'); - expect(data.provider.openai.timeout).toBe(60000); - expect(data.provider.openai.models['custom-model']).toBeDefined(); - expect(data.provider.openai.models['gpt-5.2']).toBeDefined(); - }); - - it('prefers JSONC when both jsonc and json exist', () => { - const homeDir = makeHome(); - const jsoncPath = writeConfig( - homeDir, - 'opencode.jsonc', - `{ "plugin": ["opencode-openai-codex-auth@4.2.0"] }`, - ); - const jsonPath = writeConfig( - homeDir, - 'opencode.json', - `{ "plugin": ["should-stay"], "provider": { "openai": { "timeout": 10 } } }`, - ); - const jsonBefore = readFileSync(jsonPath, 'utf-8'); - - runInstaller(['--no-cache-clear'], homeDir); - - const { data } = readJsoncFile(jsoncPath); - expect(data.plugin).toContain(EXPECTED_PLUGIN_LATEST); - const jsonAfter = readFileSync(jsonPath, 'utf-8'); - expect(jsonAfter).toBe(jsonBefore); - }); - - it('creates JSONC when no config exists', () => { - const homeDir = makeHome(); - runInstaller(['--no-cache-clear'], homeDir); - const configPath = join(homeDir, '.config', 'opencode', 'opencode.jsonc'); - expect(existsSync(configPath)).toBe(true); - const { data } = readJsoncFile(configPath); - expect(data.plugin).toContain(EXPECTED_PLUGIN_LATEST); - }); - - it('uses online template 
when available', () => { - const homeDir = makeHome(); - const releaseApiUrl = - 'https://api.github.com/repos/iam-brain/opencode-openai-codex-multi-auth/releases/latest'; - const templateUrl = - 'https://raw.githubusercontent.com/iam-brain/opencode-openai-codex-multi-auth/vtest/config/opencode-modern.json'; - - runInstaller(['--no-cache-clear'], homeDir, { - OPENCODE_TEST_ALLOW_ONLINE_TEMPLATE: '1', - OPENCODE_TEST_FETCH_MOCKS: JSON.stringify({ - [releaseApiUrl]: { - status: 200, - json: { tag_name: 'vtest' }, - }, - [templateUrl]: { - status: 200, - json: { - provider: { - openai: { - models: { - 'online-only-model': { - options: { reasoningEffort: 'high' }, - }, - }, - }, - }, - }, - }, - }), - }); - - const configPath = join(homeDir, '.config', 'opencode', 'opencode.jsonc'); - const { data } = readJsoncFile(configPath); - expect(data.provider.openai.models['online-only-model']).toBeDefined(); - }); - - it('falls back to static template when online template payload is malformed', () => { - const homeDir = makeHome(); - const releaseApiUrl = - 'https://api.github.com/repos/iam-brain/opencode-openai-codex-multi-auth/releases/latest'; - const templateUrl = - 'https://raw.githubusercontent.com/iam-brain/opencode-openai-codex-multi-auth/vtest/config/opencode-modern.json'; - - runInstaller(['--no-cache-clear'], homeDir, { - OPENCODE_TEST_ALLOW_ONLINE_TEMPLATE: '1', - OPENCODE_TEST_FETCH_MOCKS: JSON.stringify({ - [releaseApiUrl]: { - status: 200, - json: { tag_name: 'vtest' }, - }, - [templateUrl]: { - status: 200, - json: { - provider: { - openai: { - models: [], - }, - }, - }, - }, - }), - }); - - const configPath = join(homeDir, '.config', 'opencode', 'opencode.jsonc'); - const { data } = readJsoncFile(configPath); - expect(data.provider.openai.models['online-only-model']).toBeUndefined(); - expect(data.provider.openai.models['gpt-5.2']).toBeDefined(); - }); - - it('rejects disallowed endpoint override hosts in test mode', () => { - const homeDir = makeHome(); - 
const output = runInstaller(['--no-cache-clear'], homeDir, { - OPENCODE_TEST_ALLOW_ONLINE_TEMPLATE: '1', - OPENCODE_INSTALLER_TEST_MODE: '1', - OPENCODE_TEMPLATE_RELEASE_API: 'https://evil.example/releases/latest', - OPENCODE_TEMPLATE_RAW_BASE: 'https://evil.example', - }); - - expect(output).toContain('Ignoring release endpoint override with disallowed host'); - expect(output).toContain('Ignoring raw endpoint override with disallowed host'); - }); - - it('ignores endpoint overrides outside test mode', () => { - const homeDir = makeHome(); - const output = runInstaller(['--no-cache-clear'], homeDir, { - VITEST: '', - OPENCODE_INSTALLER_TEST_MODE: '0', - OPENCODE_TEST_ALLOW_ONLINE_TEMPLATE: '1', - OPENCODE_TEMPLATE_RELEASE_API: 'http://localhost:7777/releases/latest', - OPENCODE_TEMPLATE_RAW_BASE: 'http://localhost:7777', - }); - - expect(output).toContain('Ignoring release endpoint override outside test mode'); - expect(output).toContain('Ignoring raw endpoint override outside test mode'); - }); - - it('preserves pinned plugin versions', () => { - const homeDir = makeHome(); - const configPath = writeConfig( - homeDir, - 'opencode.jsonc', - `{ "plugin": ["opencode-openai-codex-multi-auth@4.4.0", "some-other-plugin@1.2.3"] }`, - ); - - runInstaller(['--no-cache-clear'], homeDir); - - const { data } = readJsoncFile(configPath); - expect(data.plugin).toContain('opencode-openai-codex-multi-auth@4.4.0'); - expect(data.plugin).not.toContain(EXPECTED_PLUGIN_LATEST); - }); - - it('rewrites unpinned plugin to @latest', () => { - const homeDir = makeHome(); - const configPath = writeConfig( - homeDir, - 'opencode.jsonc', - `{ "plugin": ["opencode-openai-codex-multi-auth", "some-other-plugin@1.2.3"] }`, - ); - - runInstaller(['--no-cache-clear'], homeDir); - - const { data } = readJsoncFile(configPath); - expect(data.plugin).not.toContain('opencode-openai-codex-multi-auth'); - expect(data.plugin).toContain(EXPECTED_PLUGIN_LATEST); - }); - - it('does not remove plugins that 
merely contain alias substrings', () => { - const homeDir = makeHome(); - const configPath = writeConfig( - homeDir, - 'opencode.jsonc', - `{ "plugin": ["opencode-openai-codex-multi-auth-helper@1.0.0", "opencode-openai-codex-auth@4.2.0"] }`, - ); - - runInstaller(['--no-cache-clear'], homeDir); - - const { data } = readJsoncFile(configPath); - expect(data.plugin).toContain('opencode-openai-codex-multi-auth-helper@1.0.0'); - expect(data.plugin).toContain(EXPECTED_PLUGIN_LATEST); - }); - - it('uninstall removes plugin models but keeps custom config', () => { - const homeDir = makeHome(); - const configPath = writeConfig( - homeDir, - 'opencode.jsonc', - `{ + ); + + runInstaller(["--no-cache-clear"], homeDir); + + const { content, data } = readJsoncFile(configPath); + expect(content).toContain("// My existing config"); + expect(data.plugin).toContain(EXPECTED_PLUGIN_LATEST); + expect(data.plugin).toContain("some-other-plugin@1.2.3"); + expect(data.provider.openai.timeout).toBe(60000); + expect(data.provider.openai.models["custom-model"]).toBeDefined(); + expect(data.provider.openai.models["gpt-5.2"]).toBeDefined(); + }); + + it("prefers JSONC when both jsonc and json exist", () => { + const homeDir = makeHome(); + const jsoncPath = writeConfig( + homeDir, + "opencode.jsonc", + `{ "plugin": ["opencode-openai-codex-auth@4.2.0"] }`, + ); + const jsonPath = writeConfig( + homeDir, + "opencode.json", + `{ "plugin": ["should-stay"], "provider": { "openai": { "timeout": 10 } } }`, + ); + const jsonBefore = readFileSync(jsonPath, "utf-8"); + + runInstaller(["--no-cache-clear"], homeDir); + + const { data } = readJsoncFile(jsoncPath); + expect(data.plugin).toContain(EXPECTED_PLUGIN_LATEST); + const jsonAfter = readFileSync(jsonPath, "utf-8"); + expect(jsonAfter).toBe(jsonBefore); + }); + + it("creates JSONC when no config exists", () => { + const homeDir = makeHome(); + runInstaller(["--no-cache-clear"], homeDir); + const configPath = join(homeDir, ".config", "opencode", 
"opencode.jsonc"); + expect(existsSync(configPath)).toBe(true); + const { data } = readJsoncFile(configPath); + expect(data.plugin).toContain(EXPECTED_PLUGIN_LATEST); + }); + + it("uses online template when available", () => { + const homeDir = makeHome(); + const releaseApiUrl = + "https://api.github.com/repos/iam-brain/opencode-openai-codex-multi-auth/releases/latest"; + const templateUrl = + "https://raw.githubusercontent.com/iam-brain/opencode-openai-codex-multi-auth/vtest/config/opencode-modern.json"; + + runInstaller(["--no-cache-clear"], homeDir, { + OPENCODE_TEST_ALLOW_ONLINE_TEMPLATE: "1", + OPENCODE_TEST_FETCH_MOCKS: JSON.stringify({ + [releaseApiUrl]: { + status: 200, + json: { tag_name: "vtest" }, + }, + [templateUrl]: { + status: 200, + json: { + provider: { + openai: { + models: { + "online-only-model": { + options: { reasoningEffort: "high" }, + }, + }, + }, + }, + }, + }, + }), + }); + + const configPath = join(homeDir, ".config", "opencode", "opencode.jsonc"); + const { data } = readJsoncFile(configPath); + expect(data.provider.openai.models["online-only-model"]).toBeDefined(); + }); + + it("falls back to static template when online template payload is malformed", () => { + const homeDir = makeHome(); + const releaseApiUrl = + "https://api.github.com/repos/iam-brain/opencode-openai-codex-multi-auth/releases/latest"; + const templateUrl = + "https://raw.githubusercontent.com/iam-brain/opencode-openai-codex-multi-auth/vtest/config/opencode-modern.json"; + + runInstaller(["--no-cache-clear"], homeDir, { + OPENCODE_TEST_ALLOW_ONLINE_TEMPLATE: "1", + OPENCODE_TEST_FETCH_MOCKS: JSON.stringify({ + [releaseApiUrl]: { + status: 200, + json: { tag_name: "vtest" }, + }, + [templateUrl]: { + status: 200, + json: { + provider: { + openai: { + models: [], + }, + }, + }, + }, + }), + }); + + const configPath = join(homeDir, ".config", "opencode", "opencode.jsonc"); + const { data } = readJsoncFile(configPath); + 
expect(data.provider.openai.models["online-only-model"]).toBeUndefined(); + expect(data.provider.openai.models["gpt-5.2"]).toBeDefined(); + }); + + it("rejects disallowed endpoint override hosts in test mode", () => { + const homeDir = makeHome(); + const output = runInstaller(["--no-cache-clear"], homeDir, { + OPENCODE_TEST_ALLOW_ONLINE_TEMPLATE: "1", + OPENCODE_INSTALLER_TEST_MODE: "1", + OPENCODE_TEMPLATE_RELEASE_API: "https://evil.example/releases/latest", + OPENCODE_TEMPLATE_RAW_BASE: "https://evil.example", + }); + + expect(output).toContain( + "Ignoring release endpoint override with disallowed host", + ); + expect(output).toContain( + "Ignoring raw endpoint override with disallowed host", + ); + }); + + it("ignores endpoint overrides outside test mode", () => { + const homeDir = makeHome(); + const output = runInstaller(["--no-cache-clear"], homeDir, { + VITEST: "", + OPENCODE_INSTALLER_TEST_MODE: "0", + OPENCODE_TEST_ALLOW_ONLINE_TEMPLATE: "1", + OPENCODE_TEMPLATE_RELEASE_API: "http://localhost:7777/releases/latest", + OPENCODE_TEMPLATE_RAW_BASE: "http://localhost:7777", + }); + + expect(output).toContain( + "Ignoring release endpoint override outside test mode", + ); + expect(output).toContain( + "Ignoring raw endpoint override outside test mode", + ); + }); + + it("preserves pinned plugin versions", () => { + const homeDir = makeHome(); + const configPath = writeConfig( + homeDir, + "opencode.jsonc", + `{ "plugin": ["opencode-openai-codex-multi-auth@4.4.0", "some-other-plugin@1.2.3"] }`, + ); + + runInstaller(["--no-cache-clear"], homeDir); + + const { data } = readJsoncFile(configPath); + expect(data.plugin).toContain("opencode-openai-codex-multi-auth@4.4.0"); + expect(data.plugin).not.toContain(EXPECTED_PLUGIN_LATEST); + }); + + it("rewrites unpinned plugin to @latest", () => { + const homeDir = makeHome(); + const configPath = writeConfig( + homeDir, + "opencode.jsonc", + `{ "plugin": ["opencode-openai-codex-multi-auth", "some-other-plugin@1.2.3"] }`, 
+ ); + + runInstaller(["--no-cache-clear"], homeDir); + + const { data } = readJsoncFile(configPath); + expect(data.plugin).not.toContain("opencode-openai-codex-multi-auth"); + expect(data.plugin).toContain(EXPECTED_PLUGIN_LATEST); + }); + + it("does not remove plugins that merely contain alias substrings", () => { + const homeDir = makeHome(); + const configPath = writeConfig( + homeDir, + "opencode.jsonc", + `{ "plugin": ["opencode-openai-codex-multi-auth-helper@1.0.0", "opencode-openai-codex-auth@4.2.0"] }`, + ); + + runInstaller(["--no-cache-clear"], homeDir); + + const { data } = readJsoncFile(configPath); + expect(data.plugin).toContain( + "opencode-openai-codex-multi-auth-helper@1.0.0", + ); + expect(data.plugin).toContain(EXPECTED_PLUGIN_LATEST); + }); + + it("uninstall removes plugin models but keeps custom config", () => { + const homeDir = makeHome(); + const configPath = writeConfig( + homeDir, + "opencode.jsonc", + `{ "plugin": ["some-other-plugin@1.2.3", "opencode-openai-codex-auth@4.2.0"], "provider": { "openai": { "timeout": 60000, "models": { "custom-model": { "name": "Custom" }, - "gpt-5.2": { "name": "GPT 5.2 (Codex)" }, - "gpt-5.2-codex": { "name": "GPT 5.2 Codex (Codex)" } + "gpt-5.2": { "name": "GPT 5.2 (Codex)" }, + "gpt-5.2-codex": { "name": "GPT 5.2 Codex (Codex)" } } }, "anthropic": { "models": { "claude": { "name": "Claude" } } } } }`, - ); - - runInstaller(['--uninstall', '--no-cache-clear'], homeDir); - - const { data } = readJsoncFile(configPath); - expect(data.plugin).toEqual(['some-other-plugin@1.2.3']); - expect(data.provider.openai.timeout).toBe(60000); - expect(data.provider.openai.models['custom-model']).toBeDefined(); - expect(data.provider.openai.models['gpt-5.2']).toBeUndefined(); - expect(data.provider.openai.models['gpt-5.2-codex']).toBeUndefined(); - expect(data.provider.anthropic).toBeDefined(); - }); - - it('uninstall --all removes plugin artifacts', () => { - const homeDir = makeHome(); - writeConfig( - homeDir, - 
'opencode.jsonc', - `{ "plugin": ["opencode-openai-codex-auth@4.2.0"] }`, - ); - - const opencodeDir = join(homeDir, '.opencode'); - const configDir = join(homeDir, '.config', 'opencode'); - mkdirSync(join(opencodeDir, 'auth'), { recursive: true }); - mkdirSync(join(opencodeDir, 'logs', 'codex-plugin'), { recursive: true }); - mkdirSync(join(opencodeDir, 'cache'), { recursive: true }); - mkdirSync(configDir, { recursive: true }); - mkdirSync(join(configDir, 'auth'), { recursive: true }); - mkdirSync(join(configDir, 'logs', 'codex-plugin'), { recursive: true }); - mkdirSync(join(configDir, 'cache'), { recursive: true }); - writeFileSync(join(opencodeDir, 'auth', 'openai.json'), '{}'); - writeFileSync(join(configDir, 'auth', 'openai.json'), '{}'); - writeFileSync(join(opencodeDir, 'openai-codex-auth-config.json'), '{}'); - writeFileSync(join(opencodeDir, 'openai-codex-accounts.json'), '{}'); - writeFileSync(join(configDir, 'openai-codex-auth-config.json'), '{}'); - writeFileSync(join(configDir, 'openai-codex-accounts.json'), '{}'); - writeFileSync(join(opencodeDir, 'logs', 'codex-plugin', 'log.txt'), 'log'); - writeFileSync(join(opencodeDir, 'cache', 'codex-instructions.md'), 'cache'); - writeFileSync(join(configDir, 'logs', 'codex-plugin', 'log.txt'), 'log'); - writeFileSync(join(configDir, 'cache', 'codex-instructions.md'), 'cache'); - - runInstaller(['--uninstall', '--all', '--no-cache-clear'], homeDir); - - expect(existsSync(join(opencodeDir, 'auth', 'openai.json'))).toBe(false); - expect(existsSync(join(configDir, 'auth', 'openai.json'))).toBe(false); - expect(existsSync(join(opencodeDir, 'openai-codex-auth-config.json'))).toBe(false); - expect(existsSync(join(opencodeDir, 'openai-codex-accounts.json'))).toBe(false); - expect(existsSync(join(configDir, 'openai-codex-auth-config.json'))).toBe(false); - expect(existsSync(join(configDir, 'openai-codex-accounts.json'))).toBe(false); - expect(existsSync(join(opencodeDir, 'logs', 'codex-plugin'))).toBe(false); - 
expect(existsSync(join(opencodeDir, 'cache', 'codex-instructions.md'))).toBe(false); - expect(existsSync(join(configDir, 'logs', 'codex-plugin'))).toBe(false); - expect(existsSync(join(configDir, 'cache', 'codex-instructions.md'))).toBe(false); - }); + ); + + runInstaller(["--uninstall", "--no-cache-clear"], homeDir); + + const { data } = readJsoncFile(configPath); + expect(data.plugin).toEqual(["some-other-plugin@1.2.3"]); + expect(data.provider.openai.timeout).toBe(60000); + expect(data.provider.openai.models["custom-model"]).toBeDefined(); + expect(data.provider.openai.models["gpt-5.2"]).toBeUndefined(); + expect(data.provider.openai.models["gpt-5.2-codex"]).toBeUndefined(); + expect(data.provider.anthropic).toBeDefined(); + }); + + it("uninstall --all removes plugin artifacts", () => { + const homeDir = makeHome(); + writeConfig( + homeDir, + "opencode.jsonc", + `{ "plugin": ["opencode-openai-codex-auth@4.2.0"] }`, + ); + + const opencodeDir = join(homeDir, ".opencode"); + const configDir = join(homeDir, ".config", "opencode"); + mkdirSync(join(opencodeDir, "auth"), { recursive: true }); + mkdirSync(join(opencodeDir, "logs", "codex-plugin"), { recursive: true }); + mkdirSync(join(opencodeDir, "cache"), { recursive: true }); + mkdirSync(configDir, { recursive: true }); + mkdirSync(join(configDir, "auth"), { recursive: true }); + mkdirSync(join(configDir, "logs", "codex-plugin"), { recursive: true }); + mkdirSync(join(configDir, "cache"), { recursive: true }); + writeFileSync(join(opencodeDir, "auth", "openai.json"), "{}"); + writeFileSync(join(configDir, "auth", "openai.json"), "{}"); + writeFileSync(join(opencodeDir, "openai-codex-auth-config.json"), "{}"); + writeFileSync(join(opencodeDir, "openai-codex-accounts.json"), "{}"); + writeFileSync(join(configDir, "openai-codex-auth-config.json"), "{}"); + writeFileSync(join(configDir, "openai-codex-accounts.json"), "{}"); + writeFileSync(join(opencodeDir, "logs", "codex-plugin", "log.txt"), "log"); + 
writeFileSync(join(opencodeDir, "cache", "codex-instructions.md"), "cache"); + writeFileSync(join(configDir, "logs", "codex-plugin", "log.txt"), "log"); + writeFileSync(join(configDir, "cache", "codex-instructions.md"), "cache"); + + runInstaller(["--uninstall", "--all", "--no-cache-clear"], homeDir); + + expect(existsSync(join(opencodeDir, "auth", "openai.json"))).toBe(false); + expect(existsSync(join(configDir, "auth", "openai.json"))).toBe(false); + expect(existsSync(join(opencodeDir, "openai-codex-auth-config.json"))).toBe( + false, + ); + expect(existsSync(join(opencodeDir, "openai-codex-accounts.json"))).toBe( + false, + ); + expect(existsSync(join(configDir, "openai-codex-auth-config.json"))).toBe( + false, + ); + expect(existsSync(join(configDir, "openai-codex-accounts.json"))).toBe( + false, + ); + expect(existsSync(join(opencodeDir, "logs", "codex-plugin"))).toBe(false); + expect( + existsSync(join(opencodeDir, "cache", "codex-instructions.md")), + ).toBe(false); + expect(existsSync(join(configDir, "logs", "codex-plugin"))).toBe(false); + expect(existsSync(join(configDir, "cache", "codex-instructions.md"))).toBe( + false, + ); + }); }); diff --git a/test/opencode-modern-template.test.ts b/test/opencode-modern-template.test.ts new file mode 100644 index 0000000..8af044f --- /dev/null +++ b/test/opencode-modern-template.test.ts @@ -0,0 +1,24 @@ +import { readFileSync } from "node:fs"; +import { join } from "node:path"; + +describe("opencode-modern template structure", () => { + it("keeps codex model presets under provider.openai.models", () => { + const filePath = join(process.cwd(), "config", "opencode-modern.json"); + const parsed = JSON.parse(readFileSync(filePath, "utf8")) as { + provider?: { + openai?: { + models?: Record; + [key: string]: unknown; + }; + }; + }; + + const openai = parsed.provider?.openai ?? {}; + const models = openai.models ?? 
{}; + + expect(models["gpt-5.3-codex"]).toBeDefined(); + expect(models["gpt-5.2-codex"]).toBeDefined(); + expect(openai["gpt-5.3-codex"]).toBeUndefined(); + expect(openai["gpt-5.2-codex"]).toBeUndefined(); + }); +}); diff --git a/test/plugin-config-hook.test.ts b/test/plugin-config-hook.test.ts index 8bba21e..e5c7b8d 100644 --- a/test/plugin-config-hook.test.ts +++ b/test/plugin-config-hook.test.ts @@ -4,346 +4,483 @@ import { tmpdir } from "node:os"; import { join } from "node:path"; vi.mock("@opencode-ai/plugin", () => { - const describe = () => ({ - describe: () => ({}), - }); - const schema = { - number: describe, - boolean: () => ({ - optional: () => ({ - describe: () => ({}), - }), - }), - }; - const tool = Object.assign((spec: unknown) => spec, { schema }); - return { tool }; + const describe = () => ({ + describe: () => ({}), + }); + const schema = { + number: describe, + boolean: () => ({ + optional: () => ({ + describe: () => ({}), + }), + }), + }; + const tool = Object.assign((spec: unknown) => spec, { schema }); + return { tool }; }); import { OpenAIAuthPlugin } from "../index.js"; describe("OpenAIAuthPlugin config hook", () => { - const originalXdg = process.env.XDG_CONFIG_HOME; - - afterEach(() => { - if (originalXdg === undefined) { - delete process.env.XDG_CONFIG_HOME; - } else { - process.env.XDG_CONFIG_HOME = originalXdg; - } - }); - - it("registers gpt-5.3-codex variants on base model metadata and filters non-allowlisted models", async () => { - const root = mkdtempSync(join(tmpdir(), "opencode-config-hook-")); - process.env.XDG_CONFIG_HOME = root; - - try { - vi.resetModules(); - const { - OpenAIAuthPlugin: FreshPlugin, - } = await import("../index.js"); - const { getCachedVariantEfforts } = await import( - "../lib/prompts/codex-models.js" - ); - const plugin = await FreshPlugin({ - client: { - tui: { showToast: vi.fn() }, - auth: { set: vi.fn() }, - } as any, - } as any); - - const cfg: any = { - provider: { - openai: { - models: { - 
"gpt-5.3-codex": { - id: "gpt-5.3-codex", - instructions: "TEMPLATE", - }, - "o3-mini": { - id: "o3-mini", - instructions: "OTHER", - }, - }, - }, - }, - experimental: {}, - }; - - await (plugin as any).config(cfg); - - expect(cfg.provider.openai.models["gpt-5.3-codex"]).toBeDefined(); - expect(cfg.provider.openai.models["gpt-5.3-codex"].instructions).toBe( - "TEMPLATE", - ); - expect(cfg.provider.openai.models["gpt-5.3-codex"].id).toBe( - "gpt-5.3-codex", - ); - expect(cfg.provider.openai.models["gpt-5.3-codex-low"]).toBeUndefined(); - expect(cfg.provider.openai.models["gpt-5.3-codex-medium"]).toBeUndefined(); - expect(cfg.provider.openai.models["gpt-5.3-codex-high"]).toBeUndefined(); - expect(cfg.provider.openai.models["gpt-5.3-codex-xhigh"]).toBeUndefined(); - expect(cfg.provider.openai.models["gpt-5.3-codex"].variants).toBeDefined(); - expect(cfg.provider.openai.models["gpt-5.3-codex"].variants.low).toBeDefined(); - expect(cfg.provider.openai.models["gpt-5.3-codex"].variants.medium).toBeDefined(); - expect(cfg.provider.openai.models["gpt-5.3-codex"].variants.high).toBeDefined(); - expect(cfg.provider.openai.models["gpt-5.3-codex"].variants.xhigh).toBeDefined(); - expect(cfg.provider.openai.models["o3-mini"]).toBeUndefined(); - } finally { - rmSync(root, { recursive: true, force: true }); - } - }); - - it("registers gpt-5.3-codex when gpt-5.3-codex metadata has no instructions field", async () => { - const root = mkdtempSync(join(tmpdir(), "opencode-config-hook-noinst-")); - process.env.XDG_CONFIG_HOME = root; - - try { - vi.resetModules(); - const { OpenAIAuthPlugin: FreshPlugin } = await import("../index.js"); - const { getCachedVariantEfforts } = await import( - "../lib/prompts/codex-models.js" - ); - const plugin = await FreshPlugin({ - client: { - tui: { showToast: vi.fn() }, - auth: { set: vi.fn() }, - } as any, - } as any); - - const cfg: any = { - provider: { - openai: { - models: { - "gpt-5.3-codex": { - name: "GPT 5.3 Codex (Codex)", - }, - }, - }, - 
}, - experimental: {}, - }; - - await (plugin as any).config(cfg); - - expect(cfg.provider.openai.models["gpt-5.3-codex"]).toBeDefined(); - } finally { - rmSync(root, { recursive: true, force: true }); - } - }); - - it("does not synthesize gpt-5.3-codex from gpt-5.2-codex in config hook", async () => { - const root = mkdtempSync(join(tmpdir(), "opencode-config-hook-no52clone-")); - process.env.XDG_CONFIG_HOME = root; - - try { - vi.resetModules(); - const { OpenAIAuthPlugin: FreshPlugin } = await import("../index.js"); - const { getCachedVariantEfforts } = await import( - "../lib/prompts/codex-models.js" - ); - const plugin = await FreshPlugin({ - client: { - tui: { showToast: vi.fn() }, - auth: { set: vi.fn() }, - } as any, - } as any); - - const cfg: any = { - provider: { - openai: { - models: { - "gpt-5.2-codex": { - id: "gpt-5.2-codex", - instructions: "TEMPLATE_52", - }, - }, - }, - }, - experimental: {}, - }; - - await (plugin as any).config(cfg); - - expect(cfg.provider.openai.models["gpt-5.3-codex"]).toBeDefined(); - } finally { - rmSync(root, { recursive: true, force: true }); - } - }); - - it("sets default OpenAI options when missing", async () => { - const root = mkdtempSync(join(tmpdir(), "opencode-config-hook-options-")); - process.env.XDG_CONFIG_HOME = root; - - try { - vi.resetModules(); - const { OpenAIAuthPlugin: FreshPlugin } = await import("../index.js"); - const plugin = await FreshPlugin({ - client: { - tui: { showToast: vi.fn() }, - auth: { set: vi.fn() }, - } as any, - } as any); - - const cfg: any = { - provider: { - openai: {}, - }, - experimental: {}, - }; - - await (plugin as any).config(cfg); - - expect(cfg.provider.openai.options).toBeDefined(); - expect(cfg.provider.openai.options.store).toBe(false); - expect(cfg.provider.openai.options.include).toContain( - "reasoning.encrypted_content", - ); - } finally { - rmSync(root, { recursive: true, force: true }); - } - }); - - it("appends (Codex) to OpenAI model display names", async () => { 
- const root = mkdtempSync(join(tmpdir(), "opencode-config-hook-codex-label-")); - process.env.XDG_CONFIG_HOME = root; - - try { - vi.resetModules(); - const { OpenAIAuthPlugin: FreshPlugin } = await import("../index.js"); - const plugin = await FreshPlugin({ - client: { - tui: { showToast: vi.fn() }, - auth: { set: vi.fn() }, - } as any, - } as any); - - const legacySuffix = "(O" + "Auth)"; - const cfg: any = { - provider: { - openai: { - models: { - "gpt-5.2-codex": { - name: `GPT 5.2 Codex ${legacySuffix}`, - }, - "gpt-5.1": { - displayName: `GPT 5.1 ${legacySuffix}`, - }, - }, - }, - }, - experimental: {}, - }; - - await (plugin as any).config(cfg); - - expect(cfg.provider.openai.models["gpt-5.2-codex"].name).toBe( - "GPT 5.2 Codex (Codex)", - ); - expect(cfg.provider.openai.models["gpt-5.1"].displayName).toBe( - "GPT 5.1 (Codex)", - ); - } finally { - rmSync(root, { recursive: true, force: true }); - } - }); - - it("preserves effort-suffixed models when base entry is missing", async () => { - const root = mkdtempSync(join(tmpdir(), "opencode-config-hook-legacy-")); - process.env.XDG_CONFIG_HOME = root; - - try { - vi.resetModules(); - const { OpenAIAuthPlugin: FreshPlugin } = await import("../index.js"); - const plugin = await FreshPlugin({ - client: { - tui: { showToast: vi.fn() }, - auth: { set: vi.fn() }, - } as any, - } as any); - - const cfg: any = { - provider: { - openai: { - models: { - "gpt-5.3-codex-low": { id: "gpt-5.3-codex-low" }, - "gpt-5.3-codex-high": { id: "gpt-5.3-codex-high" }, - }, - }, - }, - experimental: {}, - }; - - await (plugin as any).config(cfg); - - expect(cfg.provider.openai.models["gpt-5.3-codex-low"]).toBeDefined(); - expect(cfg.provider.openai.models["gpt-5.3-codex-high"]).toBeDefined(); - } finally { - rmSync(root, { recursive: true, force: true }); - } + const originalXdg = process.env.XDG_CONFIG_HOME; + + afterEach(() => { + if (originalXdg === undefined) { + delete process.env.XDG_CONFIG_HOME; + } else { + 
process.env.XDG_CONFIG_HOME = originalXdg; + } + }); + + it("registers gpt-5.3-codex variants on base model metadata and filters non-allowlisted models", async () => { + const root = mkdtempSync(join(tmpdir(), "opencode-config-hook-")); + process.env.XDG_CONFIG_HOME = root; + + try { + vi.resetModules(); + const { OpenAIAuthPlugin: FreshPlugin } = await import("../index.js"); + const { getCachedVariantEfforts } = + await import("../lib/prompts/codex-models.js"); + const plugin = await FreshPlugin({ + client: { + tui: { showToast: vi.fn() }, + auth: { set: vi.fn() }, + } as any, + } as any); + + const cfg: any = { + provider: { + openai: { + models: { + "gpt-5.3-codex": { + id: "gpt-5.3-codex", + instructions: "TEMPLATE", + }, + "o3-mini": { + id: "o3-mini", + instructions: "OTHER", + }, + }, + }, + }, + experimental: {}, + }; + + await (plugin as any).config(cfg); + + expect(cfg.provider.openai.models["gpt-5.3-codex"]).toBeDefined(); + expect(cfg.provider.openai.models["gpt-5.3-codex"].instructions).toBe( + "TEMPLATE", + ); + expect(cfg.provider.openai.models["gpt-5.3-codex"].id).toBe( + "gpt-5.3-codex", + ); + expect(cfg.provider.openai.models["gpt-5.3-codex-low"]).toBeUndefined(); + expect( + cfg.provider.openai.models["gpt-5.3-codex-medium"], + ).toBeUndefined(); + expect(cfg.provider.openai.models["gpt-5.3-codex-high"]).toBeUndefined(); + expect(cfg.provider.openai.models["gpt-5.3-codex-xhigh"]).toBeUndefined(); + expect( + cfg.provider.openai.models["gpt-5.3-codex"].variants, + ).toBeDefined(); + expect( + cfg.provider.openai.models["gpt-5.3-codex"].variants.low, + ).toBeDefined(); + expect( + cfg.provider.openai.models["gpt-5.3-codex"].variants.medium, + ).toBeDefined(); + expect( + cfg.provider.openai.models["gpt-5.3-codex"].variants.high, + ).toBeDefined(); + expect( + cfg.provider.openai.models["gpt-5.3-codex"].variants.xhigh, + ).toBeDefined(); + expect(cfg.provider.openai.models["o3-mini"]).toBeUndefined(); + } finally { + rmSync(root, { recursive: true, 
force: true }); + } + }); + + it("registers gpt-5.3-codex when gpt-5.3-codex metadata has no instructions field", async () => { + const root = mkdtempSync(join(tmpdir(), "opencode-config-hook-noinst-")); + process.env.XDG_CONFIG_HOME = root; + + try { + vi.resetModules(); + const { OpenAIAuthPlugin: FreshPlugin } = await import("../index.js"); + const { getCachedVariantEfforts } = + await import("../lib/prompts/codex-models.js"); + const plugin = await FreshPlugin({ + client: { + tui: { showToast: vi.fn() }, + auth: { set: vi.fn() }, + } as any, + } as any); + + const cfg: any = { + provider: { + openai: { + models: { + "gpt-5.3-codex": { + name: "GPT 5.3 Codex (Codex)", + }, + }, + }, + }, + experimental: {}, + }; + + await (plugin as any).config(cfg); + + expect(cfg.provider.openai.models["gpt-5.3-codex"]).toBeDefined(); + } finally { + rmSync(root, { recursive: true, force: true }); + } + }); + + it("does not synthesize gpt-5.3-codex from gpt-5.2-codex in config hook", async () => { + const root = mkdtempSync(join(tmpdir(), "opencode-config-hook-no52clone-")); + process.env.XDG_CONFIG_HOME = root; + + try { + vi.resetModules(); + const { OpenAIAuthPlugin: FreshPlugin } = await import("../index.js"); + const { getCachedVariantEfforts } = + await import("../lib/prompts/codex-models.js"); + const plugin = await FreshPlugin({ + client: { + tui: { showToast: vi.fn() }, + auth: { set: vi.fn() }, + } as any, + } as any); + + const cfg: any = { + provider: { + openai: { + models: { + "gpt-5.2-codex": { + id: "gpt-5.2-codex", + instructions: "TEMPLATE_52", + }, + }, + }, + }, + experimental: {}, + }; + + await (plugin as any).config(cfg); + + expect(cfg.provider.openai.models["gpt-5.3-codex"]).toBeDefined(); + } finally { + rmSync(root, { recursive: true, force: true }); + } + }); + + it("preserves effort-suffixed models when base entry is missing", async () => { + const root = mkdtempSync(join(tmpdir(), "opencode-config-hook-legacy-")); + process.env.XDG_CONFIG_HOME = 
root; + + try { + vi.resetModules(); + const { OpenAIAuthPlugin: FreshPlugin } = await import("../index.js"); + const plugin = await FreshPlugin({ + client: { + tui: { showToast: vi.fn() }, + auth: { set: vi.fn() }, + } as any, + } as any); + + const cfg: any = { + provider: { + openai: { + models: { + "gpt-5.3-codex-low": { id: "gpt-5.3-codex-low" }, + "gpt-5.3-codex-high": { id: "gpt-5.3-codex-high" }, + }, + }, + }, + experimental: {}, + }; + + await (plugin as any).config(cfg); + + expect(cfg.provider.openai.models["gpt-5.3-codex-low"]).toBeDefined(); + expect(cfg.provider.openai.models["gpt-5.3-codex-high"]).toBeDefined(); + } finally { + rmSync(root, { recursive: true, force: true }); + } + }); + + it("uses cached supported_reasoning_levels for codex variants", async () => { + const root = mkdtempSync(join(tmpdir(), "opencode-config-hook-cache-")); + process.env.XDG_CONFIG_HOME = root; + + try { + const cacheDir = join(root, "opencode", "cache"); + mkdirSync(cacheDir, { recursive: true }); + writeFileSync( + join(cacheDir, "codex-models-cache.json"), + JSON.stringify({ + fetchedAt: Date.now(), + source: "server", + models: [ + { + slug: "gpt-5.3-codex", + supported_reasoning_levels: [ + { effort: "low" }, + { effort: "medium" }, + ], + }, + ], + }), + "utf8", + ); + + vi.resetModules(); + const { OpenAIAuthPlugin: FreshPlugin } = await import("../index.js"); + const { getCachedVariantEfforts } = + await import("../lib/prompts/codex-models.js"); + const plugin = await FreshPlugin({ + client: { + tui: { showToast: vi.fn() }, + auth: { set: vi.fn() }, + } as any, + } as any); + + const cfg: any = { + provider: { + openai: { + models: { + "gpt-5.3-codex": { id: "gpt-5.3-codex" }, + }, + }, + }, + experimental: {}, + }; + + const efforts = getCachedVariantEfforts(); + expect(efforts.get("gpt-5.3-codex")).toEqual(["low", "medium"]); + + await (plugin as any).config(cfg); + + const variants = cfg.provider.openai.models["gpt-5.3-codex"].variants; + 
expect(Object.keys(variants)).toEqual(["low", "medium"]); + } finally { + rmSync(root, { recursive: true, force: true }); + } + }); + + it("supports future gpt codex models without hardcoded allowlist updates", async () => { + const root = mkdtempSync( + join(tmpdir(), "opencode-config-hook-future-codex-"), + ); + process.env.XDG_CONFIG_HOME = root; + + try { + const plugin = await OpenAIAuthPlugin({ + client: { + tui: { showToast: vi.fn() }, + auth: { set: vi.fn() }, + } as any, + } as any); + + const cfg: any = { + provider: { + openai: { + models: { + "gpt-5.4-codex": { + id: "gpt-5.4-codex", + instructions: "FUTURE_TEMPLATE", + }, + "o3-mini": { + id: "o3-mini", + instructions: "OTHER", + }, + }, + }, + }, + experimental: {}, + }; + + await (plugin as any).config(cfg); + + expect(cfg.provider.openai.models["gpt-5.4-codex"]).toBeDefined(); + expect(cfg.provider.openai.models["gpt-5.4-codex"].instructions).toBe( + "FUTURE_TEMPLATE", + ); + expect(cfg.provider.openai.models["gpt-5.4-codex-low"]).toBeUndefined(); + expect( + cfg.provider.openai.models["gpt-5.4-codex-medium"], + ).toBeUndefined(); + expect(cfg.provider.openai.models["gpt-5.4-codex-high"]).toBeUndefined(); + expect(cfg.provider.openai.models["gpt-5.4-codex-xhigh"]).toBeUndefined(); + expect( + cfg.provider.openai.models["gpt-5.4-codex"].variants, + ).toBeDefined(); + expect( + cfg.provider.openai.models["gpt-5.4-codex"].variants.low, + ).toBeDefined(); + expect( + cfg.provider.openai.models["gpt-5.4-codex"].variants.medium, + ).toBeDefined(); + expect( + cfg.provider.openai.models["gpt-5.4-codex"].variants.high, + ).toBeDefined(); + expect( + cfg.provider.openai.models["gpt-5.4-codex"].variants.xhigh, + ).toBeDefined(); + expect(cfg.provider.openai.models["o3-mini"]).toBeUndefined(); + } finally { + rmSync(root, { recursive: true, force: true }); + } + }); + + it("preserves suffixed variant metadata when folding into base model variants", async () => { + const root = mkdtempSync( + join(tmpdir(), 
"opencode-config-hook-variant-merge-"), + ); + process.env.XDG_CONFIG_HOME = root; + + try { + const plugin = await OpenAIAuthPlugin({ + client: { + tui: { showToast: vi.fn() }, + auth: { set: vi.fn() }, + } as any, + } as any); + + const cfg: any = { + provider: { + openai: { + models: { + "gpt-5.3-codex": { + id: "gpt-5.3-codex", + name: "GPT 5.3 Codex", + variants: { + low: { + reasoningEffort: "low", + textVerbosity: "low", + }, + }, + }, + "gpt-5.3-codex-high": { + id: "gpt-5.3-codex-high", + name: "GPT 5.3 Codex High", + textVerbosity: "high", + reasoningSummary: "detailed", + disabled: true, + }, + }, + }, + }, + experimental: {}, + }; + + await (plugin as any).config(cfg); + + expect(cfg.provider.openai.models["gpt-5.3-codex-high"]).toBeUndefined(); + expect(cfg.provider.openai.models["gpt-5.3-codex"]).toBeDefined(); + expect(cfg.provider.openai.models["gpt-5.3-codex"].variants.low).toEqual({ + reasoningEffort: "low", + textVerbosity: "low", + }); + expect( + cfg.provider.openai.models["gpt-5.3-codex"].variants.high, + ).toMatchObject({ + reasoningEffort: "high", + textVerbosity: "medium", + reasoningSummary: "detailed", + disabled: true, + }); + } finally { + rmSync(root, { recursive: true, force: true }); + } + }); + + it("prefers existing base variant values over suffixed metadata on key conflicts", async () => { + const root = mkdtempSync( + join(tmpdir(), "opencode-config-hook-variant-precedence-"), + ); + process.env.XDG_CONFIG_HOME = root; + + try { + const plugin = await OpenAIAuthPlugin({ + client: { + tui: { showToast: vi.fn() }, + auth: { set: vi.fn() }, + } as any, + } as any); + + const cfg: any = { + provider: { + openai: { + models: { + "gpt-5.3-codex": { + id: "gpt-5.3-codex", + variants: { + high: { + reasoningEffort: "high", + textVerbosity: "medium", + reasoningSummary: "concise", + }, + }, + }, + "gpt-5.3-codex-high": { + id: "gpt-5.3-codex-high", + textVerbosity: "high", + reasoningSummary: "detailed", + }, + }, + }, + }, + 
experimental: {}, + }; + + await (plugin as any).config(cfg); + + expect( + cfg.provider.openai.models["gpt-5.3-codex"].variants.high, + ).toMatchObject({ + reasoningEffort: "high", + textVerbosity: "medium", + reasoningSummary: "concise", + }); + } finally { + rmSync(root, { recursive: true, force: true }); + } + }); + + it("does not register codex commands", async () => { + const root = mkdtempSync( + join(tmpdir(), "opencode-config-hook-codex-auth-"), + ); + process.env.XDG_CONFIG_HOME = root; + + try { + const plugin = await OpenAIAuthPlugin({ + client: { + tui: { showToast: vi.fn() }, + auth: { set: vi.fn() }, + } as any, + } as any); + + const cfg: any = { provider: { openai: {} }, experimental: {} }; + await (plugin as any).config(cfg); + + const commandKeys = Object.keys(cfg.command ?? {}); + expect(commandKeys.some((key) => key.startsWith("codex-"))).toBe(false); + const toolKeys = cfg.experimental?.primary_tools ?? []; + expect(toolKeys.some((key: string) => key.startsWith("codex-"))).toBe(false); + } finally { + rmSync(root, { recursive: true, force: true }); + } }); - it("uses cached supported_reasoning_levels for codex variants", async () => { - const root = mkdtempSync(join(tmpdir(), "opencode-config-hook-cache-")); - process.env.XDG_CONFIG_HOME = root; - - try { - const cacheDir = join(root, "opencode", "cache"); - mkdirSync(cacheDir, { recursive: true }); - writeFileSync( - join(cacheDir, "codex-models-cache.json"), - JSON.stringify({ - fetchedAt: Date.now(), - source: "server", - models: [ - { - slug: "gpt-5.3-codex", - supported_reasoning_levels: [ - { effort: "low" }, - { effort: "medium" }, - ], - }, - ], - }), - "utf8", - ); - - vi.resetModules(); - const { OpenAIAuthPlugin: FreshPlugin } = await import("../index.js"); - const { getCachedVariantEfforts } = await import( - "../lib/prompts/codex-models.js" - ); - const plugin = await FreshPlugin({ - client: { - tui: { showToast: vi.fn() }, - auth: { set: vi.fn() }, - } as any, - } as any); - - 
const cfg: any = { - provider: { - openai: { - models: { - "gpt-5.3-codex": { id: "gpt-5.3-codex" }, - }, - }, - }, - experimental: {}, - }; - - const efforts = getCachedVariantEfforts(); - expect(efforts.get("gpt-5.3-codex")).toEqual(["low", "medium"]); - - await (plugin as any).config(cfg); - - const variants = cfg.provider.openai.models["gpt-5.3-codex"].variants; - expect(Object.keys(variants)).toEqual(["low", "medium"]); - } finally { - rmSync(root, { recursive: true, force: true }); - } - }); - - it("supports future gpt codex models without hardcoded allowlist updates", async () => { - const root = mkdtempSync(join(tmpdir(), "opencode-config-hook-future-codex-")); + it("preserves user-defined codex-prefixed commands and tools", async () => { + const root = mkdtempSync( + join(tmpdir(), "opencode-config-hook-codex-custom-"), + ); process.env.XDG_CONFIG_HOME = root; try { @@ -355,171 +492,21 @@ describe("OpenAIAuthPlugin config hook", () => { } as any); const cfg: any = { - provider: { - openai: { - models: { - "gpt-5.4-codex": { - id: "gpt-5.4-codex", - instructions: "FUTURE_TEMPLATE", - }, - "o3-mini": { - id: "o3-mini", - instructions: "OTHER", - }, - }, - }, + provider: { openai: {} }, + command: { + "codex-custom-user-command": { template: "custom" }, + "codex-auth": { template: "legacy" }, }, - experimental: {}, - }; - - await (plugin as any).config(cfg); - - expect(cfg.provider.openai.models["gpt-5.4-codex"]).toBeDefined(); - expect(cfg.provider.openai.models["gpt-5.4-codex"].instructions).toBe( - "FUTURE_TEMPLATE", - ); - expect(cfg.provider.openai.models["gpt-5.4-codex-low"]).toBeUndefined(); - expect(cfg.provider.openai.models["gpt-5.4-codex-medium"]).toBeUndefined(); - expect(cfg.provider.openai.models["gpt-5.4-codex-high"]).toBeUndefined(); - expect(cfg.provider.openai.models["gpt-5.4-codex-xhigh"]).toBeUndefined(); - expect(cfg.provider.openai.models["gpt-5.4-codex"].variants).toBeDefined(); - 
expect(cfg.provider.openai.models["gpt-5.4-codex"].variants.low).toBeDefined(); - expect(cfg.provider.openai.models["gpt-5.4-codex"].variants.medium).toBeDefined(); - expect(cfg.provider.openai.models["gpt-5.4-codex"].variants.high).toBeDefined(); - expect(cfg.provider.openai.models["gpt-5.4-codex"].variants.xhigh).toBeDefined(); - expect(cfg.provider.openai.models["o3-mini"]).toBeUndefined(); - } finally { - rmSync(root, { recursive: true, force: true }); - } - }); - - it("preserves suffixed variant metadata when folding into base model variants", async () => { - const root = mkdtempSync(join(tmpdir(), "opencode-config-hook-variant-merge-")); - process.env.XDG_CONFIG_HOME = root; - - try { - const plugin = await OpenAIAuthPlugin({ - client: { - tui: { showToast: vi.fn() }, - auth: { set: vi.fn() }, - } as any, - } as any); - - const cfg: any = { - provider: { - openai: { - models: { - "gpt-5.3-codex": { - id: "gpt-5.3-codex", - name: "GPT 5.3 Codex", - variants: { - low: { - reasoningEffort: "low", - textVerbosity: "low", - }, - }, - }, - "gpt-5.3-codex-high": { - id: "gpt-5.3-codex-high", - name: "GPT 5.3 Codex High", - textVerbosity: "high", - reasoningSummary: "detailed", - disabled: true, - }, - }, - }, - }, - experimental: {}, - }; - - await (plugin as any).config(cfg); - - expect(cfg.provider.openai.models["gpt-5.3-codex-high"]).toBeUndefined(); - expect(cfg.provider.openai.models["gpt-5.3-codex"]).toBeDefined(); - expect(cfg.provider.openai.models["gpt-5.3-codex"].variants.low).toEqual({ - reasoningEffort: "low", - textVerbosity: "low", - }); - expect(cfg.provider.openai.models["gpt-5.3-codex"].variants.high).toMatchObject({ - reasoningEffort: "high", - textVerbosity: "medium", - reasoningSummary: "detailed", - disabled: true, - }); - } finally { - rmSync(root, { recursive: true, force: true }); - } - }); - - it("prefers existing base variant values over suffixed metadata on key conflicts", async () => { - const root = mkdtempSync(join(tmpdir(), 
"opencode-config-hook-variant-precedence-")); - process.env.XDG_CONFIG_HOME = root; - - try { - const plugin = await OpenAIAuthPlugin({ - client: { - tui: { showToast: vi.fn() }, - auth: { set: vi.fn() }, - } as any, - } as any); - - const cfg: any = { - provider: { - openai: { - models: { - "gpt-5.3-codex": { - id: "gpt-5.3-codex", - variants: { - high: { - reasoningEffort: "high", - textVerbosity: "medium", - reasoningSummary: "concise", - }, - }, - }, - "gpt-5.3-codex-high": { - id: "gpt-5.3-codex-high", - textVerbosity: "high", - reasoningSummary: "detailed", - }, - }, - }, + experimental: { + primary_tools: ["codex-custom-user-command", "codex-auth"], }, - experimental: {}, }; - - await (plugin as any).config(cfg); - - expect(cfg.provider.openai.models["gpt-5.3-codex"].variants.high).toMatchObject({ - reasoningEffort: "high", - textVerbosity: "medium", - reasoningSummary: "concise", - }); - } finally { - rmSync(root, { recursive: true, force: true }); - } - }); - - it("registers codex-auth command and removes legacy codex commands", async () => { - const root = mkdtempSync(join(tmpdir(), "opencode-config-hook-codex-auth-")); - process.env.XDG_CONFIG_HOME = root; - - try { - const plugin = await OpenAIAuthPlugin({ - client: { - tui: { showToast: vi.fn() }, - auth: { set: vi.fn() }, - } as any, - } as any); - - const cfg: any = { provider: { openai: {} }, experimental: {} }; await (plugin as any).config(cfg); - expect(cfg.command["codex-auth"]).toBeDefined(); - expect(cfg.command["codex-status"]).toBeUndefined(); - expect(cfg.command["codex-switch-accounts"]).toBeUndefined(); - expect(cfg.command["codex-toggle-account"]).toBeUndefined(); - expect(cfg.command["codex-remove-account"]).toBeUndefined(); + expect(cfg.command["codex-custom-user-command"]).toBeDefined(); + expect(cfg.command["codex-auth"]).toBeUndefined(); + expect(cfg.experimental.primary_tools).toContain("codex-custom-user-command"); + 
expect(cfg.experimental.primary_tools).not.toContain("codex-auth"); } finally { rmSync(root, { recursive: true, force: true }); } diff --git a/test/plugin-loader.test.ts b/test/plugin-loader.test.ts index c0a68e5..1435858 100644 --- a/test/plugin-loader.test.ts +++ b/test/plugin-loader.test.ts @@ -4,10 +4,8 @@ import { tmpdir } from "node:os"; import { join } from "node:path"; import type { Auth } from "@opencode-ai/sdk"; import { AUTH_LABELS, DEFAULT_MODEL_FAMILY, JWT_CLAIM_PATH } from "../lib/constants.js"; -import { AccountManager } from "../lib/accounts.js"; import * as logger from "../lib/logger.js"; import { createJwt } from "./helpers/jwt.js"; -import { promptLoginMode, promptManageAccounts } from "../lib/cli.js"; let mockedTokenResult: any; @@ -27,11 +25,6 @@ vi.mock("@opencode-ai/plugin", () => { return { tool }; }); -vi.mock("../lib/cli.js", () => ({ - promptLoginMode: vi.fn(), - promptManageAccounts: vi.fn(), -})); - vi.mock("../lib/auth/auth.js", async () => { const actual = await vi.importActual("../lib/auth/auth.js"); return { @@ -112,131 +105,17 @@ describe("OpenAIAuthPlugin loader", () => { } }); - it("codex-status is read-only (does not refresh or save)", async () => { - const root = mkdtempSync(join(tmpdir(), "opencode-status-")); + it("does not register codex tools", async () => { + const root = mkdtempSync(join(tmpdir(), "opencode-tools-")); process.env.XDG_CONFIG_HOME = root; - let refreshSpy: any; - let saveSpy: any; try { - const storageDir = join(root, "opencode"); - mkdirSync(storageDir, { recursive: true }); - const now = Date.now(); - const storage = { - version: 3, - accounts: [ - { - refreshToken: "status-refresh", - accountId: "acct_status", - email: "status@example.com", - plan: "Plus", - enabled: true, - addedAt: now, - lastUsed: now, - }, - ], - activeIndex: 0, - activeIndexByFamily: { codex: 0 }, - }; - writeFileSync( - join(storageDir, "openai-codex-accounts.json"), - JSON.stringify(storage, null, 2), - "utf-8", - ); - - 
refreshSpy = vi - .spyOn(AccountManager.prototype, "refreshAccountWithFallback") - .mockResolvedValue({ type: "failed" } as any); - saveSpy = vi.spyOn(AccountManager.prototype, "saveToDisk").mockResolvedValue(); - const client = { tui: { showToast: vi.fn() }, auth: { set: vi.fn() }, }; - const plugin = await OpenAIAuthPlugin({ client: client as any } as any); - await (plugin as any).tool["codex-status"].execute({}); - - expect(refreshSpy).not.toHaveBeenCalled(); - expect(saveSpy).not.toHaveBeenCalled(); - } finally { - refreshSpy?.mockRestore(); - saveSpy?.mockRestore(); - rmSync(root, { recursive: true, force: true }); - } - }); - - it("codex-auth tool reports non-tty requirement", async () => { - const root = mkdtempSync(join(tmpdir(), "opencode-codex-auth-")); - process.env.XDG_CONFIG_HOME = root; - const originalIsTTY = process.stdin.isTTY; - try { - Object.defineProperty(process.stdin, "isTTY", { value: false, configurable: true }); - const client = { - tui: { showToast: vi.fn() }, - auth: { set: vi.fn() }, - }; - const plugin = await OpenAIAuthPlugin({ client: client as any } as any); - const result = await (plugin as any).tool["codex-auth"].execute({}); - expect(result).toContain("TTY"); - } finally { - Object.defineProperty(process.stdin, "isTTY", { value: originalIsTTY, configurable: true }); - rmSync(root, { recursive: true, force: true }); - } - }); - - it("codex-status highlights active account for default family", async () => { - const root = mkdtempSync(join(tmpdir(), "opencode-status-active-")); - process.env.XDG_CONFIG_HOME = root; - try { - const storageDir = join(root, "opencode"); - mkdirSync(storageDir, { recursive: true }); - const now = Date.now(); - const storage = { - version: 3, - accounts: [ - { - refreshToken: "status-refresh-1", - accountId: "acct_status_1", - email: "one@example.com", - plan: "Plus", - enabled: true, - addedAt: now, - lastUsed: now, - }, - { - refreshToken: "status-refresh-2", - accountId: "acct_status_2", - email: 
"two@example.com", - plan: "Plus", - enabled: true, - addedAt: now, - lastUsed: now, - }, - ], - activeIndex: 0, - activeIndexByFamily: { - [DEFAULT_MODEL_FAMILY]: 0, - "gpt-5.2": 1, - }, - }; - writeFileSync( - join(storageDir, "openai-codex-accounts.json"), - JSON.stringify(storage, null, 2), - "utf-8", - ); - - const client = { - tui: { showToast: vi.fn() }, - auth: { set: vi.fn() }, - }; - - const plugin = await OpenAIAuthPlugin({ client: client as any } as any); - const output = await (plugin as any).tool["codex-status"].execute({}); - const activeLine = output - .split("\n") - .find((line: string) => line.includes("●") && line.includes("ACTIVE")); - - expect(activeLine).toContain("one@example.com"); + const toolKeys = Object.keys((plugin as any).tool ?? {}); + expect(toolKeys.some((key) => key.startsWith("codex-"))).toBe(false); } finally { rmSync(root, { recursive: true, force: true }); } @@ -343,61 +222,8 @@ describe("OpenAIAuthPlugin loader", () => { } }); - it("removes the correct account when legacy records exist", async () => { - const root = mkdtempSync(join(tmpdir(), "opencode-remove-")); - process.env.XDG_CONFIG_HOME = root; - - const storageDir = join(root, "opencode"); - mkdirSync(storageDir, { recursive: true }); - const now = Date.now(); - const legacyAccount = { - refreshToken: "legacy-refresh", - addedAt: now, - lastUsed: now, - enabled: true, - }; - const fullAccount = { - refreshToken: "full-refresh", - accountId: "acct_123", - email: "user@example.com", - plan: "Plus", - addedAt: now, - lastUsed: now, - enabled: true, - }; - const storage = { - version: 3, - accounts: [legacyAccount, fullAccount], - activeIndex: 0, - activeIndexByFamily: { codex: 0 }, - }; - writeFileSync( - join(storageDir, "openai-codex-accounts.json"), - JSON.stringify(storage, null, 2), - "utf-8", - ); - - const client = { - tui: { showToast: vi.fn() }, - auth: { set: vi.fn() }, - }; - - const plugin = await OpenAIAuthPlugin({ client: client as any } as any); - await 
(plugin as any).auth.loader(() => Promise.resolve(createAuth()), {} as any); - - const result = await (plugin as any).tool["codex-remove-account"].execute({ index: 2, confirm: true }); - expect(result).toContain("Removed"); - - const updated = JSON.parse( - readFileSync(join(storageDir, "openai-codex-accounts.json"), "utf-8"), - ); - expect(updated.accounts).toHaveLength(1); - expect(updated.accounts[0].refreshToken).toBe("legacy-refresh"); - - await rmSync(root, { recursive: true, force: true }); - }); - it("toggles the targeted account if storage shifts", async () => { + it("keeps existing accounts unchanged in non-TTY authorize flow", async () => { const root = mkdtempSync(join(tmpdir(), "opencode-manage-")); process.env.XDG_CONFIG_HOME = root; process.env.OPENCODE_NO_BROWSER = "1"; @@ -411,41 +237,14 @@ describe("OpenAIAuthPlugin loader", () => { "utf-8", ), ); - const accountA = fixture.accounts[0]; - const accountB = fixture.accounts[1]; - const accountC = fixture.accounts[2]; const baseStorage = { version: 3, - accounts: [accountA, accountB], + accounts: [fixture.accounts[0], fixture.accounts[1]], activeIndex: 0, activeIndexByFamily: fixture.activeIndexByFamily, }; writeFileSync(storagePath, JSON.stringify(baseStorage, null, 2), "utf-8"); - vi.mocked(promptLoginMode) - .mockResolvedValueOnce("manage") - .mockResolvedValueOnce("add"); - vi.mocked(promptManageAccounts).mockImplementationOnce(async () => { - const shiftedStorage = { - ...baseStorage, - accounts: [accountC, accountA, accountB], - }; - writeFileSync( - storagePath, - JSON.stringify(shiftedStorage, null, 2), - "utf-8", - ); - return { - action: "toggle", - target: { - accountId: accountB.accountId, - email: accountB.email, - plan: accountB.plan, - refreshToken: accountB.refreshToken, - }, - }; - }); - const client = { tui: { showToast: vi.fn() }, auth: { set: vi.fn() }, @@ -457,14 +256,8 @@ describe("OpenAIAuthPlugin loader", () => { await oauthMethod.authorize({}); const updated = 
JSON.parse(readFileSync(storagePath, "utf-8")); - const updatedAccountA = updated.accounts.find( - (account: any) => account.refreshToken === accountA.refreshToken, - ); - const updatedAccountB = updated.accounts.find( - (account: any) => account.refreshToken === accountB.refreshToken, - ); - expect(updatedAccountA.enabled).toBe(true); - expect(updatedAccountB.enabled).toBe(false); + expect(updated.accounts).toEqual(baseStorage.accounts); + expect(updated.activeIndex).toBe(baseStorage.activeIndex); } finally { delete process.env.OPENCODE_NO_BROWSER; rmSync(root, { recursive: true, force: true }); diff --git a/test/request-transformer.test.ts b/test/request-transformer.test.ts index 0ed90b9..1ef8e95 100644 --- a/test/request-transformer.test.ts +++ b/test/request-transformer.test.ts @@ -1,1649 +1,1743 @@ -import { describe, it, expect, vi } from 'vitest'; +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; import { - normalizeModel, - getModelConfig, - filterInput, - transformRequestBody, -} from '../lib/request/request-transformer.js'; -import { createSyntheticErrorResponse } from '../lib/request/response-handler.js'; -import type { RequestBody, UserConfig, InputItem } from '../lib/types.js'; -import { mkdtempSync, mkdirSync, rmSync, writeFileSync } from 'node:fs'; -import { tmpdir } from 'node:os'; -import { join } from 'node:path'; - -describe('Request Transformer Module', () => { - describe('synthetic error responses', () => { - it('creates JSON error payloads', async () => { - const response = createSyntheticErrorResponse('Bad model', 400, 'unsupported_model'); - const payload = await response.json(); - expect(payload.error.message).toContain('Bad model'); - expect(payload.error.type).toBe('unsupported_model'); - }); - }); - - describe('normalizeModel', () => { - it('should normalize known gpt-5.x codex models', async () => { - expect(normalizeModel('gpt-5.3-codex')).toBe('gpt-5.3-codex'); - 
expect(normalizeModel('openai/gpt-5.2-codex-high')).toBe('gpt-5.2-codex'); - }); - - it('should normalize known gpt-5.x general models', async () => { - expect(normalizeModel('gpt-5.2')).toBe('gpt-5.2'); - expect(normalizeModel('openai/gpt-5.1-high')).toBe('gpt-5.1'); - }); - - it('should normalize legacy gpt-5 aliases', async () => { - expect(normalizeModel('gpt-5')).toBe('gpt-5.1'); - expect(normalizeModel('gpt-5-codex')).toBe('gpt-5.1-codex'); - expect(normalizeModel('gpt-5-codex-low')).toBe('gpt-5.1-codex'); - expect(normalizeModel('codex-mini-latest')).toBe('gpt-5.1-codex-mini'); - }); - - it('should leave unknown models untouched', async () => { - expect(normalizeModel('unknown-model')).toBe('unknown-model'); - expect(normalizeModel('gpt-4')).toBe('gpt-4'); - }); - - it('should default to gpt-5.1 when model is missing', async () => { - expect(normalizeModel(undefined)).toBe('gpt-5.1'); - expect(normalizeModel('')).toBe('gpt-5.1'); - }); - - // Codex CLI preset name tests - gpt-5.x only - describe('Codex CLI preset names', () => { - it('should normalize gpt-5.1 codex mini presets', async () => { - expect(normalizeModel('gpt-5.1-codex-mini')).toBe('gpt-5.1-codex-mini'); - expect(normalizeModel('gpt-5.1-codex-mini-high')).toBe('gpt-5.1-codex-mini'); - expect(normalizeModel('openai/gpt-5.1-codex-mini-medium')).toBe('gpt-5.1-codex-mini'); - }); - - it('should normalize gpt-5.1 codex max presets', async () => { - expect(normalizeModel('gpt-5.1-codex-max')).toBe('gpt-5.1-codex-max'); - expect(normalizeModel('gpt-5.1-codex-max-high')).toBe('gpt-5.1-codex-max'); - expect(normalizeModel('gpt-5.1-codex-max-xhigh')).toBe('gpt-5.1-codex-max'); - expect(normalizeModel('openai/gpt-5.1-codex-max-medium')).toBe('gpt-5.1-codex-max'); - }); - - it('should normalize gpt-5.3 and gpt-5.2 codex presets', async () => { - expect(normalizeModel('gpt-5.2-codex')).toBe('gpt-5.2-codex'); - expect(normalizeModel('gpt-5.2-codex-low')).toBe('gpt-5.2-codex'); - 
expect(normalizeModel('gpt-5.2-codex-medium')).toBe('gpt-5.2-codex'); - expect(normalizeModel('gpt-5.2-codex-high')).toBe('gpt-5.2-codex'); - expect(normalizeModel('gpt-5.2-codex-xhigh')).toBe('gpt-5.2-codex'); - expect(normalizeModel('openai/gpt-5.2-codex-xhigh')).toBe('gpt-5.2-codex'); - expect(normalizeModel('gpt-5.3-codex')).toBe('gpt-5.3-codex'); - expect(normalizeModel('gpt-5.3-codex-low')).toBe('gpt-5.3-codex'); - expect(normalizeModel('gpt-5.3-codex-medium')).toBe('gpt-5.3-codex'); - expect(normalizeModel('gpt-5.3-codex-high')).toBe('gpt-5.3-codex'); - expect(normalizeModel('gpt-5.3-codex-xhigh')).toBe('gpt-5.3-codex'); - expect(normalizeModel('openai/gpt-5.3-codex')).toBe('gpt-5.3-codex'); - expect(normalizeModel('openai/gpt-5.3-codex-xhigh')).toBe('gpt-5.3-codex'); - }); - - it('should normalize gpt-5.1 codex and mini slugs', async () => { - expect(normalizeModel('gpt-5.1-codex')).toBe('gpt-5.1-codex'); - expect(normalizeModel('openai/gpt-5.1-codex')).toBe('gpt-5.1-codex'); - expect(normalizeModel('gpt-5.1-codex-mini')).toBe('gpt-5.1-codex-mini'); - expect(normalizeModel('gpt-5.1-codex-mini-high')).toBe('gpt-5.1-codex-mini'); - expect(normalizeModel('openai/gpt-5.1-codex-mini-medium')).toBe('gpt-5.1-codex-mini'); - }); - - it('should normalize gpt-5.2 pro presets', async () => { - expect(normalizeModel('gpt-5.2-pro')).toBe('gpt-5.2-pro'); - expect(normalizeModel('gpt-5.2-pro-low')).toBe('gpt-5.2-pro'); - expect(normalizeModel('openai/gpt-5.2-pro-high')).toBe('gpt-5.2-pro'); - }); - - it('should normalize gpt-5.1 general-purpose slugs', async () => { - expect(normalizeModel('gpt-5.1')).toBe('gpt-5.1'); - expect(normalizeModel('openai/gpt-5.1')).toBe('gpt-5.1'); - expect(normalizeModel('GPT 5.1 High')).toBe('gpt 5.1 high'); - }); - - it('should normalize future codex model variants without explicit map entries', async () => { - expect(normalizeModel('gpt-5.4-codex')).toBe('gpt-5.4-codex'); - expect(normalizeModel('gpt-5.4-codex-low')).toBe('gpt-5.4-codex'); 
- expect(normalizeModel('gpt-5.4-codex-medium')).toBe('gpt-5.4-codex'); - expect(normalizeModel('gpt-5.4-codex-high')).toBe('gpt-5.4-codex'); - expect(normalizeModel('gpt-5.4-codex-xhigh')).toBe('gpt-5.4-codex'); - expect(normalizeModel('openai/gpt-5.4-codex-xhigh')).toBe('gpt-5.4-codex'); - }); - }); - - // Edge case tests - avoid legacy or nonstandard coercion - describe('Edge cases', () => { - it('should handle uppercase and mixed case for known models', async () => { - expect(normalizeModel('GPT-5.3-CODEX')).toBe('gpt-5.3-codex'); - expect(normalizeModel('GpT-5.1-HiGh')).toBe('gpt-5.1'); - }); - - it('should not coerce legacy or verbose names', async () => { - expect(normalizeModel('GPT 5 Codex Low (ChatGPT Subscription)')).toBe( - 'gpt 5 codex low (chatgpt subscription)', - ); - expect(normalizeModel('my_gpt-5_codex')).toBe('my_gpt-5_codex'); - expect(normalizeModel('gpt.5.high')).toBe('gpt.5.high'); - }); - }); - }); - - describe('getModelConfig', () => { - describe('Per-model options (Bug Fix Verification)', () => { - it('should find per-model options using config key', async () => { - const userConfig: UserConfig = { - global: { reasoningEffort: 'medium' }, - models: { - 'gpt-5-codex-low': { - options: { reasoningEffort: 'low', textVerbosity: 'low' } - } - } - }; - - const result = getModelConfig('gpt-5-codex-low', userConfig); - expect(result.reasoningEffort).toBe('low'); - expect(result.textVerbosity).toBe('low'); - }); - - it('should merge global and per-model options (per-model wins)', async () => { - const userConfig: UserConfig = { - global: { - reasoningEffort: 'medium', - textVerbosity: 'medium', - include: ['reasoning.encrypted_content'] - }, - models: { - 'gpt-5-codex-high': { - options: { reasoningEffort: 'high' } // Override only effort - } - } - }; - - const result = getModelConfig('gpt-5-codex-high', userConfig); - expect(result.reasoningEffort).toBe('high'); // From per-model - expect(result.textVerbosity).toBe('medium'); // From global - 
expect(result.include).toEqual(['reasoning.encrypted_content']); // From global - }); - - it('should return global options when model not in config', async () => { - const userConfig: UserConfig = { - global: { reasoningEffort: 'medium' }, - models: { - 'gpt-5-codex-low': { options: { reasoningEffort: 'low' } } - } - }; - - // Looking up different model - const result = getModelConfig('gpt-5-codex', userConfig); - expect(result.reasoningEffort).toBe('medium'); // Global only - }); - - it('should handle empty config', async () => { - const result = getModelConfig('gpt-5-codex', { global: {}, models: {} }); - expect(result).toEqual({}); - }); - - it('should handle missing models object', async () => { - const userConfig: UserConfig = { - global: { reasoningEffort: 'low' }, - models: undefined as any - }; - const result = getModelConfig('gpt-5', userConfig); - expect(result.reasoningEffort).toBe('low'); - }); - }); - - describe('Backwards compatibility', () => { - it('should work with old verbose config keys', async () => { - const userConfig: UserConfig = { - global: {}, - models: { - 'GPT 5 Codex Low (ChatGPT Subscription)': { - options: { reasoningEffort: 'low' } - } - } - }; - - const result = getModelConfig('GPT 5 Codex Low (ChatGPT Subscription)', userConfig); - expect(result.reasoningEffort).toBe('low'); - }); - - it('should work with old configs that have id field', async () => { - const userConfig: UserConfig = { - global: {}, - models: { - 'gpt-5-codex-low': { - id: 'gpt-5-codex', // id field present but should be ignored - options: { reasoningEffort: 'low' } - } - } - }; - - const result = getModelConfig('gpt-5-codex-low', userConfig); - expect(result.reasoningEffort).toBe('low'); - }); - }); - - describe('Default models (no custom config)', () => { - it('should return global options for default gpt-5-codex', async () => { - const userConfig: UserConfig = { - global: { reasoningEffort: 'high' }, - models: {} - }; - - const result = 
getModelConfig('gpt-5-codex', userConfig); - expect(result.reasoningEffort).toBe('high'); - }); - - it('should return empty when no config at all', async () => { - const result = getModelConfig('gpt-5', undefined); - expect(result).toEqual({}); - }); - }); - }); - - describe('filterInput', () => { - it('should keep items without IDs unchanged', async () => { - const input: InputItem[] = [ - { type: 'message', role: 'user', content: 'hello' }, - ]; - const result = filterInput(input); - expect(result).toEqual(input); - expect(result![0]).not.toHaveProperty('id'); - }); - - it('should remove ALL message IDs (rs_, msg_, etc.) for store:false compatibility', async () => { - const input: InputItem[] = [ - { id: 'rs_123', type: 'message', role: 'assistant', content: 'hello' }, - { id: 'msg_456', type: 'message', role: 'user', content: 'world' }, - { id: 'assistant_789', type: 'message', role: 'assistant', content: 'test' }, - ]; - const result = filterInput(input); - - // All items should remain (no filtering), but ALL IDs removed - expect(result).toHaveLength(3); - expect(result![0]).not.toHaveProperty('id'); - expect(result![1]).not.toHaveProperty('id'); - expect(result![2]).not.toHaveProperty('id'); - expect(result![0].content).toBe('hello'); - expect(result![1].content).toBe('world'); - expect(result![2].content).toBe('test'); - }); - - it('should strip ID field but preserve all other properties', async () => { - const input: InputItem[] = [ - { - id: 'msg_123', - type: 'message', - role: 'user', - content: 'test', - metadata: { some: 'data' } - }, - ]; - const result = filterInput(input); - - expect(result).toHaveLength(1); - expect(result![0]).not.toHaveProperty('id'); - expect(result![0].type).toBe('message'); - expect(result![0].role).toBe('user'); - expect(result![0].content).toBe('test'); - expect(result![0]).toHaveProperty('metadata'); - }); - - it('should handle mixed items with and without IDs', async () => { - const input: InputItem[] = [ - { type: 
'message', role: 'user', content: '1' }, - { id: 'rs_stored', type: 'message', role: 'assistant', content: '2' }, - { id: 'msg_123', type: 'message', role: 'user', content: '3' }, - ]; - const result = filterInput(input); - - // All items kept, IDs removed from items that had them - expect(result).toHaveLength(3); - expect(result![0]).not.toHaveProperty('id'); - expect(result![1]).not.toHaveProperty('id'); - expect(result![2]).not.toHaveProperty('id'); - expect(result![0].content).toBe('1'); - expect(result![1].content).toBe('2'); - expect(result![2].content).toBe('3'); - }); - - it('should handle custom ID formats (future-proof)', async () => { - const input: InputItem[] = [ - { id: 'custom_id_format', type: 'message', role: 'user', content: 'test' }, - { id: 'another-format-123', type: 'message', role: 'user', content: 'test2' }, - ]; - const result = filterInput(input); - - expect(result).toHaveLength(2); - expect(result![0]).not.toHaveProperty('id'); - expect(result![1]).not.toHaveProperty('id'); - }); - - it('should return undefined for undefined input', async () => { - expect(filterInput(undefined)).toBeUndefined(); - }); - - it('should return non-array input as-is', async () => { - const notArray = { notAnArray: true }; - expect(filterInput(notArray as any)).toBe(notArray); - }); - - it('should handle empty array', async () => { - const input: InputItem[] = []; - const result = filterInput(input); - expect(result).toEqual([]); - }); - }); - - describe('transformRequestBody', () => { - const codexInstructions = 'Test Codex Instructions'; - - it('preserves existing prompt_cache_key passed by host (OpenCode)', async () => { - const body: RequestBody = { - model: 'gpt-5-codex', - input: [], - // Host-provided key (OpenCode session id) - prompt_cache_key: 'ses_host_key_123', - }; - const result: any = await transformRequestBody(body, codexInstructions); - expect(result.prompt_cache_key).toBe('ses_host_key_123'); - }); - - it('leaves prompt_cache_key unset when 
host does not supply one', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - }; - const result: any = await transformRequestBody(body, codexInstructions); - expect(result.prompt_cache_key).toBeUndefined(); - }); - - it('should set required Codex fields', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - }; - const result = await transformRequestBody(body, codexInstructions); - - expect(result.store).toBe(false); - expect(result.stream).toBe(true); - expect(result.instructions).toContain(codexInstructions); - }); - - it('should normalize model name', async () => { - const body: RequestBody = { - model: 'gpt-5-mini', - input: [], - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.model).toBe('gpt-5-mini'); - }); - - it('accepts base gpt-5.x model slugs', async () => { - const body: RequestBody = { - model: 'gpt-5.3', - input: [], - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.model).toBe('gpt-5.3'); - }); - - it('should apply default reasoning config', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - }; - const result = await transformRequestBody(body, codexInstructions); - - expect(result.reasoning?.effort).toBe('medium'); - expect(result.reasoning?.summary).toBe('auto'); - }); - - it('defaults reasoning summary to auto when runtime supports summaries', async () => { - const body: RequestBody = { - model: 'gpt-5.3-codex', - input: [], - }; - const runtimeDefaults = { - staticDefaultPersonality: 'none', - supportedReasoningEfforts: ['low', 'medium'], - defaultReasoningEffort: 'medium', - supportsReasoningSummaries: true, - reasoningSummaryFormat: 'experimental', - }; - const result = await transformRequestBody( - body, - codexInstructions, - { global: {}, models: {} }, - runtimeDefaults as any, - ); - - expect(result.reasoning?.summary).toBe('auto'); - }); - - it('normalizes reasoning summary on to 
auto', async () => { - const body: RequestBody = { - model: 'gpt-5.3-codex', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningSummary: 'on' }, - models: {}, - }; - const runtimeDefaults = { - staticDefaultPersonality: 'none', - supportedReasoningEfforts: ['low', 'medium'], - defaultReasoningEffort: 'medium', - supportsReasoningSummaries: true, - reasoningSummaryFormat: 'experimental', - }; - const result = await transformRequestBody( - body, - codexInstructions, - userConfig, - runtimeDefaults as any, - ); - - expect(result.reasoning?.summary).toBe('auto'); - }); - - it('disables reasoning summaries via custom settings toggle', async () => { - const body: RequestBody = { - model: 'gpt-5.3-codex', - input: [], - }; - const runtimeDefaults = { - staticDefaultPersonality: 'none', - supportedReasoningEfforts: ['low', 'medium'], - defaultReasoningEffort: 'medium', - supportsReasoningSummaries: true, - reasoningSummaryFormat: 'experimental', - }; - const pluginConfig = { - custom_settings: { - thinking_summaries: false, - }, - }; - const result = await transformRequestBody( - body, - codexInstructions, - { global: {}, models: {} }, - runtimeDefaults as any, - pluginConfig as any, - ); - - expect(result.reasoning?.summary).toBe('off'); - }); - - it('prefers opencode config over custom_settings for reasoning summary', async () => { - const body: RequestBody = { - model: 'gpt-5.3-codex', - input: [], - }; - const runtimeDefaults = { - staticDefaultPersonality: 'none', - supportedReasoningEfforts: ['low', 'medium'], - defaultReasoningEffort: 'medium', - supportsReasoningSummaries: true, - reasoningSummaryFormat: 'experimental', - }; - const userConfig: UserConfig = { - global: { reasoningSummary: 'detailed' }, - models: {}, - }; - const pluginConfig = { - custom_settings: { - thinking_summaries: false, - }, - }; - const result = await transformRequestBody( - body, - codexInstructions, - userConfig, - runtimeDefaults as any, - pluginConfig as any, - 
); - - expect(result.reasoning?.summary).toBe('detailed'); - }); - - it('should apply user reasoning config', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - }; - const userConfig: UserConfig = { - global: { - reasoningEffort: 'high', - reasoningSummary: 'detailed', - }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - - expect(result.reasoning?.effort).toBe('high'); - expect(result.reasoning?.summary).toBe('detailed'); - }); - - it('should respect reasoning config already set in body', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - reasoning: { - effort: 'low', - summary: 'auto', - }, - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'high', reasoningSummary: 'detailed' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - - expect(result.reasoning?.effort).toBe('low'); - expect(result.reasoning?.summary).toBe('auto'); - }); - - it('should use reasoning config from providerOptions when present', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - providerOptions: { - openai: { - reasoningEffort: 'high', - reasoningSummary: 'detailed', - }, - }, - }; - const result = await transformRequestBody(body, codexInstructions); - - expect(result.reasoning?.effort).toBe('high'); - expect(result.reasoning?.summary).toBe('detailed'); - }); - - it('should apply default text verbosity', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.text?.verbosity).toBe('medium'); - }); - - it('should apply user text verbosity', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - }; - const userConfig: UserConfig = { - global: { textVerbosity: 'low' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, 
userConfig); - expect(result.text?.verbosity).toBe('low'); - }); - - it('should use text verbosity from providerOptions when present', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - providerOptions: { - openai: { - textVerbosity: 'low', - }, - }, - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.text?.verbosity).toBe('low'); - }); - - it('should prefer body text verbosity over providerOptions', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - text: { verbosity: 'high' }, - providerOptions: { - openai: { - textVerbosity: 'low', - }, - }, - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.text?.verbosity).toBe('high'); - }); - - it('should set default include for encrypted reasoning', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.include).toEqual(['reasoning.encrypted_content']); - }); - - it('should use user-configured include', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - }; - const userConfig: UserConfig = { - global: { include: ['custom_field', 'reasoning.encrypted_content'] }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.include).toEqual(['custom_field', 'reasoning.encrypted_content']); - }); - - it('should always include reasoning.encrypted_content when include provided', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - include: ['custom_field'], - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.include).toEqual(['custom_field', 'reasoning.encrypted_content']); - }); - - it('should remove IDs from input array (keep all items, strip IDs)', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [ - { id: 'rs_123', type: 
'message', role: 'assistant', content: 'old' }, - { type: 'message', role: 'user', content: 'new' }, - ], - }; - const result = await transformRequestBody(body, codexInstructions); - - // All items kept, IDs removed - expect(result.input).toHaveLength(2); - expect(result.input![0]).not.toHaveProperty('id'); - expect(result.input![1]).not.toHaveProperty('id'); - expect(result.input![0].content).toBe('old'); - expect(result.input![1].content).toBe('new'); - }); - - it('should not prepend bridge or tool-remap message when tools are present', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [{ type: 'message', role: 'user', content: 'hello' }], - tools: [{ name: 'test_tool' }], - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.input).toHaveLength(1); - expect(result.input![0].role).toBe('user'); - }); - - it('should not add tool remap message when tools absent', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [{ type: 'message', role: 'user', content: 'hello' }], - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.input![0].role).toBe('user'); - }); - - it('should remove unsupported parameters', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - max_output_tokens: 1000, - max_completion_tokens: 2000, - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.max_output_tokens).toBeUndefined(); - expect(result.max_completion_tokens).toBeUndefined(); - }); - - it('should normalize minimal to low for gpt-5-codex', async () => { - const body: RequestBody = { - model: 'gpt-5-codex', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'minimal' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.reasoning?.effort).toBe('low'); - }); - - it('should clamp xhigh to high for codex-mini', async () => { - 
const body: RequestBody = { - model: 'gpt-5.1-codex-mini-high', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'xhigh' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.reasoning?.effort).toBe('high'); - }); - - it('should clamp none to medium for codex-mini', async () => { - const body: RequestBody = { - model: 'gpt-5.1-codex-mini-medium', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'none' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.reasoning?.effort).toBe('medium'); - }); - - it('should default codex-max to high effort', async () => { - const body: RequestBody = { - model: 'gpt-5.1-codex-max', - input: [], - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.reasoning?.effort).toBe('high'); - }); - - it('should default gpt-5.2-codex to high effort', async () => { - const body: RequestBody = { - model: 'gpt-5.2-codex', - input: [], - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.model).toBe('gpt-5.2-codex'); - expect(result.reasoning?.effort).toBe('high'); - }); - - it('should preserve xhigh for codex-max when requested', async () => { - const body: RequestBody = { - model: 'gpt-5.1-codex-max-xhigh', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningSummary: 'auto' }, - models: { - 'gpt-5.1-codex-max-xhigh': { - options: { reasoningEffort: 'xhigh', reasoningSummary: 'detailed' }, - }, - }, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.model).toBe('gpt-5.1-codex-max'); - expect(result.reasoning?.effort).toBe('xhigh'); - expect(result.reasoning?.summary).toBe('detailed'); - }); - - it('should preserve xhigh for gpt-5.2-codex when requested', async () => { - const body: RequestBody = { - 
model: 'gpt-5.2-codex-xhigh', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningSummary: 'auto' }, - models: { - 'gpt-5.2-codex-xhigh': { - options: { reasoningEffort: 'xhigh', reasoningSummary: 'detailed' }, - }, - }, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.model).toBe('gpt-5.2-codex'); - expect(result.reasoning?.effort).toBe('xhigh'); - expect(result.reasoning?.summary).toBe('detailed'); - }); - - it('should downgrade xhigh to high for non-max codex', async () => { - const body: RequestBody = { - model: 'gpt-5.1-codex-high', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'xhigh' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.model).toBe('gpt-5.1-codex'); - expect(result.reasoning?.effort).toBe('high'); - }); - - it('should downgrade xhigh to high for non-max general models', async () => { - const body: RequestBody = { - model: 'gpt-5.1-high', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'xhigh' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.model).toBe('gpt-5.1'); - expect(result.reasoning?.effort).toBe('high'); - }); - - it('should preserve none for GPT-5.2', async () => { - const body: RequestBody = { - model: 'gpt-5.2-none', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'none' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.model).toBe('gpt-5.2'); - expect(result.reasoning?.effort).toBe('none'); - }); - - it('should upgrade none to low for GPT-5.2-codex (codex does not support none)', async () => { - const body: RequestBody = { - model: 'gpt-5.2-codex', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'none' }, - models: 
{}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.model).toBe('gpt-5.2-codex'); - expect(result.reasoning?.effort).toBe('low'); - }); - - it('should normalize minimal to low for gpt-5.2-codex', async () => { - const body: RequestBody = { - model: 'gpt-5.2-codex', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'minimal' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.model).toBe('gpt-5.2-codex'); - expect(result.reasoning?.effort).toBe('low'); - }); - - it('should preserve none for GPT-5.1 general purpose', async () => { - const body: RequestBody = { - model: 'gpt-5.1-none', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'none' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.model).toBe('gpt-5.1'); - expect(result.reasoning?.effort).toBe('none'); - }); - - it('should upgrade none to low for GPT-5.1-codex (codex does not support none)', async () => { - const body: RequestBody = { - model: 'gpt-5.1-codex', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'none' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.model).toBe('gpt-5.1-codex'); - expect(result.reasoning?.effort).toBe('low'); - }); - - it('should upgrade none to low for GPT-5.1-codex-max (codex max does not support none)', async () => { - const body: RequestBody = { - model: 'gpt-5.1-codex-max', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'none' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.model).toBe('gpt-5.1-codex-max'); - expect(result.reasoning?.effort).toBe('low'); - }); - - it('should normalize minimal to low for non-codex 
models', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'minimal' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.reasoning?.effort).toBe('low'); - }); - - it('should use minimal effort for lightweight models', async () => { - const body: RequestBody = { - model: 'gpt-5-nano', - input: [], - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.reasoning?.effort).toBe('low'); - }); - - it('should normalize minimal to low when provided by the host', async () => { - const body: RequestBody = { - model: 'gpt-5-nano', - input: [], - reasoning: { effort: 'minimal' }, - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.reasoning?.effort).toBe('low'); - }); - - it('should convert orphaned function_call_output to message to preserve context', async () => { - const body: RequestBody = { - model: 'gpt-5-codex', - input: [ - { type: 'message', role: 'user', content: 'hello' }, - { type: 'function_call_output', role: 'assistant', call_id: 'orphan_call', name: 'read', output: '{}' } as any, - ], - }; - - const result = await transformRequestBody(body, codexInstructions); - - expect(result.tools).toBeUndefined(); - expect(result.input).toHaveLength(2); - expect(result.input![0].type).toBe('message'); - expect(result.input![1].type).toBe('message'); - expect(result.input![1].role).toBe('assistant'); - expect(result.input![1].content).toContain('[Previous read result; call_id=orphan_call]'); - }); - - it('should keep matched function_call pairs when no tools present (for compaction)', async () => { - const body: RequestBody = { - model: 'gpt-5-codex', - input: [ - { type: 'message', role: 'user', content: 'hello' }, - { type: 'function_call', call_id: 'call_1', name: 'write', arguments: '{}' } as any, - { type: 'function_call_output', 
call_id: 'call_1', output: 'success' } as any, - ], - }; - - const result = await transformRequestBody(body, codexInstructions); - - expect(result.tools).toBeUndefined(); - expect(result.input).toHaveLength(3); - expect(result.input![1].type).toBe('function_call'); - expect(result.input![2].type).toBe('function_call_output'); - }); - - it('should treat local_shell_call as a match for function_call_output', async () => { - const body: RequestBody = { - model: 'gpt-5-codex', - input: [ - { type: 'message', role: 'user', content: 'hello' }, - { - type: 'local_shell_call', - call_id: 'shell_call', - action: { type: 'exec', command: ['ls'] }, - } as any, - { type: 'function_call_output', call_id: 'shell_call', output: 'ok' } as any, - ], - }; - - const result = await transformRequestBody(body, codexInstructions); - - expect(result.input).toHaveLength(3); - expect(result.input![1].type).toBe('local_shell_call'); - expect(result.input![2].type).toBe('function_call_output'); - }); - - it('should keep matching custom_tool_call_output items', async () => { - const body: RequestBody = { - model: 'gpt-5-codex', - input: [ - { type: 'message', role: 'user', content: 'hello' }, - { - type: 'custom_tool_call', - call_id: 'custom_call', - name: 'mcp_tool', - input: '{}', - } as any, - { type: 'custom_tool_call_output', call_id: 'custom_call', output: 'done' } as any, - ], - }; - - const result = await transformRequestBody(body, codexInstructions); - - expect(result.input).toHaveLength(3); - expect(result.input![1].type).toBe('custom_tool_call'); - expect(result.input![2].type).toBe('custom_tool_call_output'); - }); - - it('should convert orphaned custom_tool_call_output to message', async () => { - const body: RequestBody = { - model: 'gpt-5-codex', - input: [ - { type: 'message', role: 'user', content: 'hello' }, - { type: 'custom_tool_call_output', call_id: 'orphan_custom', output: 'oops' } as any, - ], - }; - - const result = await transformRequestBody(body, codexInstructions); 
- - expect(result.input).toHaveLength(2); - expect(result.input![1].type).toBe('message'); - expect(result.input![1].content).toContain('[Previous tool result; call_id=orphan_custom]'); - }); - - describe('bridge removal parity', () => { - it('does not inject bridge content when tools are present', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [{ type: 'message', role: 'user', content: 'hello' }], - tools: [{ name: 'test_tool' }], - }; - const result = await transformRequestBody(body, codexInstructions); - - expect(result.input).toHaveLength(1); - expect(result.input![0].role).toBe('user'); - }); - - it('preserves OpenCode environment/AGENTS-style developer messages', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [ - { - type: 'message', - role: 'developer', - content: [ - 'Here is some useful information about the environment you are running in:', - '', - ' Working directory: /tmp/project', - '', - 'Instructions from: /tmp/project/AGENTS.md', - '# Project Guidelines', - ].join('\n'), - }, - { type: 'message', role: 'user', content: 'hello' }, - ], - tools: [{ name: 'test_tool' }], - }; - const result = await transformRequestBody(body, codexInstructions); - - expect(result.input).toHaveLength(2); - expect(result.input![0].role).toBe('developer'); - expect(String(result.input![0].content)).toContain('Working directory'); - expect(String(result.input![0].content)).toContain('Instructions from: /tmp/project/AGENTS.md'); - expect(result.input![1].role).toBe('user'); - }); - - it('keeps codex instructions as canonical instructions field', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [{ type: 'message', role: 'user', content: 'hello' }], - tools: [{ name: 'test_tool' }], - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.instructions).toContain(codexInstructions); - }); - }); - - describe('personality resolution', () => { - it('applies custom personality 
from local file', async () => { - const root = mkdtempSync(join(tmpdir(), 'personality-local-')); - const cwd = process.cwd(); - process.chdir(root); - try { - const localDir = join(root, '.opencode', 'Personalities'); - mkdirSync(localDir, { recursive: true }); - writeFileSync( - join(localDir, 'Idiot.md'), - 'Chaotic friendly override', - 'utf8', - ); - const body: RequestBody = { - model: 'gpt-5.3-codex', - input: [], - }; - const userConfig: UserConfig = { global: {}, models: {} }; - const pluginConfig = { - custom_settings: { - options: { personality: 'Idiot' }, - models: {}, - }, - }; - const runtimeDefaults = { - instructionsTemplate: 'BASE INSTRUCTIONS\n\n{{ personality }}', - personalityMessages: { - friendly: 'Friendly from runtime', - pragmatic: 'Pragmatic from runtime', - }, - staticDefaultPersonality: 'pragmatic', - }; - const result = await transformRequestBody( - body, - 'BASE INSTRUCTIONS', - userConfig, - runtimeDefaults as any, - pluginConfig as any, - ); - expect(result.instructions).toContain('Chaotic friendly override'); - } finally { - process.chdir(cwd); - rmSync(root, { recursive: true, force: true }); - } - }); - - it('strips cache marker from personality files', async () => { - const root = mkdtempSync(join(tmpdir(), 'personality-marker-')); - const cwd = process.cwd(); - process.chdir(root); - try { - const localDir = join(root, '.opencode', 'Personalities'); - mkdirSync(localDir, { recursive: true }); - writeFileSync( - join(localDir, 'Friendly.md'), - '\nFriendly from cache', - 'utf8', - ); - const body: RequestBody = { - model: 'gpt-5.3-codex', - input: [], - }; - const result = await transformRequestBody( - body, - 'BASE INSTRUCTIONS', - { global: {}, models: {} }, - undefined, - { custom_settings: { options: { personality: 'friendly' }, models: {} } } as any, - ); - expect(result.instructions).toContain('Friendly from cache'); - expect(result.instructions).not.toContain('opencode personality cache'); - } finally { - 
process.chdir(cwd); - rmSync(root, { recursive: true, force: true }); - } - }); - - it('rejects personality names with path traversal', async () => { - const root = mkdtempSync(join(tmpdir(), 'personality-traversal-')); - const cwd = process.cwd(); - process.chdir(root); - try { - const localDir = join(root, '.opencode', 'Personalities'); - mkdirSync(localDir, { recursive: true }); - writeFileSync( - join(root, '.opencode', 'evil.md'), - 'do not load', - 'utf8', - ); - const body: RequestBody = { - model: 'gpt-5.3-codex', - input: [], - }; - const userConfig: UserConfig = { global: {}, models: {} }; - const pluginConfig = { - custom_settings: { - options: { personality: '../evil' }, - models: {}, - }, - }; - const runtimeDefaults = { - instructionsTemplate: 'BASE INSTRUCTIONS\n\n{{ personality }}', - personalityMessages: { - pragmatic: 'Pragmatic from runtime', - }, - staticDefaultPersonality: 'pragmatic', - }; - const result = await transformRequestBody( - body, - 'BASE INSTRUCTIONS', - userConfig, - runtimeDefaults as any, - pluginConfig as any, - ); - expect(result.instructions).not.toContain('do not load'); - } finally { - process.chdir(cwd); - rmSync(root, { recursive: true, force: true }); - } - }); - - it('rejects personality names with Windows-style traversal', async () => { - const root = mkdtempSync(join(tmpdir(), 'personality-traversal-win-')); - const cwd = process.cwd(); - process.chdir(root); - try { - const localDir = join(root, '.opencode', 'Personalities'); - mkdirSync(localDir, { recursive: true }); - writeFileSync( - join(root, '.opencode', 'evil.md'), - 'do not load', - 'utf8', - ); - const body: RequestBody = { - model: 'gpt-5.3-codex', - input: [], - }; - const userConfig: UserConfig = { global: {}, models: {} }; - const pluginConfig = { - custom_settings: { - options: { personality: '..\\evil' }, - models: {}, - }, - }; - const runtimeDefaults = { - instructionsTemplate: 'BASE INSTRUCTIONS\n\n{{ personality }}', - personalityMessages: { - 
pragmatic: 'Pragmatic from runtime', - }, - staticDefaultPersonality: 'pragmatic', - }; - const result = await transformRequestBody( - body, - 'BASE INSTRUCTIONS', - userConfig, - runtimeDefaults as any, - pluginConfig as any, - ); - expect(result.instructions).not.toContain('do not load'); - } finally { - process.chdir(cwd); - rmSync(root, { recursive: true, force: true }); - } - }); - - it('defaults to pragmatic when no custom personality set', async () => { - const root = mkdtempSync(join(tmpdir(), 'personality-default-')); - const originalXdg = process.env.XDG_CONFIG_HOME; - process.env.XDG_CONFIG_HOME = root; - const body: RequestBody = { - model: 'gpt-5.3-codex', - input: [], - }; - const userConfig: UserConfig = { global: {}, models: {} }; - const runtimeDefaults = { - instructionsTemplate: 'BASE INSTRUCTIONS\n\n{{ personality }}', - personalityMessages: { - friendly: 'Friendly from runtime', - pragmatic: 'Pragmatic from runtime', - }, - staticDefaultPersonality: 'pragmatic', - }; - try { - const result = await transformRequestBody( - body, - 'BASE INSTRUCTIONS', - userConfig, - runtimeDefaults as any, - {} as any, - ); - expect(result.instructions).toContain('Pragmatic from runtime'); - } finally { - if (originalXdg === undefined) { - delete process.env.XDG_CONFIG_HOME; - } else { - process.env.XDG_CONFIG_HOME = originalXdg; - } - rmSync(root, { recursive: true, force: true }); - } - }); - - it('uses runtime default when personality is set to default', async () => { - const body: RequestBody = { - model: 'gpt-5.3-codex', - input: [], - }; - const userConfig: UserConfig = { global: {}, models: {} }; - const pluginConfig = { - custom_settings: { - options: { personality: 'default' }, - models: {}, - }, - }; - const runtimeDefaults = { - instructionsTemplate: 'BASE INSTRUCTIONS\n\n{{ personality }}', - personalityMessages: { - friendly: 'Friendly from runtime', - pragmatic: 'Pragmatic from runtime', - }, - onlineDefaultPersonality: 'friendly', - 
staticDefaultPersonality: 'pragmatic', - }; - const result = await transformRequestBody( - body, - 'BASE INSTRUCTIONS', - userConfig, - runtimeDefaults as any, - pluginConfig as any, - ); - expect(result.instructions).toContain('Friendly from runtime'); - }); - - it('uses explicit runtime default message when provided', async () => { - const body: RequestBody = { - model: 'gpt-5.3-codex', - input: [], - }; - const userConfig: UserConfig = { global: {}, models: {} }; - const pluginConfig = { - custom_settings: { - options: { personality: 'default' }, - models: {}, - }, - }; - const runtimeDefaults = { - instructionsTemplate: 'BASE INSTRUCTIONS\n\n{{ personality }}', - personalityMessages: { - default: 'Default from runtime', - pragmatic: 'Pragmatic from runtime', - }, - staticDefaultPersonality: 'pragmatic', - }; - const result = await transformRequestBody( - body, - 'BASE INSTRUCTIONS', - userConfig, - runtimeDefaults as any, - pluginConfig as any, - ); - expect(result.instructions).toContain('Default from runtime'); - }); - - it('logs invalid personality once per process while coercing to pragmatic', async () => { - const previousLogging = process.env.ENABLE_PLUGIN_REQUEST_LOGGING; - process.env.ENABLE_PLUGIN_REQUEST_LOGGING = '1'; - const logSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - - try { - vi.resetModules(); - const dynamicModule = await import('../lib/request/request-transformer.js'); - const dynamicTransform = dynamicModule.transformRequestBody; - const body: RequestBody = { - model: 'gpt-5.3-codex', - input: [], - }; - const userConfig: UserConfig = { - global: {}, - models: {}, - }; - const pluginConfig = { - custom_settings: { - options: { personality: 'INVALID' }, - models: {}, - }, - }; - - await dynamicTransform( - body, - 'BASE INSTRUCTIONS', - userConfig, - undefined, - pluginConfig as any, - ); - await dynamicTransform( - body, - 'BASE INSTRUCTIONS', - userConfig, - undefined, - pluginConfig as any, - ); - - const invalidLogs = 
logSpy.mock.calls.filter((call) => - call.some((part) => - String(part).includes('Invalid personality "INVALID" detected; coercing to "pragmatic"'), - ), - ); - expect(invalidLogs).toHaveLength(1); - } finally { - if (previousLogging === undefined) { - delete process.env.ENABLE_PLUGIN_REQUEST_LOGGING; - } else { - process.env.ENABLE_PLUGIN_REQUEST_LOGGING = previousLogging; - } - vi.restoreAllMocks(); - vi.resetModules(); - } - }); - }); - - // Unknown model validation happens when runtime defaults are resolved from the server catalog. - - // NEW: Integration tests for all config scenarios - describe('Integration: Complete Config Scenarios', () => { - describe('Scenario 1: Default models (no custom config)', () => { - it('should handle gpt-5-codex with global options only', async () => { - const body: RequestBody = { - model: 'gpt-5-codex', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'high' }, - models: {}, - }; - - const result = await transformRequestBody( - body, - codexInstructions, - userConfig, - ); - - expect(result.model).toBe('gpt-5.1-codex'); - expect(result.reasoning?.effort).toBe('high'); // From global - expect(result.store).toBe(false); - }); - - it('should handle gpt-5-mini normalizing to gpt-5.1', async () => { - const body: RequestBody = { - model: 'gpt-5-mini', - input: [], - }; - - const result = await transformRequestBody(body, codexInstructions); - - expect(result.model).toBe('gpt-5-mini'); - expect(result.reasoning?.effort).toBe('low'); // Lightweight defaults - }); - }); - - describe('Scenario 2: Custom preset names (new style)', () => { - const userConfig: UserConfig = { - global: { reasoningEffort: 'medium', include: ['reasoning.encrypted_content'] }, - models: { - 'gpt-5-codex-low': { - options: { reasoningEffort: 'low' }, - }, - 'gpt-5-codex-high': { - options: { reasoningEffort: 'high', reasoningSummary: 'detailed' }, - }, - }, - }; - - it('should apply per-model options for gpt-5-codex-low', async 
() => { - const body: RequestBody = { - model: 'gpt-5-codex-low', - input: [], - }; - - const result = await transformRequestBody( - body, - codexInstructions, - userConfig, - ); - - expect(result.model).toBe('gpt-5.1-codex'); - expect(result.reasoning?.effort).toBe('low'); // From per-model - expect(result.include).toEqual(['reasoning.encrypted_content']); // From global - }); - - it('should apply per-model options for gpt-5-codex-high', async () => { - const body: RequestBody = { - model: 'gpt-5-codex-high', - input: [], - }; - - const result = await transformRequestBody( - body, - codexInstructions, - userConfig, - ); - - expect(result.model).toBe('gpt-5.1-codex'); - expect(result.reasoning?.effort).toBe('high'); // From per-model - expect(result.reasoning?.summary).toBe('detailed'); // From per-model - }); - - it('should use global options for default gpt-5-codex', async () => { - const body: RequestBody = { - model: 'gpt-5-codex', - input: [], - }; - - const result = await transformRequestBody( - body, - codexInstructions, - userConfig, - ); - - expect(result.model).toBe('gpt-5.1-codex'); - expect(result.reasoning?.effort).toBe('medium'); // From global (no per-model) - }); - }); - - describe('Scenario 3: Backwards compatibility (old verbose names)', () => { - const userConfig: UserConfig = { - global: {}, - models: { - 'GPT 5 Codex Low (ChatGPT Subscription)': { - options: { reasoningEffort: 'low', textVerbosity: 'low' }, - }, - }, - }; - - it('should find and apply old config format', async () => { - const body: RequestBody = { - model: 'GPT 5 Codex Low (ChatGPT Subscription)', - input: [], - }; - - const result = await transformRequestBody( - body, - codexInstructions, - userConfig, - ); - - expect(result.model).toBe('gpt 5 codex low (chatgpt subscription)'); - expect(result.reasoning?.effort).toBe('low'); - expect(result.text?.verbosity).toBe('low'); - }); - }); - - describe('Scenario 4: Mixed default + custom models', () => { - const userConfig: 
UserConfig = { - global: { reasoningEffort: 'medium' }, - models: { - 'gpt-5-codex-low': { - options: { reasoningEffort: 'low' }, - }, - }, - }; - - it('should use per-model for custom variant', async () => { - const body: RequestBody = { - model: 'gpt-5-codex-low', - input: [], - }; - - const result = await transformRequestBody( - body, - codexInstructions, - userConfig, - ); - - expect(result.reasoning?.effort).toBe('low'); // Per-model - }); - - it('should use global for default model', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - }; - - const result = await transformRequestBody( - body, - codexInstructions, - userConfig, - ); - - expect(result.reasoning?.effort).toBe('medium'); // Global - }); - }); - - describe('Scenario 5: Message ID filtering with multi-turn', () => { - it('should remove ALL IDs in multi-turn conversation', async () => { - const body: RequestBody = { - model: 'gpt-5-codex', - input: [ - { id: 'msg_turn1', type: 'message', role: 'user', content: 'first' }, - { id: 'rs_response1', type: 'message', role: 'assistant', content: 'response' }, - { id: 'msg_turn2', type: 'message', role: 'user', content: 'second' }, - { id: 'assistant_123', type: 'message', role: 'assistant', content: 'reply' }, - ], - }; - - const result = await transformRequestBody(body, codexInstructions); - - // All items kept, ALL IDs removed - expect(result.input).toHaveLength(4); - expect(result.input!.every((item) => !item.id)).toBe(true); - expect(result.store).toBe(false); // Stateless mode - expect(result.include).toEqual(['reasoning.encrypted_content']); - }); - }); - - describe('Scenario 6: Complete end-to-end transformation', () => { - it('should handle full transformation: custom model + IDs + tools', async () => { - const userConfig: UserConfig = { - global: { include: ['reasoning.encrypted_content'] }, - models: { - 'gpt-5-codex-low': { - options: { - reasoningEffort: 'low', - textVerbosity: 'low', - reasoningSummary: 'auto', - }, - 
}, - }, - }; - - const body: RequestBody = { - model: 'gpt-5-codex-low', - input: [ - { id: 'msg_1', type: 'message', role: 'user', content: 'test' }, - { id: 'rs_2', type: 'message', role: 'assistant', content: 'reply' }, - ], - tools: [{ name: 'edit' }], - }; - - const result = await transformRequestBody( - body, - codexInstructions, - userConfig, - ); - - // Model normalized for legacy identifiers - expect(result.model).toBe('gpt-5.1-codex'); - - // IDs removed - expect(result.input!.every((item) => !item.id)).toBe(true); - - // Per-model options applied - expect(result.reasoning?.effort).toBe('low'); - expect(result.reasoning?.summary).toBe('auto'); - expect(result.text?.verbosity).toBe('low'); - - // Codex fields set - expect(result.store).toBe(false); - expect(result.stream).toBe(true); - expect(result.instructions).toContain(codexInstructions); - expect(result.include).toEqual(['reasoning.encrypted_content']); - }); - }); - }); - }); + normalizeModel, + getModelConfig, + filterInput, + transformRequestBody, +} from "../lib/request/request-transformer.js"; +import { createSyntheticErrorResponse } from "../lib/request/response-handler.js"; +import type { RequestBody, UserConfig, InputItem } from "../lib/types.js"; +import { mkdtempSync, mkdirSync, rmSync, writeFileSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; + +describe("Request Transformer Module", () => { + describe("synthetic error responses", () => { + it("creates JSON error payloads", async () => { + const response = createSyntheticErrorResponse( + "Bad model", + 400, + "unsupported_model", + ); + const payload = await response.json(); + expect(payload.error.message).toContain("Bad model"); + expect(payload.error.type).toBe("unsupported_model"); + }); + }); + + describe("normalizeModel", () => { + it("should normalize known gpt-5.x codex models", async () => { + expect(normalizeModel("gpt-5.3-codex")).toBe("gpt-5.3-codex"); + 
expect(normalizeModel("openai/gpt-5.2-codex-high")).toBe("gpt-5.2-codex"); + }); + + it("should normalize known gpt-5.x general models", async () => { + expect(normalizeModel("gpt-5.2")).toBe("gpt-5.2"); + expect(normalizeModel("openai/gpt-5.1-high")).toBe("gpt-5.1"); + }); + + it("should normalize legacy gpt-5 aliases", async () => { + expect(normalizeModel("gpt-5")).toBe("gpt-5.1"); + expect(normalizeModel("gpt-5-codex")).toBe("gpt-5.1-codex"); + expect(normalizeModel("gpt-5-codex-low")).toBe("gpt-5.1-codex"); + expect(normalizeModel("codex-mini-latest")).toBe("gpt-5.1-codex-mini"); + }); + + it("should leave unknown models untouched", async () => { + expect(normalizeModel("unknown-model")).toBe("unknown-model"); + expect(normalizeModel("gpt-4")).toBe("gpt-4"); + }); + + it("should default to gpt-5.1 when model is missing", async () => { + expect(normalizeModel(undefined)).toBe("gpt-5.1"); + expect(normalizeModel("")).toBe("gpt-5.1"); + }); + + // Codex CLI preset name tests - gpt-5.x only + describe("Codex CLI preset names", () => { + it("should normalize gpt-5.1 codex mini presets", async () => { + expect(normalizeModel("gpt-5.1-codex-mini")).toBe("gpt-5.1-codex-mini"); + expect(normalizeModel("gpt-5.1-codex-mini-high")).toBe( + "gpt-5.1-codex-mini", + ); + expect(normalizeModel("openai/gpt-5.1-codex-mini-medium")).toBe( + "gpt-5.1-codex-mini", + ); + }); + + it("should normalize gpt-5.1 codex max presets", async () => { + expect(normalizeModel("gpt-5.1-codex-max")).toBe("gpt-5.1-codex-max"); + expect(normalizeModel("gpt-5.1-codex-max-high")).toBe( + "gpt-5.1-codex-max", + ); + expect(normalizeModel("gpt-5.1-codex-max-xhigh")).toBe( + "gpt-5.1-codex-max", + ); + expect(normalizeModel("openai/gpt-5.1-codex-max-medium")).toBe( + "gpt-5.1-codex-max", + ); + }); + + it("should normalize gpt-5.3 and gpt-5.2 codex presets", async () => { + expect(normalizeModel("gpt-5.2-codex")).toBe("gpt-5.2-codex"); + 
expect(normalizeModel("gpt-5.2-codex-low")).toBe("gpt-5.2-codex"); + expect(normalizeModel("gpt-5.2-codex-medium")).toBe("gpt-5.2-codex"); + expect(normalizeModel("gpt-5.2-codex-high")).toBe("gpt-5.2-codex"); + expect(normalizeModel("gpt-5.2-codex-xhigh")).toBe("gpt-5.2-codex"); + expect(normalizeModel("openai/gpt-5.2-codex-xhigh")).toBe( + "gpt-5.2-codex", + ); + expect(normalizeModel("gpt-5.3-codex")).toBe("gpt-5.3-codex"); + expect(normalizeModel("gpt-5.3-codex-low")).toBe("gpt-5.3-codex"); + expect(normalizeModel("gpt-5.3-codex-medium")).toBe("gpt-5.3-codex"); + expect(normalizeModel("gpt-5.3-codex-high")).toBe("gpt-5.3-codex"); + expect(normalizeModel("gpt-5.3-codex-xhigh")).toBe("gpt-5.3-codex"); + expect(normalizeModel("openai/gpt-5.3-codex")).toBe("gpt-5.3-codex"); + expect(normalizeModel("openai/gpt-5.3-codex-xhigh")).toBe( + "gpt-5.3-codex", + ); + }); + + it("should normalize gpt-5.1 codex and mini slugs", async () => { + expect(normalizeModel("gpt-5.1-codex")).toBe("gpt-5.1-codex"); + expect(normalizeModel("openai/gpt-5.1-codex")).toBe("gpt-5.1-codex"); + expect(normalizeModel("gpt-5.1-codex-mini")).toBe("gpt-5.1-codex-mini"); + expect(normalizeModel("gpt-5.1-codex-mini-high")).toBe( + "gpt-5.1-codex-mini", + ); + expect(normalizeModel("openai/gpt-5.1-codex-mini-medium")).toBe( + "gpt-5.1-codex-mini", + ); + }); + + it("should normalize gpt-5.2 pro presets", async () => { + expect(normalizeModel("gpt-5.2-pro")).toBe("gpt-5.2-pro"); + expect(normalizeModel("gpt-5.2-pro-low")).toBe("gpt-5.2-pro"); + expect(normalizeModel("openai/gpt-5.2-pro-high")).toBe("gpt-5.2-pro"); + }); + + it("should normalize gpt-5.1 general-purpose slugs", async () => { + expect(normalizeModel("gpt-5.1")).toBe("gpt-5.1"); + expect(normalizeModel("openai/gpt-5.1")).toBe("gpt-5.1"); + expect(normalizeModel("GPT 5.1 High")).toBe("gpt 5.1 high"); + }); + + it("should normalize future codex model variants without explicit map entries", async () => { + 
expect(normalizeModel("gpt-5.4-codex")).toBe("gpt-5.4-codex"); + expect(normalizeModel("gpt-5.4-codex-low")).toBe("gpt-5.4-codex"); + expect(normalizeModel("gpt-5.4-codex-medium")).toBe("gpt-5.4-codex"); + expect(normalizeModel("gpt-5.4-codex-high")).toBe("gpt-5.4-codex"); + expect(normalizeModel("gpt-5.4-codex-xhigh")).toBe("gpt-5.4-codex"); + expect(normalizeModel("openai/gpt-5.4-codex-xhigh")).toBe( + "gpt-5.4-codex", + ); + }); + }); + + // Edge case tests - avoid legacy or nonstandard coercion + describe("Edge cases", () => { + it("should handle uppercase and mixed case for known models", async () => { + expect(normalizeModel("GPT-5.3-CODEX")).toBe("gpt-5.3-codex"); + expect(normalizeModel("GpT-5.1-HiGh")).toBe("gpt-5.1"); + }); + + it("should not coerce legacy or verbose names", async () => { + expect(normalizeModel("GPT 5 Codex Low (Codex)")).toBe( + "gpt 5 codex low (codex)", + ); + expect(normalizeModel("my_gpt-5_codex")).toBe("my_gpt-5_codex"); + expect(normalizeModel("gpt.5.high")).toBe("gpt.5.high"); + }); + }); + }); + + describe("getModelConfig", () => { + describe("Per-model options (Bug Fix Verification)", () => { + it("should find per-model options using config key", async () => { + const userConfig: UserConfig = { + global: { reasoningEffort: "medium" }, + models: { + "gpt-5-codex-low": { + options: { reasoningEffort: "low", textVerbosity: "low" }, + }, + }, + }; + + const result = getModelConfig("gpt-5-codex-low", userConfig); + expect(result.reasoningEffort).toBe("low"); + expect(result.textVerbosity).toBe("low"); + }); + + it("should merge global and per-model options (per-model wins)", async () => { + const userConfig: UserConfig = { + global: { + reasoningEffort: "medium", + textVerbosity: "medium", + include: ["reasoning.encrypted_content"], + }, + models: { + "gpt-5-codex-high": { + options: { reasoningEffort: "high" }, // Override only effort + }, + }, + }; + + const result = getModelConfig("gpt-5-codex-high", userConfig); + 
expect(result.reasoningEffort).toBe("high"); // From per-model + expect(result.textVerbosity).toBe("medium"); // From global + expect(result.include).toEqual(["reasoning.encrypted_content"]); // From global + }); + + it("should return global options when model not in config", async () => { + const userConfig: UserConfig = { + global: { reasoningEffort: "medium" }, + models: { + "gpt-5-codex-low": { options: { reasoningEffort: "low" } }, + }, + }; + + // Looking up different model + const result = getModelConfig("gpt-5-codex", userConfig); + expect(result.reasoningEffort).toBe("medium"); // Global only + }); + + it("should handle empty config", async () => { + const result = getModelConfig("gpt-5-codex", { + global: {}, + models: {}, + }); + expect(result).toEqual({}); + }); + + it("should handle missing models object", async () => { + const userConfig: UserConfig = { + global: { reasoningEffort: "low" }, + models: undefined as any, + }; + const result = getModelConfig("gpt-5", userConfig); + expect(result.reasoningEffort).toBe("low"); + }); + }); + + describe("Backwards compatibility", () => { + it("should work with old verbose config keys", async () => { + const userConfig: UserConfig = { + global: {}, + models: { + "GPT 5 Codex Low (Codex)": { + options: { reasoningEffort: "low" }, + }, + }, + }; + + const result = getModelConfig("GPT 5 Codex Low (Codex)", userConfig); + expect(result.reasoningEffort).toBe("low"); + }); + + it("should work with old configs that have id field", async () => { + const userConfig: UserConfig = { + global: {}, + models: { + "gpt-5-codex-low": { + id: "gpt-5-codex", // id field present but should be ignored + options: { reasoningEffort: "low" }, + }, + }, + }; + + const result = getModelConfig("gpt-5-codex-low", userConfig); + expect(result.reasoningEffort).toBe("low"); + }); + }); + + describe("Default models (no custom config)", () => { + it("should return global options for default gpt-5-codex", async () => { + const userConfig: 
UserConfig = { + global: { reasoningEffort: "high" }, + models: {}, + }; + + const result = getModelConfig("gpt-5-codex", userConfig); + expect(result.reasoningEffort).toBe("high"); + }); + + it("should return empty when no config at all", async () => { + const result = getModelConfig("gpt-5", undefined); + expect(result).toEqual({}); + }); + }); + }); + + describe("filterInput", () => { + it("should keep items without IDs unchanged", async () => { + const input: InputItem[] = [ + { type: "message", role: "user", content: "hello" }, + ]; + const result = filterInput(input); + expect(result).toEqual(input); + expect(result![0]).not.toHaveProperty("id"); + }); + + it("should remove ALL message IDs (rs_, msg_, etc.) for store:false compatibility", async () => { + const input: InputItem[] = [ + { id: "rs_123", type: "message", role: "assistant", content: "hello" }, + { id: "msg_456", type: "message", role: "user", content: "world" }, + { + id: "assistant_789", + type: "message", + role: "assistant", + content: "test", + }, + ]; + const result = filterInput(input); + + // All items should remain (no filtering), but ALL IDs removed + expect(result).toHaveLength(3); + expect(result![0]).not.toHaveProperty("id"); + expect(result![1]).not.toHaveProperty("id"); + expect(result![2]).not.toHaveProperty("id"); + expect(result![0].content).toBe("hello"); + expect(result![1].content).toBe("world"); + expect(result![2].content).toBe("test"); + }); + + it("should strip ID field but preserve all other properties", async () => { + const input: InputItem[] = [ + { + id: "msg_123", + type: "message", + role: "user", + content: "test", + metadata: { some: "data" }, + }, + ]; + const result = filterInput(input); + + expect(result).toHaveLength(1); + expect(result![0]).not.toHaveProperty("id"); + expect(result![0].type).toBe("message"); + expect(result![0].role).toBe("user"); + expect(result![0].content).toBe("test"); + expect(result![0]).toHaveProperty("metadata"); + }); + + it("should 
handle mixed items with and without IDs", async () => { + const input: InputItem[] = [ + { type: "message", role: "user", content: "1" }, + { id: "rs_stored", type: "message", role: "assistant", content: "2" }, + { id: "msg_123", type: "message", role: "user", content: "3" }, + ]; + const result = filterInput(input); + + // All items kept, IDs removed from items that had them + expect(result).toHaveLength(3); + expect(result![0]).not.toHaveProperty("id"); + expect(result![1]).not.toHaveProperty("id"); + expect(result![2]).not.toHaveProperty("id"); + expect(result![0].content).toBe("1"); + expect(result![1].content).toBe("2"); + expect(result![2].content).toBe("3"); + }); + + it("should handle custom ID formats (future-proof)", async () => { + const input: InputItem[] = [ + { + id: "custom_id_format", + type: "message", + role: "user", + content: "test", + }, + { + id: "another-format-123", + type: "message", + role: "user", + content: "test2", + }, + ]; + const result = filterInput(input); + + expect(result).toHaveLength(2); + expect(result![0]).not.toHaveProperty("id"); + expect(result![1]).not.toHaveProperty("id"); + }); + + it("should return undefined for undefined input", async () => { + expect(filterInput(undefined)).toBeUndefined(); + }); + + it("should return non-array input as-is", async () => { + const notArray = { notAnArray: true }; + expect(filterInput(notArray as any)).toBe(notArray); + }); + + it("should handle empty array", async () => { + const input: InputItem[] = []; + const result = filterInput(input); + expect(result).toEqual([]); + }); + }); + + describe("transformRequestBody", () => { + const codexInstructions = "Test Codex Instructions"; + + it("preserves existing prompt_cache_key passed by host (OpenCode)", async () => { + const body: RequestBody = { + model: "gpt-5-codex", + input: [], + // Host-provided key (OpenCode session id) + prompt_cache_key: "ses_host_key_123", + }; + const result: any = await transformRequestBody(body, 
codexInstructions); + expect(result.prompt_cache_key).toBe("ses_host_key_123"); + }); + + it("leaves prompt_cache_key unset when host does not supply one", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + const result: any = await transformRequestBody(body, codexInstructions); + expect(result.prompt_cache_key).toBeUndefined(); + }); + + it("should set required Codex fields", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + const result = await transformRequestBody(body, codexInstructions); + + expect(result.store).toBe(false); + expect(result.stream).toBe(true); + expect(result.instructions).toContain(codexInstructions); + }); + + it("should normalize model name", async () => { + const body: RequestBody = { + model: "gpt-5-mini", + input: [], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.model).toBe("gpt-5-mini"); + }); + + it("accepts base gpt-5.x model slugs", async () => { + const body: RequestBody = { + model: "gpt-5.3", + input: [], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.model).toBe("gpt-5.3"); + }); + + it("should apply default reasoning config", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + const result = await transformRequestBody(body, codexInstructions); + + expect(result.reasoning?.effort).toBe("medium"); + expect(result.reasoning?.summary).toBe("auto"); + }); + + it("should apply user reasoning config", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + const userConfig: UserConfig = { + global: { + reasoningEffort: "high", + reasoningSummary: "detailed", + }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + + expect(result.reasoning?.effort).toBe("high"); + expect(result.reasoning?.summary).toBe("detailed"); + }); + + it("should respect reasoning config already 
set in body", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + reasoning: { + effort: "low", + summary: "auto", + }, + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "high", reasoningSummary: "detailed" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + + expect(result.reasoning?.effort).toBe("low"); + expect(result.reasoning?.summary).toBe("auto"); + }); + + it("should use reasoning config from providerOptions when present", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + providerOptions: { + openai: { + reasoningEffort: "high", + reasoningSummary: "detailed", + }, + }, + }; + const result = await transformRequestBody(body, codexInstructions); + + expect(result.reasoning?.effort).toBe("high"); + expect(result.reasoning?.summary).toBe("detailed"); + }); + + it("should apply default text verbosity", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.text?.verbosity).toBe("medium"); + }); + + it("should apply user text verbosity", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + const userConfig: UserConfig = { + global: { textVerbosity: "low" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + expect(result.text?.verbosity).toBe("low"); + }); + + it("should use text verbosity from providerOptions when present", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + providerOptions: { + openai: { + textVerbosity: "low", + }, + }, + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.text?.verbosity).toBe("low"); + }); + + it("should prefer body text verbosity over providerOptions", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + 
text: { verbosity: "high" }, + providerOptions: { + openai: { + textVerbosity: "low", + }, + }, + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.text?.verbosity).toBe("high"); + }); + + it("should set default include for encrypted reasoning", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.include).toEqual(["reasoning.encrypted_content"]); + }); + + it("should use user-configured include", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + const userConfig: UserConfig = { + global: { include: ["custom_field", "reasoning.encrypted_content"] }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + expect(result.include).toEqual([ + "custom_field", + "reasoning.encrypted_content", + ]); + }); + + it("should always include reasoning.encrypted_content when include provided", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + include: ["custom_field"], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.include).toEqual([ + "custom_field", + "reasoning.encrypted_content", + ]); + }); + + it("should remove IDs from input array (keep all items, strip IDs)", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [ + { id: "rs_123", type: "message", role: "assistant", content: "old" }, + { type: "message", role: "user", content: "new" }, + ], + }; + const result = await transformRequestBody(body, codexInstructions); + + // All items kept, IDs removed + expect(result.input).toHaveLength(2); + expect(result.input![0]).not.toHaveProperty("id"); + expect(result.input![1]).not.toHaveProperty("id"); + expect(result.input![0].content).toBe("old"); + expect(result.input![1].content).toBe("new"); + }); + + it("should not prepend bridge or tool-remap message 
when tools are present", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [{ type: "message", role: "user", content: "hello" }], + tools: [{ name: "test_tool" }], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.input).toHaveLength(1); + expect(result.input![0].role).toBe("user"); + }); + + it("should not add tool remap message when tools absent", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [{ type: "message", role: "user", content: "hello" }], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.input![0].role).toBe("user"); + }); + + it("should remove unsupported parameters", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + max_output_tokens: 1000, + max_completion_tokens: 2000, + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.max_output_tokens).toBeUndefined(); + expect(result.max_completion_tokens).toBeUndefined(); + }); + + it("should normalize minimal to low for gpt-5-codex", async () => { + const body: RequestBody = { + model: "gpt-5-codex", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "minimal" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + expect(result.reasoning?.effort).toBe("low"); + }); + + it("should clamp xhigh to high for codex-mini", async () => { + const body: RequestBody = { + model: "gpt-5.1-codex-mini-high", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "xhigh" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + expect(result.reasoning?.effort).toBe("high"); + }); + + it("should clamp none to medium for codex-mini", async () => { + const body: RequestBody = { + model: "gpt-5.1-codex-mini-medium", + input: [], + }; + const userConfig: UserConfig 
= { + global: { reasoningEffort: "none" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + expect(result.reasoning?.effort).toBe("medium"); + }); + + it("should default codex-max to high effort", async () => { + const body: RequestBody = { + model: "gpt-5.1-codex-max", + input: [], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.reasoning?.effort).toBe("high"); + }); + + it("should default gpt-5.2-codex to high effort", async () => { + const body: RequestBody = { + model: "gpt-5.2-codex", + input: [], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.model).toBe("gpt-5.2-codex"); + expect(result.reasoning?.effort).toBe("high"); + }); + + it("should preserve xhigh for codex-max when requested", async () => { + const body: RequestBody = { + model: "gpt-5.1-codex-max-xhigh", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningSummary: "auto" }, + models: { + "gpt-5.1-codex-max-xhigh": { + options: { reasoningEffort: "xhigh", reasoningSummary: "detailed" }, + }, + }, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + expect(result.model).toBe("gpt-5.1-codex-max"); + expect(result.reasoning?.effort).toBe("xhigh"); + expect(result.reasoning?.summary).toBe("detailed"); + }); + + it("should preserve xhigh for gpt-5.2-codex when requested", async () => { + const body: RequestBody = { + model: "gpt-5.2-codex-xhigh", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningSummary: "auto" }, + models: { + "gpt-5.2-codex-xhigh": { + options: { reasoningEffort: "xhigh", reasoningSummary: "detailed" }, + }, + }, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + expect(result.model).toBe("gpt-5.2-codex"); + expect(result.reasoning?.effort).toBe("xhigh"); + 
expect(result.reasoning?.summary).toBe("detailed"); + }); + + it("should downgrade xhigh to high for non-max codex", async () => { + const body: RequestBody = { + model: "gpt-5.1-codex-high", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "xhigh" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + expect(result.model).toBe("gpt-5.1-codex"); + expect(result.reasoning?.effort).toBe("high"); + }); + + it("should downgrade xhigh to high for non-max general models", async () => { + const body: RequestBody = { + model: "gpt-5.1-high", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "xhigh" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + expect(result.model).toBe("gpt-5.1"); + expect(result.reasoning?.effort).toBe("high"); + }); + + it("should preserve none for GPT-5.2", async () => { + const body: RequestBody = { + model: "gpt-5.2-none", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "none" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + expect(result.model).toBe("gpt-5.2"); + expect(result.reasoning?.effort).toBe("none"); + }); + + it("should upgrade none to low for GPT-5.2-codex (codex does not support none)", async () => { + const body: RequestBody = { + model: "gpt-5.2-codex", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "none" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + expect(result.model).toBe("gpt-5.2-codex"); + expect(result.reasoning?.effort).toBe("low"); + }); + + it("should normalize minimal to low for gpt-5.2-codex", async () => { + const body: RequestBody = { + model: "gpt-5.2-codex", + input: [], + }; + const userConfig: UserConfig = { + global: { 
reasoningEffort: "minimal" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + expect(result.model).toBe("gpt-5.2-codex"); + expect(result.reasoning?.effort).toBe("low"); + }); + + it("should preserve none for GPT-5.1 general purpose", async () => { + const body: RequestBody = { + model: "gpt-5.1-none", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "none" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + expect(result.model).toBe("gpt-5.1"); + expect(result.reasoning?.effort).toBe("none"); + }); + + it("should upgrade none to low for GPT-5.1-codex (codex does not support none)", async () => { + const body: RequestBody = { + model: "gpt-5.1-codex", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "none" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + expect(result.model).toBe("gpt-5.1-codex"); + expect(result.reasoning?.effort).toBe("low"); + }); + + it("should upgrade none to low for GPT-5.1-codex-max (codex max does not support none)", async () => { + const body: RequestBody = { + model: "gpt-5.1-codex-max", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "none" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + expect(result.model).toBe("gpt-5.1-codex-max"); + expect(result.reasoning?.effort).toBe("low"); + }); + + it("should normalize minimal to low for non-codex models", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "minimal" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + expect(result.reasoning?.effort).toBe("low"); + }); + + 
it("should use minimal effort for lightweight models", async () => { + const body: RequestBody = { + model: "gpt-5-nano", + input: [], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.reasoning?.effort).toBe("low"); + }); + + it("should normalize minimal to low when provided by the host", async () => { + const body: RequestBody = { + model: "gpt-5-nano", + input: [], + reasoning: { effort: "minimal" }, + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.reasoning?.effort).toBe("low"); + }); + + it("should convert orphaned function_call_output to message to preserve context", async () => { + const body: RequestBody = { + model: "gpt-5-codex", + input: [ + { type: "message", role: "user", content: "hello" }, + { + type: "function_call_output", + role: "assistant", + call_id: "orphan_call", + name: "read", + output: "{}", + } as any, + ], + }; + + const result = await transformRequestBody(body, codexInstructions); + + expect(result.tools).toBeUndefined(); + expect(result.input).toHaveLength(2); + expect(result.input![0].type).toBe("message"); + expect(result.input![1].type).toBe("message"); + expect(result.input![1].role).toBe("assistant"); + expect(result.input![1].content).toContain( + "[Previous read result; call_id=orphan_call]", + ); + }); + + it("should keep matched function_call pairs when no tools present (for compaction)", async () => { + const body: RequestBody = { + model: "gpt-5-codex", + input: [ + { type: "message", role: "user", content: "hello" }, + { + type: "function_call", + call_id: "call_1", + name: "write", + arguments: "{}", + } as any, + { + type: "function_call_output", + call_id: "call_1", + output: "success", + } as any, + ], + }; + + const result = await transformRequestBody(body, codexInstructions); + + expect(result.tools).toBeUndefined(); + expect(result.input).toHaveLength(3); + expect(result.input![1].type).toBe("function_call"); + 
expect(result.input![2].type).toBe("function_call_output"); + }); + + it("should treat local_shell_call as a match for function_call_output", async () => { + const body: RequestBody = { + model: "gpt-5-codex", + input: [ + { type: "message", role: "user", content: "hello" }, + { + type: "local_shell_call", + call_id: "shell_call", + action: { type: "exec", command: ["ls"] }, + } as any, + { + type: "function_call_output", + call_id: "shell_call", + output: "ok", + } as any, + ], + }; + + const result = await transformRequestBody(body, codexInstructions); + + expect(result.input).toHaveLength(3); + expect(result.input![1].type).toBe("local_shell_call"); + expect(result.input![2].type).toBe("function_call_output"); + }); + + it("should keep matching custom_tool_call_output items", async () => { + const body: RequestBody = { + model: "gpt-5-codex", + input: [ + { type: "message", role: "user", content: "hello" }, + { + type: "custom_tool_call", + call_id: "custom_call", + name: "mcp_tool", + input: "{}", + } as any, + { + type: "custom_tool_call_output", + call_id: "custom_call", + output: "done", + } as any, + ], + }; + + const result = await transformRequestBody(body, codexInstructions); + + expect(result.input).toHaveLength(3); + expect(result.input![1].type).toBe("custom_tool_call"); + expect(result.input![2].type).toBe("custom_tool_call_output"); + }); + + it("should convert orphaned custom_tool_call_output to message", async () => { + const body: RequestBody = { + model: "gpt-5-codex", + input: [ + { type: "message", role: "user", content: "hello" }, + { + type: "custom_tool_call_output", + call_id: "orphan_custom", + output: "oops", + } as any, + ], + }; + + const result = await transformRequestBody(body, codexInstructions); + + expect(result.input).toHaveLength(2); + expect(result.input![1].type).toBe("message"); + expect(result.input![1].content).toContain( + "[Previous tool result; call_id=orphan_custom]", + ); + }); + + describe("bridge removal parity", () 
=> { + it("does not inject bridge content when tools are present", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [{ type: "message", role: "user", content: "hello" }], + tools: [{ name: "test_tool" }], + }; + const result = await transformRequestBody(body, codexInstructions); + + expect(result.input).toHaveLength(1); + expect(result.input![0].role).toBe("user"); + }); + + it("preserves OpenCode environment/AGENTS-style developer messages", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [ + { + type: "message", + role: "developer", + content: [ + "Here is some useful information about the environment you are running in:", + "", + " Working directory: /tmp/project", + "", + "Instructions from: /tmp/project/AGENTS.md", + "# Project Guidelines", + ].join("\n"), + }, + { type: "message", role: "user", content: "hello" }, + ], + tools: [{ name: "test_tool" }], + }; + const result = await transformRequestBody(body, codexInstructions); + + expect(result.input).toHaveLength(2); + expect(result.input![0].role).toBe("developer"); + expect(String(result.input![0].content)).toContain("Working directory"); + expect(String(result.input![0].content)).toContain( + "Instructions from: /tmp/project/AGENTS.md", + ); + expect(result.input![1].role).toBe("user"); + }); + + it("keeps codex instructions as canonical instructions field", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [{ type: "message", role: "user", content: "hello" }], + tools: [{ name: "test_tool" }], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.instructions).toContain(codexInstructions); + }); + }); + + describe("personality resolution", () => { + const originalXdg = process.env.XDG_CONFIG_HOME; + let xdgRoot: string | undefined; + + beforeEach(() => { + xdgRoot = mkdtempSync(join(tmpdir(), "personality-xdg-")); + process.env.XDG_CONFIG_HOME = xdgRoot; + }); + + afterEach(() => { + if (xdgRoot) { + 
rmSync(xdgRoot, { recursive: true, force: true }); + xdgRoot = undefined; + } + if (originalXdg === undefined) { + delete process.env.XDG_CONFIG_HOME; + } else { + process.env.XDG_CONFIG_HOME = originalXdg; + } + }); + + it("applies custom personality from local file", async () => { + const root = mkdtempSync(join(tmpdir(), "personality-local-")); + const cwd = process.cwd(); + process.chdir(root); + try { + const localDir = join(root, ".opencode", "Personalities"); + mkdirSync(localDir, { recursive: true }); + writeFileSync( + join(localDir, "Idiot.md"), + "Chaotic friendly override", + "utf8", + ); + const body: RequestBody = { + model: "gpt-5.3-codex", + input: [], + }; + const userConfig: UserConfig = { global: {}, models: {} }; + const pluginConfig = { + custom_settings: { + options: { personality: "Idiot" }, + models: {}, + }, + }; + const runtimeDefaults = { + instructionsTemplate: "BASE INSTRUCTIONS\n\n{{ personality }}", + personalityMessages: { + friendly: "Friendly from runtime", + pragmatic: "Pragmatic from runtime", + }, + staticDefaultPersonality: "pragmatic", + }; + const result = await transformRequestBody( + body, + "BASE INSTRUCTIONS", + userConfig, + runtimeDefaults as any, + pluginConfig as any, + ); + expect(result.instructions).toContain("Chaotic friendly override"); + } finally { + process.chdir(cwd); + rmSync(root, { recursive: true, force: true }); + } + }); + + it("strips cache marker from personality files", async () => { + const root = mkdtempSync(join(tmpdir(), "personality-marker-")); + const cwd = process.cwd(); + process.chdir(root); + try { + const localDir = join(root, ".opencode", "Personalities"); + mkdirSync(localDir, { recursive: true }); + writeFileSync( + join(localDir, "Friendly.md"), + "\nFriendly from cache", + "utf8", + ); + const body: RequestBody = { + model: "gpt-5.3-codex", + input: [], + }; + const result = await transformRequestBody( + body, + "BASE INSTRUCTIONS", + { global: {}, models: {} }, + undefined, + { + 
custom_settings: { + options: { personality: "friendly" }, + models: {}, + }, + } as any, + ); + expect(result.instructions).toContain("Friendly from cache"); + expect(result.instructions).not.toContain( + "opencode personality cache", + ); + } finally { + process.chdir(cwd); + rmSync(root, { recursive: true, force: true }); + } + }); + + it("rejects personality names with path traversal", async () => { + const root = mkdtempSync(join(tmpdir(), "personality-traversal-")); + const cwd = process.cwd(); + process.chdir(root); + try { + const localDir = join(root, ".opencode", "Personalities"); + mkdirSync(localDir, { recursive: true }); + writeFileSync( + join(root, ".opencode", "evil.md"), + "do not load", + "utf8", + ); + const body: RequestBody = { + model: "gpt-5.3-codex", + input: [], + }; + const userConfig: UserConfig = { global: {}, models: {} }; + const pluginConfig = { + custom_settings: { + options: { personality: "../evil" }, + models: {}, + }, + }; + const runtimeDefaults = { + instructionsTemplate: "BASE INSTRUCTIONS\n\n{{ personality }}", + personalityMessages: { + pragmatic: "Pragmatic from runtime", + }, + staticDefaultPersonality: "pragmatic", + }; + const result = await transformRequestBody( + body, + "BASE INSTRUCTIONS", + userConfig, + runtimeDefaults as any, + pluginConfig as any, + ); + expect(result.instructions).not.toContain("do not load"); + } finally { + process.chdir(cwd); + rmSync(root, { recursive: true, force: true }); + } + }); + + it("rejects personality names with Windows-style traversal", async () => { + const root = mkdtempSync(join(tmpdir(), "personality-traversal-win-")); + const cwd = process.cwd(); + process.chdir(root); + try { + const localDir = join(root, ".opencode", "Personalities"); + mkdirSync(localDir, { recursive: true }); + writeFileSync( + join(root, ".opencode", "evil.md"), + "do not load", + "utf8", + ); + const body: RequestBody = { + model: "gpt-5.3-codex", + input: [], + }; + const userConfig: UserConfig = { 
global: {}, models: {} }; + const pluginConfig = { + custom_settings: { + options: { personality: "..\\evil" }, + models: {}, + }, + }; + const runtimeDefaults = { + instructionsTemplate: "BASE INSTRUCTIONS\n\n{{ personality }}", + personalityMessages: { + pragmatic: "Pragmatic from runtime", + }, + staticDefaultPersonality: "pragmatic", + }; + const result = await transformRequestBody( + body, + "BASE INSTRUCTIONS", + userConfig, + runtimeDefaults as any, + pluginConfig as any, + ); + expect(result.instructions).not.toContain("do not load"); + } finally { + process.chdir(cwd); + rmSync(root, { recursive: true, force: true }); + } + }); + + it("defaults to pragmatic when no custom personality set", async () => { + const body: RequestBody = { + model: "gpt-5.3-codex", + input: [], + }; + const userConfig: UserConfig = { global: {}, models: {} }; + const runtimeDefaults = { + instructionsTemplate: "BASE INSTRUCTIONS\n\n{{ personality }}", + personalityMessages: { + friendly: "Friendly from runtime", + pragmatic: "Pragmatic from runtime", + }, + staticDefaultPersonality: "pragmatic", + }; + const result = await transformRequestBody( + body, + "BASE INSTRUCTIONS", + userConfig, + runtimeDefaults as any, + {} as any, + ); + expect(result.instructions).toContain("Pragmatic from runtime"); + }); + + it("uses runtime default when personality is set to default", async () => { + const body: RequestBody = { + model: "gpt-5.3-codex", + input: [], + }; + const userConfig: UserConfig = { global: {}, models: {} }; + const pluginConfig = { + custom_settings: { + options: { personality: "default" }, + models: {}, + }, + }; + const runtimeDefaults = { + instructionsTemplate: "BASE INSTRUCTIONS\n\n{{ personality }}", + personalityMessages: { + friendly: "Friendly from runtime", + pragmatic: "Pragmatic from runtime", + }, + onlineDefaultPersonality: "friendly", + staticDefaultPersonality: "pragmatic", + }; + const result = await transformRequestBody( + body, + "BASE INSTRUCTIONS", + 
userConfig, + runtimeDefaults as any, + pluginConfig as any, + ); + expect(result.instructions).toContain("Friendly from runtime"); + }); + + it("uses explicit runtime default message when provided", async () => { + const body: RequestBody = { + model: "gpt-5.3-codex", + input: [], + }; + const userConfig: UserConfig = { global: {}, models: {} }; + const pluginConfig = { + custom_settings: { + options: { personality: "default" }, + models: {}, + }, + }; + const runtimeDefaults = { + instructionsTemplate: "BASE INSTRUCTIONS\n\n{{ personality }}", + personalityMessages: { + default: "Default from runtime", + pragmatic: "Pragmatic from runtime", + }, + staticDefaultPersonality: "pragmatic", + }; + const result = await transformRequestBody( + body, + "BASE INSTRUCTIONS", + userConfig, + runtimeDefaults as any, + pluginConfig as any, + ); + expect(result.instructions).toContain("Default from runtime"); + }); + + it("logs invalid personality once per process while coercing to pragmatic", async () => { + const previousLogging = process.env.ENABLE_PLUGIN_REQUEST_LOGGING; + process.env.ENABLE_PLUGIN_REQUEST_LOGGING = "1"; + const logSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + try { + vi.resetModules(); + const dynamicModule = + await import("../lib/request/request-transformer.js"); + const dynamicTransform = dynamicModule.transformRequestBody; + const body: RequestBody = { + model: "gpt-5.3-codex", + input: [], + }; + const userConfig: UserConfig = { + global: {}, + models: {}, + }; + const pluginConfig = { + custom_settings: { + options: { personality: "INVALID" }, + models: {}, + }, + }; + + await dynamicTransform( + body, + "BASE INSTRUCTIONS", + userConfig, + undefined, + pluginConfig as any, + ); + await dynamicTransform( + body, + "BASE INSTRUCTIONS", + userConfig, + undefined, + pluginConfig as any, + ); + + const invalidLogs = logSpy.mock.calls.filter((call) => + call.some((part) => + String(part).includes( + 'Invalid personality "INVALID" 
detected; coercing to "pragmatic"', + ), + ), + ); + expect(invalidLogs).toHaveLength(1); + } finally { + if (previousLogging === undefined) { + delete process.env.ENABLE_PLUGIN_REQUEST_LOGGING; + } else { + process.env.ENABLE_PLUGIN_REQUEST_LOGGING = previousLogging; + } + vi.restoreAllMocks(); + vi.resetModules(); + } + }); + }); + + // Unknown model validation happens when runtime defaults are resolved from the server catalog. + + // NEW: Integration tests for all config scenarios + describe("Integration: Complete Config Scenarios", () => { + describe("Scenario 1: Default models (no custom config)", () => { + it("should handle gpt-5-codex with global options only", async () => { + const body: RequestBody = { + model: "gpt-5-codex", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "high" }, + models: {}, + }; + + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + + expect(result.model).toBe("gpt-5.1-codex"); + expect(result.reasoning?.effort).toBe("high"); // From global + expect(result.store).toBe(false); + }); + + it("should handle gpt-5-mini normalizing to gpt-5.1", async () => { + const body: RequestBody = { + model: "gpt-5-mini", + input: [], + }; + + const result = await transformRequestBody(body, codexInstructions); + + expect(result.model).toBe("gpt-5-mini"); + expect(result.reasoning?.effort).toBe("low"); // Lightweight defaults + }); + }); + + describe("Scenario 2: Custom preset names (new style)", () => { + const userConfig: UserConfig = { + global: { + reasoningEffort: "medium", + include: ["reasoning.encrypted_content"], + }, + models: { + "gpt-5-codex-low": { + options: { reasoningEffort: "low" }, + }, + "gpt-5-codex-high": { + options: { + reasoningEffort: "high", + reasoningSummary: "detailed", + }, + }, + }, + }; + + it("should apply per-model options for gpt-5-codex-low", async () => { + const body: RequestBody = { + model: "gpt-5-codex-low", + input: [], + }; + + const 
result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + + expect(result.model).toBe("gpt-5.1-codex"); + expect(result.reasoning?.effort).toBe("low"); // From per-model + expect(result.include).toEqual(["reasoning.encrypted_content"]); // From global + }); + + it("should apply per-model options for gpt-5-codex-high", async () => { + const body: RequestBody = { + model: "gpt-5-codex-high", + input: [], + }; + + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + + expect(result.model).toBe("gpt-5.1-codex"); + expect(result.reasoning?.effort).toBe("high"); // From per-model + expect(result.reasoning?.summary).toBe("detailed"); // From per-model + }); + + it("should use global options for default gpt-5-codex", async () => { + const body: RequestBody = { + model: "gpt-5-codex", + input: [], + }; + + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + + expect(result.model).toBe("gpt-5.1-codex"); + expect(result.reasoning?.effort).toBe("medium"); // From global (no per-model) + }); + }); + + describe("Scenario 3: Backwards compatibility (old verbose names)", () => { + const userConfig: UserConfig = { + global: {}, + models: { + "GPT 5 Codex Low (Codex)": { + options: { reasoningEffort: "low", textVerbosity: "low" }, + }, + }, + }; + + it("should find and apply old config format", async () => { + const body: RequestBody = { + model: "GPT 5 Codex Low (Codex)", + input: [], + }; + + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + + expect(result.model).toBe("gpt 5 codex low (codex)"); + expect(result.reasoning?.effort).toBe("low"); + expect(result.text?.verbosity).toBe("low"); + }); + }); + + describe("Scenario 4: Mixed default + custom models", () => { + const userConfig: UserConfig = { + global: { reasoningEffort: "medium" }, + models: { + "gpt-5-codex-low": { + options: { reasoningEffort: "low" }, + }, + }, + }; + + 
it("should use per-model for custom variant", async () => { + const body: RequestBody = { + model: "gpt-5-codex-low", + input: [], + }; + + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + + expect(result.reasoning?.effort).toBe("low"); // Per-model + }); + + it("should use global for default model", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + + expect(result.reasoning?.effort).toBe("medium"); // Global + }); + }); + + describe("Scenario 5: Message ID filtering with multi-turn", () => { + it("should remove ALL IDs in multi-turn conversation", async () => { + const body: RequestBody = { + model: "gpt-5-codex", + input: [ + { + id: "msg_turn1", + type: "message", + role: "user", + content: "first", + }, + { + id: "rs_response1", + type: "message", + role: "assistant", + content: "response", + }, + { + id: "msg_turn2", + type: "message", + role: "user", + content: "second", + }, + { + id: "assistant_123", + type: "message", + role: "assistant", + content: "reply", + }, + ], + }; + + const result = await transformRequestBody(body, codexInstructions); + + // All items kept, ALL IDs removed + expect(result.input).toHaveLength(4); + expect(result.input!.every((item) => !item.id)).toBe(true); + expect(result.store).toBe(false); // Stateless mode + expect(result.include).toEqual(["reasoning.encrypted_content"]); + }); + }); + + describe("Scenario 6: Complete end-to-end transformation", () => { + it("should handle full transformation: custom model + IDs + tools", async () => { + const userConfig: UserConfig = { + global: { include: ["reasoning.encrypted_content"] }, + models: { + "gpt-5-codex-low": { + options: { + reasoningEffort: "low", + textVerbosity: "low", + reasoningSummary: "auto", + }, + }, + }, + }; + + const body: RequestBody = { + model: "gpt-5-codex-low", + input: [ + { id: "msg_1", type: 
"message", role: "user", content: "test" }, + { + id: "rs_2", + type: "message", + role: "assistant", + content: "reply", + }, + ], + tools: [{ name: "edit" }], + }; + + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); + + // Model normalized for legacy identifiers + expect(result.model).toBe("gpt-5.1-codex"); + + // IDs removed + expect(result.input!.every((item) => !item.id)).toBe(true); + + // Per-model options applied + expect(result.reasoning?.effort).toBe("low"); + expect(result.reasoning?.summary).toBe("auto"); + expect(result.text?.verbosity).toBe("low"); + + // Codex fields set + expect(result.store).toBe(false); + expect(result.stream).toBe(true); + expect(result.instructions).toContain(codexInstructions); + expect(result.include).toEqual(["reasoning.encrypted_content"]); + }); + }); + }); + }); }); diff --git a/test/tty-confirm.test.ts b/test/tty-confirm.test.ts index eceb7e2..38ee0ad 100644 --- a/test/tty-confirm.test.ts +++ b/test/tty-confirm.test.ts @@ -2,24 +2,25 @@ import { PassThrough } from "node:stream"; import { describe, it, expect, vi } from "vitest"; -import { runConfirm } from "../lib/ui/tty/confirm.js"; +import { confirm } from "../lib/ui/tty/confirm.js"; + +function makeTty() { + const input = new PassThrough(); + const output = new PassThrough(); + (input as unknown as { isTTY: boolean }).isTTY = true; + (output as unknown as { isTTY: boolean }).isTTY = true; + (input as unknown as { setRawMode: (val: boolean) => void }).setRawMode = vi.fn(); + return { input, output }; +} describe("tty confirm", () => { it("returns true when confirming", async () => { - const input = new PassThrough(); - const output = new PassThrough(); - (input as unknown as { isTTY: boolean }).isTTY = true; - (output as unknown as { isTTY: boolean }).isTTY = true; - (input as unknown as { setRawMode: (val: boolean) => void }).setRawMode = vi.fn(); - - const resultPromise = runConfirm({ - title: "Delete account", - message: 
"Delete user?", - input, - output, - useColor: false, + const { input, output } = makeTty(); + const resultPromise = confirm("Delete account?", false, { + input: input as unknown as NodeJS.ReadStream, + output: output as unknown as NodeJS.WriteStream, }); - + input.write("\u001b[B"); input.write("\r"); const result = await resultPromise; expect(result).toBe(true); diff --git a/test/tty-select.test.ts b/test/tty-select.test.ts index ddfa085..dabdf57 100644 --- a/test/tty-select.test.ts +++ b/test/tty-select.test.ts @@ -1,88 +1,105 @@ +import { PassThrough } from "node:stream"; + import { describe, it, expect, vi } from "vitest"; -import { PassThrough } from "node:stream"; +import { ANSI } from "../lib/ui/tty/ansi.js"; +import { select } from "../lib/ui/tty/select.js"; -import { renderSelectFrame, parseSelectKey, moveSelectIndex, runSelect } from "../lib/ui/tty/select.js"; +function makeTty(columns = 80) { + const input = new PassThrough(); + const output = new PassThrough(); + (input as unknown as { isTTY: boolean }).isTTY = true; + (output as unknown as { isTTY: boolean }).isTTY = true; + (output as unknown as { columns: number }).columns = columns; + (input as unknown as { setRawMode: (val: boolean) => void }).setRawMode = vi.fn(); + return { input, output }; +} -describe("tty select", () => { - it("renders ASCII frame and selection marker", () => { - const lines = renderSelectFrame({ - title: "Manage accounts", - subtitle: "Select account", - items: [{ label: "Add new account" }, { label: "Check quotas", hint: "used today" }], - selectedIndex: 0, - useColor: false, - }); +function captureOutput(output: PassThrough): { chunks: string[] } { + const chunks: string[] = []; + output.on("data", (chunk) => chunks.push(chunk.toString())); + return { chunks }; +} - const output = lines.join("\n"); - expect(output).toContain("+ Manage accounts"); - expect(output).toContain("| Select account"); - expect(output).toContain("| > Add new account"); - expect(output).toContain("| 
Check quotas used today"); - expect(output).toContain("^/v to select"); - }); +describe("tty select", () => { + it("renders box drawing without clearing the screen", async () => { + const { input, output } = makeTty(); + const capture = captureOutput(output); + const resultPromise = select( + [ + { label: "Add new account", value: "add" }, + { label: "Check quotas", value: "check" }, + ], + { + message: "Manage accounts", + subtitle: "Select account", + input: input as unknown as NodeJS.ReadStream, + output: output as unknown as NodeJS.WriteStream, + }, + ); - it("parses arrow and vim keys", () => { - expect(parseSelectKey("\u001b[A")).toBe("up"); - expect(parseSelectKey("\u001b[B")).toBe("down"); - expect(parseSelectKey("k")).toBe("up"); - expect(parseSelectKey("j")).toBe("down"); - expect(parseSelectKey("\r")).toBe("enter"); - expect(parseSelectKey("\u001b")).toBe("cancel"); - }); + input.write("\r"); + await resultPromise; + const text = capture.chunks.join(""); - it("wraps selection index", () => { - expect(moveSelectIndex(0, -1, 3)).toBe(2); - expect(moveSelectIndex(2, 1, 3)).toBe(0); - // No movement when list is empty. 
- expect(moveSelectIndex(0, 1, 0)).toBe(0); + expect(text).toContain("┌"); + expect(text).toContain("└"); + expect(text).toContain("│"); + expect(text).not.toContain(ANSI.clearScreen); }); - it("adds ANSI colors when enabled", () => { - const lines = renderSelectFrame({ - title: "Manage accounts", - items: [{ label: "Add new account" }], - selectedIndex: 0, - useColor: true, - }); - expect(lines.join("\n")).toContain("\u001b["); - }); + it("honors NO_COLOR for label styling", async () => { + const originalNoColor = process.env.NO_COLOR; + process.env.NO_COLOR = "1"; + try { + const { input, output } = makeTty(); + const capture = captureOutput(output); + const resultPromise = select( + [ + { label: "Add new account", value: "add" }, + { label: "Check quotas", value: "check" }, + ], + { + message: "Manage accounts", + input: input as unknown as NodeJS.ReadStream, + output: output as unknown as NodeJS.WriteStream, + }, + ); + input.write("\r"); + await resultPromise; + const text = capture.chunks.join(""); - it("uses ASCII hint line", () => { - const lines = renderSelectFrame({ - title: "Manage accounts", - items: [{ label: "Add new account" }], - selectedIndex: 0, - useColor: false, - }); - expect(lines.join("\n")).toContain("^/v to select, Enter: confirm"); + expect(text).not.toContain(ANSI.green); + expect(text).not.toContain(ANSI.yellow); + expect(text).not.toContain(ANSI.red); + } finally { + if (originalNoColor === undefined) { + delete process.env.NO_COLOR; + } else { + process.env.NO_COLOR = originalNoColor; + } + } }); - it("selects item using key input", async () => { - const input = new PassThrough(); - const output = new PassThrough(); - (input as unknown as { isTTY: boolean }).isTTY = true; - (output as unknown as { isTTY: boolean }).isTTY = true; - const setRawMode = vi.fn(); - (input as unknown as { setRawMode: (val: boolean) => void }).setRawMode = setRawMode; - - const resultPromise = runSelect({ - title: "Select", - items: [ - { label: "One", value: 
"one" }, - { label: "Two", value: "two" }, + it("truncates long labels for narrow terminals", async () => { + const { input, output } = makeTty(28); + const capture = captureOutput(output); + const resultPromise = select( + [ + { label: "ExtremelyLongAccountLabel", value: "one", hint: "used today" }, + { label: "Short", value: "two" }, ], - input, - output, - useColor: false, - }); + { + message: "Manage accounts", + input: input as unknown as NodeJS.ReadStream, + output: output as unknown as NodeJS.WriteStream, + }, + ); - input.write("\u001b[B"); input.write("\r"); - - const result = await resultPromise; - expect(result?.value).toBe("two"); - expect(setRawMode).toHaveBeenCalledWith(true); - expect(setRawMode).toHaveBeenCalledWith(false); + await resultPromise; + const text = capture.chunks.join(""); + expect(text).toContain("ExtremelyLong"); + expect(text).toContain("…"); }); });