diff --git a/.githooks/pre-commit b/.githooks/pre-commit new file mode 100755 index 0000000..5e95d81 --- /dev/null +++ b/.githooks/pre-commit @@ -0,0 +1,9 @@ +#!/bin/sh + +set -eu + +repo_root="$(git rev-parse --show-toplevel)" +cd "$repo_root" + +echo "Ensuring local verify passed before commit..." +node scripts/enforce-local-verify.mjs pre-commit diff --git a/.githooks/pre-push b/.githooks/pre-push new file mode 100755 index 0000000..9ea30fe --- /dev/null +++ b/.githooks/pre-push @@ -0,0 +1,9 @@ +#!/bin/sh + +set -eu + +repo_root="$(git rev-parse --show-toplevel)" +cd "$repo_root" + +echo "Ensuring local verify passed before push..." +node scripts/enforce-local-verify.mjs pre-push diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 873311e..5254982 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -157,6 +157,7 @@ jobs: security-audit: name: Security Audit + if: github.event_name == 'push' runs-on: ubuntu-latest timeout-minutes: 15 diff --git a/AGENTS.md b/AGENTS.md index 5a0bd4e..51b5c26 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -29,14 +29,16 @@ Current planning docs: ## Current storage and config Canonical files: -- Plugin config: `~/.config/opencode/codex-config.json` +- Plugin config: `~/.config/opencode/codex-config.jsonc` - Plugin accounts: `~/.config/opencode/codex-accounts.json` - OpenCode provider auth marker: `${XDG_DATA_HOME:-~/.local/share}/opencode/auth.json` - Optional request snapshots/logs: `/logs/codex-plugin/` Important: - `opencode.json` should only contain plugin installation/enablement. -- Runtime flags and behavior go in `codex-config.json`. +- Runtime flags and behavior go in `codex-config.jsonc`. +- Legacy `codex-config.json` is compatibility-only; prefer `.jsonc` in code, docs, examples, and tests. +- Keep internal catalog/runtime defaults (for example `codexRuntimeDefaults.reasoningSummaryFormat`) out of public config unless schema, loader, examples, and docs are intentionally updated together. 
## Modes @@ -73,6 +75,9 @@ npm run verify `npm run verify` is the default pre-release check. +- Treat `npm run verify` as required before both commits and PR/push updates. Local hooks should enforce it, and manual verification is still required if hooks are bypassed. +- After changing tests, test helpers, or TypeScript-only fixture shapes, run `npm run typecheck:test` before pushing. `npm test` and `npm run typecheck` do not cover the test TypeScript project on their own. + ## Module sizing - There is no hard max-lines or max-file-size rule in this repo. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e375b8f..49688a1 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -6,14 +6,31 @@ Thanks for contributing to `opencode-codex-auth`. ```bash npm ci +npm run hooks:install npm run verify ``` -`npm run verify` is the baseline gate and runs: +Local hooks enforce `npm run verify` before both commits and pushes once you run `npm run hooks:install`. +The commit hook accepts staged-only commit-ready changes, while the push hook requires a clean tree so it validates the exact commits being pushed. +`npm run verify:local` is the recommended manual gate. It runs `npm run verify`, but skips reruns when the current tree already passed locally. + +Pull request GitHub CI keeps only hosted-value checks: clean-room verify, Linux tarball smoke, Windows smoke, dependency review, and secret scanning. `npm audit` still runs in GitHub, but only on default-branch pushes rather than every PR. 
+ +`npm run verify` is the baseline full gate and runs: + +- `npm run check:esm-imports` +- `npm run lint` +- `npm run format:check` - `npm run typecheck` -- `npm test` +- `npm run typecheck:test` +- `npm run test:anti-mock` +- `npm run test:coverage` +- `npm run check:coverage-ratchet` +- `npm run check:docs` - `npm run build` +- `npm run check:dist-esm-imports` +- `npm run smoke:cli:dist` ## Pull requests diff --git a/README.md b/README.md index 2dcb06b..2bf7fd3 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,7 @@ opencode run "say hi" --model=openai/gpt-5 ## Configuration -Keep plugin install/enablement in `opencode.json`, and runtime behavior in `codex-config.json`. +Keep plugin install/enablement in `opencode.json`, and runtime behavior in `codex-config.jsonc`. The plugin still accepts commented legacy `codex-config.json` files for compatibility. - Config reference: [docs/configuration.md](docs/configuration.md) - Multi-account behavior: [docs/multi-account.md](docs/multi-account.md) @@ -56,18 +56,23 @@ Keep plugin install/enablement in `opencode.json`, and runtime behavior in `code ```bash npm install +npm run hooks:install npm run verify ``` Helpful local commands: ```bash +npm run verify:local +npm run prepush npm run lint npm run test:coverage npm run check:docs ``` -`npm run verify` is the primary quality gate and includes lint, formatting, type-checking, anti-mock, coverage/ratchet, docs drift checks, build validation, and CLI smoke checks. +Local git hooks now enforce `npm run verify` before both `git commit` and `git push`. The commit hook accepts staged-only commit-ready changes, and the push hook requires a clean tree so it verifies the exact commits being pushed. `npm run verify:local` runs the same enforcement manually, with a cache so unchanged trees do not rerun the full suite twice in a row. + +Pull request CI stays intentionally lean: GitHub still runs clean-room verify, tarball smoke, Windows smoke, dependency review, and secret scanning. 
Dependency vulnerability auditing via `npm audit` now runs on default-branch pushes instead of every PR. ## Usage Note diff --git a/docs/configuration.md b/docs/configuration.md index c83127f..771fb5d 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -3,8 +3,8 @@ This plugin uses one runtime config file: - resolved config path: - - `$XDG_CONFIG_HOME/opencode/codex-config.json` when `XDG_CONFIG_HOME` is set - - otherwise `~/.config/opencode/codex-config.json` + - `$XDG_CONFIG_HOME/opencode/codex-config.jsonc` when `XDG_CONFIG_HOME` is set + - otherwise `~/.config/opencode/codex-config.jsonc` If the default config path does not exist, installer/bootstrap flows create it with defaults. @@ -24,7 +24,7 @@ Known exceptions: Use these schemas for validation/autocomplete: -- `schemas/codex-config.schema.json` -> `codex-config.json` +- `schemas/codex-config.schema.json` -> `codex-config.jsonc` - `schemas/opencode.schema.json` -> `opencode.json` - `schemas/codex-accounts.schema.json` -> `codex-accounts.json` (advanced/manual recovery only) @@ -34,16 +34,17 @@ The plugin loads config in this order: 1. `OPENCODE_OPENAI_MULTI_CONFIG_PATH` 2. Resolved default config path: - - `$XDG_CONFIG_HOME/opencode/codex-config.json` when `XDG_CONFIG_HOME` is set - - otherwise `~/.config/opencode/codex-config.json` + - `$XDG_CONFIG_HOME/opencode/codex-config.jsonc` when `XDG_CONFIG_HOME` is set + - otherwise `~/.config/opencode/codex-config.jsonc` + - compatibility fallback: `codex-config.json` if the canonical `.jsonc` file is absent -`codex-config.json` supports JSON comments (`//` and `/* ... */`) for readability. +`codex-config.jsonc` supports JSON comments (`//` and `/* ... */`) for readability. The loader also accepts commented legacy `codex-config.json` files. Known-field type validation is applied on load. If a known field has an invalid type/value, the plugin ignores that config file and logs an actionable warning. 
## Default generated config -```json +```jsonc { "$schema": "https://schemas.iam-brain.dev/opencode-codex-auth/codex-config.schema.json", "debug": false, @@ -65,9 +66,11 @@ Known-field type validation is applied on load. If a known field has an invalid }, "global": { "personality": "pragmatic", - "verbosityEnabled": true, - "verbosity": "default" + "reasoningEffort": "high", + "reasoningSummary": "auto", + "textVerbosity": "default" }, + "customModels": {}, "perModel": {} } ``` @@ -144,45 +147,68 @@ Mode-derived runtime defaults when omitted: - `global.personality: string` - Personality key applied to all models unless overridden. -- `global.thinkingSummaries: boolean` - - Global thinking-summary preference. Omit to use model/catalog default. -- `global.verbosityEnabled: boolean` - - Enables/disables `textVerbosity` injection globally (`true` default). -- `global.verbosity: "default" | "low" | "medium" | "high"` - - Verbosity preference (`"default"` uses each model catalog default). -- `global.serviceTier: "default" | "priority" | "flex"` - - Global service tier preference. +- `global.reasoningEffort: string` + - Global reasoning effort override forwarded upstream when the request does not already set one. +- `global.reasoningSummary: "auto" | "concise" | "detailed" | "none"` + - Global reasoning summary format override forwarded upstream as `reasoning.summary`. + - `"none"` disables reasoning summaries. + - Deprecated boolean aliases still load: + - `reasoningSummaries: true` => `"auto"` + - `reasoningSummaries: false` => `"none"` + - `thinkingSummaries` behaves the same way and warns on load. +- `global.textVerbosity: "default" | "low" | "medium" | "high" | "none"` + - Global text verbosity override forwarded upstream as `text.verbosity`. + - `"default"` uses each model catalog default. + - `"none"` disables text verbosity. 
+ - Deprecated aliases still load: + - `verbosityEnabled: false` => `"none"` + - `verbosity: "medium"` => `textVerbosity: "medium"` +- `global.serviceTier: "auto" | "priority" | "flex"` + - Global Fast Mode preference (`serviceTier`). - `"priority"` maps to request-body `service_tier: "priority"` only for `gpt-5.4*`. - `"flex"` passes through `service_tier: "flex"`. - - `"default"` or omission leaves `service_tier` unset unless the request body already sets it. + - `"auto"` or omission leaves `service_tier` unset unless the request body already sets it. + - Deprecated alias: `"default"` => `"auto"`. +- `global.include: ("reasoning.encrypted_content" | "file_search_call.results" | "message.output_text.logprobs")[]` + - Global response include values merged into host-provided `include`. +- `global.parallelToolCalls: boolean` + - Global override for `parallel_tool_calls` when the request does not already set one. +- `customModels..targetModel: string` + - Required target model slug inherited by the selectable custom model alias. +- `customModels..name: string` + - Optional display name for the custom selectable model. +- `customModels..personality`, `customModels..reasoningEffort`, `customModels..reasoningSummary`, `customModels..textVerbosity`, `customModels..serviceTier`, `customModels..include`, `customModels..parallelToolCalls` + - Defaults applied when that custom slug is selected. +- `customModels..variants..personality` + - Variant-level override for the selected custom slug. +- `customModels..variants..reasoningEffort`, `customModels..variants..reasoningSummary`, `customModels..variants..textVerbosity`, `customModels..variants..serviceTier`, `customModels..variants..include`, `customModels..variants..parallelToolCalls` + - Variant-level overrides for the selected custom slug. - `perModel..personality: string` - Model-specific personality override. 
-- `perModel..thinkingSummaries: boolean` - - Model-specific summary override (`true` force-on, `false` force-off). -- `perModel..verbosityEnabled: boolean` - - Model-specific enable/disable for `textVerbosity`. -- `perModel..verbosity: "default" | "low" | "medium" | "high"` - - Model-specific verbosity setting. -- `perModel..serviceTier: "default" | "priority" | "flex"` - - Model-specific service tier override. +- `perModel..reasoningEffort`, `perModel..reasoningSummary`, `perModel..textVerbosity`, `perModel..serviceTier`, `perModel..include`, `perModel..parallelToolCalls` + - Model-specific overrides with the same semantics as `global.*`. - `perModel..variants..personality: string` - Variant-level personality override. -- `perModel..variants..thinkingSummaries: boolean` - - Variant-level summary override (`true` force-on, `false` force-off). -- `perModel..variants..verbosityEnabled: boolean` - - Variant-level enable/disable for `textVerbosity`. -- `perModel..variants..verbosity: "default" | "low" | "medium" | "high"` - - Variant-level verbosity setting. -- `perModel..variants..serviceTier: "default" | "priority" | "flex"` - - Variant-level service tier override. +- `perModel..variants..reasoningEffort`, `perModel..variants..reasoningSummary`, `perModel..variants..textVerbosity`, `perModel..variants..serviceTier`, `perModel..variants..include`, `perModel..variants..parallelToolCalls` + - Variant-level overrides with the same semantics as `global.*`. If a model reports `supportsVerbosity=false` in catalog/runtime defaults, verbosity overrides are ignored. -Precedence for `personality`, `thinkingSummaries`, verbosity, and `serviceTier` settings: +Precedence for `personality`, `reasoningEffort`, `reasoningSummary`, `textVerbosity`, `serviceTier`, `include`, and `parallelToolCalls` settings: 1. `perModel..variants.` 2. `perModel.` -3. `global` +3. `customModels..variants.` +4. `customModels.` +5. 
`global` + +Custom model notes: + +- `customModels` creates selectable aliases like `openai/my-fast-codex`. +- The selected custom slug inherits instructions, runtime defaults, capabilities, limits, and supported variants from `targetModel`. +- The backend request still uses `targetModel` as the API model id. +- If `targetModel` is not present in the active catalog/provider, the plugin warns and skips that custom model instead of inventing metadata. +- `reasoningSummaryFormat` remains internal-only. Users control request summaries with `reasoningSummary`; internal catalog defaults may still populate `reasoning.summary` when no explicit config override is set. ### GPT-5.4 fast mode and long context @@ -238,7 +264,7 @@ Flow: 2. The assistant interviews you (inspiration, tone, coding style, guardrails, examples). 3. The assistant calls `create-personality`. 4. A new profile is written under `personalities/.md`. -5. Set the key in `codex-config.json` via `global.personality` or `perModel`. +5. Set the key in `codex-config.jsonc` via `global.personality` or `perModel`. Advanced path: @@ -247,7 +273,7 @@ Advanced path: ## Why `runtime.mode` exists (and no `identityMode`) -- `runtime.mode` is the canonical persisted mode setting in `codex-config.json`. +- `runtime.mode` is the canonical persisted mode setting in `codex-config.jsonc`. - Identity behavior is derived from mode: - `native` -> native identity - `codex` -> codex identity @@ -258,6 +284,8 @@ Advanced path: ### Config/mode overrides - `OPENCODE_OPENAI_MULTI_CONFIG_PATH`: explicit config file path (absolute path recommended). +- `OPENCODE_OPENAI_MULTI_REASONING_SUMMARIES`: global reasoning-summary env override. +- `OPENCODE_OPENAI_MULTI_THINKING_SUMMARIES`: deprecated alias for `OPENCODE_OPENAI_MULTI_REASONING_SUMMARIES`. - `OPENCODE_OPENAI_MULTI_MODE`: `native|codex`. - `OPENCODE_OPENAI_MULTI_SPOOF_MODE`: advanced temporary identity override (`native|codex`). 
- If `OPENCODE_OPENAI_MULTI_MODE` is set, runtime mode takes precedence. @@ -305,7 +333,7 @@ Advanced path: ## Legacy keys -Legacy behavior keys are no longer parsed from `codex-config.json`. +Legacy behavior keys are no longer parsed from `codex-config.jsonc`. - `personality` - `customSettings` and all nested `customSettings.*` diff --git a/docs/development/ARCHITECTURE.md b/docs/development/ARCHITECTURE.md index 9c18577..7c87e6d 100644 --- a/docs/development/ARCHITECTURE.md +++ b/docs/development/ARCHITECTURE.md @@ -5,7 +5,7 @@ This plugin bridges OpenCode's OpenAI provider hooks to ChatGPT Codex backend en ## Runtime overview 1. OpenCode initializes plugin hooks (`index.ts`). -2. Config is resolved from `codex-config.json` + env overrides through `lib/config.ts` (stable barrel over `lib/config/types.ts`, `lib/config/file.ts`, and `lib/config/resolve.ts`). +2. Config is resolved from `codex-config.jsonc` + env overrides through `lib/config.ts` (stable barrel over `lib/config/types.ts`, `lib/config/file.ts`, and `lib/config/resolve.ts`). Commented legacy `codex-config.json` is still accepted as a compatibility fallback. 3. Auth loader selects a healthy account through `lib/storage.ts` + `lib/rotation.ts`, with storage normalization/migration helpers consolidated in `lib/storage/auth-state.ts`. 4. `CodexAuthPlugin` wires focused auth/request helpers under `lib/codex-native/` and routes Codex backend requests. 5. Failures (`429`, refresh/auth) trigger cooldown/disable semantics and retry orchestration (`lib/fetch-orchestrator.ts`). 
diff --git a/docs/development/CONFIG_FIELDS.md b/docs/development/CONFIG_FIELDS.md index 94cc523..08b1daa 100644 --- a/docs/development/CONFIG_FIELDS.md +++ b/docs/development/CONFIG_FIELDS.md @@ -5,8 +5,9 @@ Canonical source: `lib/config.ts` ## File location - `OPENCODE_OPENAI_MULTI_CONFIG_PATH` -- fallback: `$XDG_CONFIG_HOME/opencode/codex-config.json` -- fallback (no `XDG_CONFIG_HOME`): `~/.config/opencode/codex-config.json` +- fallback: `$XDG_CONFIG_HOME/opencode/codex-config.jsonc` +- fallback (no `XDG_CONFIG_HOME`): `~/.config/opencode/codex-config.jsonc` +- compatibility fallback: `codex-config.json` - parser accepts JSON with comments (`//`, `/* ... */`) ## Canonical JSON keys @@ -32,25 +33,52 @@ Top-level: - `runtime.collaborationProfile: boolean` - `runtime.orchestratorSubagents: boolean` - `global.personality: string` -- `global.thinkingSummaries: boolean` -- `global.verbosityEnabled: boolean` -- `global.verbosity: "default" | "low" | "medium" | "high"` -- `global.serviceTier: "default" | "priority" | "flex"` +- `global.reasoningEffort: string` +- `global.reasoningSummary: "auto" | "concise" | "detailed" | "none"` +- `global.textVerbosity: "default" | "low" | "medium" | "high" | "none"` +- `global.serviceTier: "auto" | "priority" | "flex"` +- `global.include: ("reasoning.encrypted_content" | "file_search_call.results" | "message.output_text.logprobs")[]` +- `global.parallelToolCalls: boolean` +- `customModels..targetModel: string` +- `customModels..name: string` +- `customModels..personality: string` +- `customModels..reasoningEffort: string` +- `customModels..reasoningSummary: "auto" | "concise" | "detailed" | "none"` +- `customModels..textVerbosity: "default" | "low" | "medium" | "high" | "none"` +- `customModels..serviceTier: "auto" | "priority" | "flex"` +- `customModels..include: ("reasoning.encrypted_content" | "file_search_call.results" | "message.output_text.logprobs")[]` +- `customModels..parallelToolCalls: boolean` +- 
`customModels..variants..personality: string` +- `customModels..variants..reasoningEffort: string` +- `customModels..variants..reasoningSummary: "auto" | "concise" | "detailed" | "none"` +- `customModels..variants..textVerbosity: "default" | "low" | "medium" | "high" | "none"` +- `customModels..variants..serviceTier: "auto" | "priority" | "flex"` +- `customModels..variants..include: ("reasoning.encrypted_content" | "file_search_call.results" | "message.output_text.logprobs")[]` +- `customModels..variants..parallelToolCalls: boolean` +- deprecated aliases still accepted: + - `global.reasoningSummaries: boolean` + - `global.thinkingSummaries: boolean` + - `global.verbosityEnabled: boolean` + - `global.verbosity: "default" | "low" | "medium" | "high"` - `perModel..personality: string` -- `perModel..thinkingSummaries: boolean` -- `perModel..verbosityEnabled: boolean` -- `perModel..verbosity: "default" | "low" | "medium" | "high"` -- `perModel..serviceTier: "default" | "priority" | "flex"` +- `perModel..reasoningEffort: string` +- `perModel..reasoningSummary: "auto" | "concise" | "detailed" | "none"` +- `perModel..textVerbosity: "default" | "low" | "medium" | "high" | "none"` +- `perModel..serviceTier: "auto" | "priority" | "flex"` +- `perModel..include: ("reasoning.encrypted_content" | "file_search_call.results" | "message.output_text.logprobs")[]` +- `perModel..parallelToolCalls: boolean` - `perModel..variants..personality: string` -- `perModel..variants..thinkingSummaries: boolean` -- `perModel..variants..verbosityEnabled: boolean` -- `perModel..variants..verbosity: "default" | "low" | "medium" | "high"` -- `perModel..variants..serviceTier: "default" | "priority" | "flex"` +- `perModel..variants..reasoningEffort: string` +- `perModel..variants..reasoningSummary: "auto" | "concise" | "detailed" | "none"` +- `perModel..variants..textVerbosity: "default" | "low" | "medium" | "high" | "none"` +- `perModel..variants..serviceTier: "auto" | "priority" | "flex"` +- 
`perModel..variants..include: ("reasoning.encrypted_content" | "file_search_call.results" | "message.output_text.logprobs")[]` +- `perModel..variants..parallelToolCalls: boolean` Canonical user-edited file set: - `/opencode.json` (plugin registration) -- `/codex-config.json` (runtime behavior) +- `/codex-config.jsonc` (runtime behavior; `codex-config.json` remains a compatibility fallback) - `/codex-accounts.json` (advanced/manual recovery only) - `` resolves to `$XDG_CONFIG_HOME/opencode` when `XDG_CONFIG_HOME` is set, otherwise `~/.config/opencode` - `.opencode/personalities/*.md` or `/personalities/*.md` (custom personalities) @@ -74,8 +102,10 @@ Default generated values: - `runtime.collaborationProfile`: mode-derived when unset (`true` in `codex`, `false` in `native`) - `runtime.orchestratorSubagents`: inherits `runtime.collaborationProfile` effective value when unset - `global.personality: "pragmatic"` -- `global.verbosityEnabled: true` -- `global.verbosity: "default"` +- `global.reasoningEffort: "high"` +- `global.reasoningSummary: "auto"` +- `global.textVerbosity: "default"` +- `customModels: {}` - `perModel: {}` ## Legacy compatibility keys @@ -113,7 +143,8 @@ Resolved by `resolveConfig`: - `OPENCODE_OPENAI_MULTI_ROTATION_STRATEGY` - `OPENCODE_OPENAI_MULTI_PROMPT_CACHE_KEY_STRATEGY` - `OPENCODE_OPENAI_MULTI_PERSONALITY` -- `OPENCODE_OPENAI_MULTI_THINKING_SUMMARIES` +- `OPENCODE_OPENAI_MULTI_REASONING_SUMMARIES` +- `OPENCODE_OPENAI_MULTI_THINKING_SUMMARIES` (deprecated alias) - `OPENCODE_OPENAI_MULTI_VERBOSITY_ENABLED` - `OPENCODE_OPENAI_MULTI_VERBOSITY` - `OPENCODE_OPENAI_MULTI_SERVICE_TIER` diff --git a/docs/development/CONFIG_FLOW.md b/docs/development/CONFIG_FLOW.md index fff271e..0fe8679 100644 --- a/docs/development/CONFIG_FLOW.md +++ b/docs/development/CONFIG_FLOW.md @@ -7,8 +7,9 @@ Config resolution has three stages. 
`ensureDefaultConfigFile({ env: process.env })` - creates default config path when missing: - - `$XDG_CONFIG_HOME/opencode/codex-config.json` when `XDG_CONFIG_HOME` is set - - otherwise `~/.config/opencode/codex-config.json` + - `$XDG_CONFIG_HOME/opencode/codex-config.jsonc` when `XDG_CONFIG_HOME` is set + - otherwise `~/.config/opencode/codex-config.jsonc` + - if only legacy `codex-config.json` exists, keeps using that file - seeds canonical defaults for runtime + behavior sections ## Stage 1: file load @@ -16,7 +17,8 @@ Config resolution has three stages. `loadConfigFile({ env: process.env })` - reads from `OPENCODE_OPENAI_MULTI_CONFIG_PATH` if present -- otherwise reads default config path (`$XDG_CONFIG_HOME/opencode/codex-config.json` or `~/.config/opencode/codex-config.json`) +- otherwise reads default config path (`$XDG_CONFIG_HOME/opencode/codex-config.jsonc` or `~/.config/opencode/codex-config.jsonc`) +- accepts commented legacy `codex-config.json` as a compatibility fallback - parses canonical fields into `PluginConfig` partial - if known fields are invalid, or the file is unreadable/malformed, ignores the config file and warns (env/defaults still apply) diff --git a/docs/development/TESTING.md b/docs/development/TESTING.md index fe5b372..556cdfc 100644 --- a/docs/development/TESTING.md +++ b/docs/development/TESTING.md @@ -6,16 +6,21 @@ This repo uses Vitest + TypeScript type checks. ```bash npm run typecheck +npm run typecheck:test npm test npm run build npm run lint +npm run verify:local +npm run prepush npm run test:anti-mock npm run check:coverage-ratchet npm run check:docs npm run verify ``` -`npm run verify` is the pre-release gate. +`npm run verify` is the required local gate before commits, pushes, and PR updates. `npm run verify:local` runs that gate with caching, and the installed git hooks enforce it automatically before `git commit` and `git push`. 
The commit hook accepts staged-only commit-ready changes; the push hook requires a clean tree so it validates the exact commits being pushed. GitHub Actions still adds extra platform and security jobs beyond the repo-local verify run. + +PR GitHub CI is intentionally slimmer than local `verify`: it keeps the clean-room Ubuntu verify job, Linux tarball smoke, Windows smoke, dependency review, and secret scanning. The separate `npm audit` dependency audit remains GitHub-hosted, but it now runs on default-branch pushes instead of every PR. It now includes strict Biome linting + format checks (including typed promise-safety rules), anti-mock policy checks, coverage ratcheting, docs drift checks, Node ESM regression checks (source + dist import specifiers), and a built CLI smoke run. @@ -23,6 +28,16 @@ It now includes strict Biome linting + format checks (including typed promise-sa - `npm run lint` - Runs Biome lint on source + tests with focused-test bans and typed promise-safety rules. +- `npm run typecheck:test` + - Type-checks the test TypeScript project with `tsconfig.test.json`. + - This catches fixture-shape and helper-signature regressions that `npm test` and `npm run typecheck` can miss. +- `npm run verify:local` + - Runs `npm run verify` and records a local success stamp for the current tree. + - Re-running it on an unchanged tree skips the full suite, which keeps pre-commit and pre-push hooks from doing duplicate work. + - It accepts either a clean tree or staged-only commit-ready changes. Extra unstaged or untracked WIP must be cleaned up before hook enforcement will pass. +- `npm run prepush` + - Alias for `npm run verify:local`. + - Kept for compatibility with the earlier local push workflow. - `npm run test:anti-mock` - Enforces boundary-only mock policy. - No new `vi.doMock`/`vi.mock`/direct `vi.stubGlobal` usage beyond the tracked baseline in `scripts/test-mocking-allowlist.json`. 
diff --git a/docs/examples/README.md b/docs/examples/README.md index e7c9b23..c35cec4 100644 --- a/docs/examples/README.md +++ b/docs/examples/README.md @@ -6,18 +6,18 @@ This directory contains starter config files. - `opencode.json` - minimal plugin registration example (published install) -- `codex-config.json` +- `codex-config.jsonc` - runtime and model behavior example ## Usage 1. Keep `opencode.json` minimal (plugin enablement only). -2. Put runtime behavior in `~/.config/opencode/codex-config.json`. +2. Put runtime behavior in `~/.config/opencode/codex-config.jsonc`. 3. Customize: - `runtime.mode` (`native`, `codex`) - `runtime.developerMessagesToUser` (codex-mode role remap toggle) - `runtime.promptCacheKeyStrategy` (`default`, `project`) - - `global` personality/summaries + - `global` personality/reasoning summaries - `perModel` and `variants` 4. Validate with schemas: - `schemas/codex-config.schema.json` diff --git a/docs/examples/codex-config.json b/docs/examples/codex-config.json deleted file mode 100644 index dd5181b..0000000 --- a/docs/examples/codex-config.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "$schema": "https://schemas.iam-brain.dev/opencode-codex-auth/codex-config.schema.json", - "debug": false, - "quiet": false, - "refreshAhead": { - "enabled": true, - "bufferMs": 60000 - }, - "runtime": { - "mode": "native", - "rotationStrategy": "sticky", - "sanitizeInputs": false, - "developerMessagesToUser": true, - "promptCacheKeyStrategy": "default", - "headerSnapshots": false, - "headerSnapshotBodies": false, - "headerTransformDebug": false, - "pidOffset": false - }, - "global": { - "personality": "pragmatic", - "verbosityEnabled": true, - "verbosity": "default" - }, - "perModel": {} -} diff --git a/docs/examples/codex-config.jsonc b/docs/examples/codex-config.jsonc new file mode 100644 index 0000000..1f4255e --- /dev/null +++ b/docs/examples/codex-config.jsonc @@ -0,0 +1,124 @@ +{ + "$schema": 
"https://schemas.iam-brain.dev/opencode-codex-auth/codex-config.schema.json", + + // Enable verbose plugin debug logs. + "debug": false, + + // Suppress plugin toasts and notifications. + "quiet": false, + + // Proactively refresh access tokens before expiry. + "refreshAhead": { + // Refresh before token expiry. + "enabled": true, + + // Milliseconds before expiry to refresh. + "bufferMs": 60000 + }, + + "runtime": { + // Request identity/profile mode. + "mode": "native", + + // Account rotation strategy. + "rotationStrategy": "sticky", + + // Input compatibility sanitizer for edge payloads. + "sanitizeInputs": false, + + // Remap non-permissions developer messages to user role in codex mode. + "developerMessagesToUser": true, + + // Prompt cache key policy. + "promptCacheKeyStrategy": "default", + + // Optional codex-rs compaction/profile override. + // "codexCompactionOverride": true, + + // Optional collaboration controls. + // "collaborationProfile": true, + // "orchestratorSubagents": true, + + // Debug request snapshots. + "headerSnapshots": false, + "headerSnapshotBodies": false, + "headerTransformDebug": false, + + // Session-aware offset for account selection. + "pidOffset": false + }, + + "global": { + // Built-ins: "pragmatic", "friendly" + // Custom: personalities/.md + "personality": "pragmatic", + + // Reasoning effort override forwarded upstream. + "reasoningEffort": "high", + + // Reasoning summary format forwarded upstream as reasoning.summary. + // options: "auto" | "concise" | "detailed" | "none" + // "none" disables summaries entirely. + "reasoningSummary": "auto", + + // Optional Fast Mode (`serviceTier`) override: + // "auto" leaves the request unchanged. + // "serviceTier": "priority", + + // Text verbosity sent upstream as text.verbosity. + // options: "default" | "low" | "medium" | "high" | "none" + // "default" uses the selected model's catalog default. + // "none" disables text verbosity entirely. 
+ "textVerbosity": "default", + + // Optional extra response include values. + // allowed: "reasoning.encrypted_content" | "file_search_call.results" | "message.output_text.logprobs" + "include": ["file_search_call.results"], + + // Whether to allow multiple tool calls in parallel. + "parallelToolCalls": true + }, + + // Optional custom selectable model aliases. + // The config key becomes the model slug users select. + // targetModel remains the backend-facing API model id. + "customModels": { + "openai/my-fast-codex": { + "targetModel": "gpt-5.3-codex", + "name": "My Fast Codex", + "reasoningEffort": "low", + "reasoningSummary": "concise", + "textVerbosity": "medium", + "serviceTier": "auto", + "include": ["file_search_call.results"], + "parallelToolCalls": true, + "variants": { + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed" + } + } + } + }, + + "perModel": { + // Per-model override example. + "gpt-5.3-codex": { + "personality": "friendly", + "reasoningEffort": "medium", + "reasoningSummary": "concise", + "textVerbosity": "medium", + "serviceTier": "flex", + "include": ["file_search_call.results"], + "parallelToolCalls": false, + "variants": { + // Variant-specific override example. 
+ "high": { + "personality": "pragmatic", + "reasoningSummary": "detailed", + "textVerbosity": "high" + } + } + } + } +} diff --git a/docs/getting-started.md b/docs/getting-started.md index 26148b4..89a5a37 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -18,7 +18,7 @@ npx -y @iam-brain/opencode-codex-auth@latest What this does: - Adds `@iam-brain/opencode-codex-auth@latest` to resolved `/opencode.json` (`$XDG_CONFIG_HOME/opencode` when set, otherwise `~/.config/opencode`) -- Creates `codex-config.json` at resolved config root (`$XDG_CONFIG_HOME/opencode` when set, otherwise `~/.config/opencode`) if missing +- Creates `codex-config.jsonc` at resolved config root (`$XDG_CONFIG_HOME/opencode` when set, otherwise `~/.config/opencode`) if missing - Synchronizes `/create-personality` command at `/commands/create-personality.md` - Synchronizes `personality-builder` skill at `/skills/personality-builder/SKILL.md` @@ -33,7 +33,7 @@ Installer flags: - `--config `: use a custom `opencode.json` path. - `--plugin `: override plugin specifier written into `opencode.json`. -`codex-config.json` is still created at the default resolved config location. To load config from a custom path at runtime, set `OPENCODE_OPENAI_MULTI_CONFIG_PATH`. +`codex-config.jsonc` is created at the default resolved config location when no config exists. To load config from a custom path at runtime, set `OPENCODE_OPENAI_MULTI_CONFIG_PATH`. ## 2) Keep OpenCode config minimal @@ -49,9 +49,10 @@ Example: Put all plugin behavior flags in: -- resolved `/codex-config.json` (`$XDG_CONFIG_HOME/opencode` when set, otherwise `~/.config/opencode`) +- resolved `/codex-config.jsonc` (`$XDG_CONFIG_HOME/opencode` when set, otherwise `~/.config/opencode`) +- compatibility fallback: `/codex-config.json` when `codex-config.jsonc` is absent -Use `docs/examples/codex-config.json` as a baseline. +Use `docs/examples/codex-config.jsonc` as a baseline. 
Use schemas for autocomplete/validation: - `schemas/codex-config.schema.json` @@ -98,7 +99,7 @@ The plugin now tracks the live Codex catalog, so exact GPT-5-family availability ## 5a) Optional: enable GPT-5.4 fast mode -Add a `serviceTier` override in `codex-config.json`: +Add a `serviceTier` override in `codex-config.jsonc`: ```json { @@ -140,7 +141,7 @@ This guided flow writes a profile into: ## Mode + agent behavior -Runtime mode is configured in `codex-config.json`. +Runtime mode is configured in `codex-config.jsonc`. - `native`: default - `codex` diff --git a/docs/index.md b/docs/index.md index 95612bd..21b1de6 100644 --- a/docs/index.md +++ b/docs/index.md @@ -38,7 +38,7 @@ Use this page as the fast entrypoint for humans and agents. - `docs/examples/README.md` - `docs/examples/opencode.json` -- `docs/examples/codex-config.json` +- `docs/examples/codex-config.jsonc` ## Planning and Research diff --git a/docs/multi-account.md b/docs/multi-account.md index 8aba0c1..4502a66 100644 --- a/docs/multi-account.md +++ b/docs/multi-account.md @@ -118,7 +118,7 @@ Primary actions: - Add new account - Check quotas - Manage accounts (enable/disable) -- Configure models in `codex-config.json` +- Configure models in `codex-config.jsonc` - Delete all accounts Per-account actions: diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 380feaa..e592779 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -3,7 +3,7 @@ ## Quick checks 1. Confirm plugin is installed in resolved `/opencode.json` (`$XDG_CONFIG_HOME/opencode` when set, otherwise `~/.config/opencode`). -2. Confirm config exists at the resolved path (`OPENCODE_OPENAI_MULTI_CONFIG_PATH` when set, otherwise exactly one default path: `$XDG_CONFIG_HOME/opencode/codex-config.json` when `XDG_CONFIG_HOME` is set, else `~/.config/opencode/codex-config.json`). +2. 
Confirm config exists at the resolved path (`OPENCODE_OPENAI_MULTI_CONFIG_PATH` when set, otherwise the canonical default path `$XDG_CONFIG_HOME/opencode/codex-config.jsonc` when `XDG_CONFIG_HOME` is set, else `~/.config/opencode/codex-config.jsonc`; commented legacy `codex-config.json` is still accepted as a fallback). 3. Confirm auth files exist: - required runtime store: resolved `/codex-accounts.json` - optional legacy transfer source: resolved `/openai-codex-accounts.json` diff --git a/index.ts b/index.ts index ad4e980..a8c25ea 100644 --- a/index.ts +++ b/index.ts @@ -14,6 +14,7 @@ import { getCompatInputSanitizerEnabled, getCodexCompactionOverrideEnabled, getBehaviorSettings, + getCustomModels, getCollaborationProfileEnabled, getDebugEnabled, getHeaderSnapshotBodiesEnabled, @@ -157,7 +158,8 @@ export const OpenAIMultiAuthPlugin: Plugin = async (input) => { headerTransformDebug: getHeaderTransformDebugEnabled(cfg), collaborationProfileEnabled, orchestratorSubagentsEnabled: getOrchestratorSubagentsEnabled(cfg), - behaviorSettings: getBehaviorSettings(cfg) + behaviorSettings: getBehaviorSettings(cfg), + customModels: getCustomModels(cfg) }) const z = tool.schema diff --git a/lib/codex-native.ts b/lib/codex-native.ts index 1fd5199..a7013a4 100644 --- a/lib/codex-native.ts +++ b/lib/codex-native.ts @@ -8,6 +8,7 @@ import type { OpenAIAuthMode, RotationStrategy } from "./types.js" import type { BehaviorSettings, CodexSpoofMode, + CustomModelConfig, PersonalityOption, PluginRuntimeMode, PromptCacheKeyStrategy @@ -65,6 +66,7 @@ export { extractAccountId, extractAccountIdFromClaims, refreshAccessToken } from const INTERNAL_COLLABORATION_MODE_HEADER = "x-opencode-collaboration-mode-kind" const INTERNAL_COLLABORATION_AGENT_HEADER = "x-opencode-collaboration-agent-kind" const INTERNAL_CATALOG_SCOPE_HEADER = "x-opencode-catalog-scope-key" +const INTERNAL_SELECTED_MODEL_HEADER = "x-opencode-selected-model-slug" const SESSION_AFFINITY_MISSING_GRACE_MS = 15 * 60 * 1000 const 
REASONING_VARIANT_KEYS = ["none", "minimal", "low", "medium", "high", "xhigh"] as const @@ -150,6 +152,7 @@ export type CodexAuthPluginOptions = { log?: Logger personality?: PersonalityOption behaviorSettings?: BehaviorSettings + customModels?: Record mode?: PluginRuntimeMode quietMode?: boolean pidOffsetEnabled?: boolean @@ -172,7 +175,7 @@ type ConfigWithProviderVariants = Config & { { models?: Record< string, - { + Record & { variants?: Record> } > @@ -180,6 +183,18 @@ type ConfigWithProviderVariants = Config & { > } +function cloneConfigValue(value: T): T { + if (Array.isArray(value)) { + return value.map((entry) => cloneConfigValue(entry)) as T + } + if (typeof value === "object" && value !== null) { + return Object.fromEntries( + Object.entries(value as Record).map(([key, entry]) => [key, cloneConfigValue(entry)]) + ) as T + } + return value +} + function getSupportedReasoningEfforts(model: CodexModelInfo): string[] { return Array.from( new Set( @@ -230,6 +245,73 @@ function applyCatalogVariantOverridesToConfig(config: Config, catalogModels: Cod } } +function applyCustomModelsToConfig( + config: Config, + customModels: Record | undefined, + warn?: (message: string) => void +): void { + if (!customModels || Object.keys(customModels).length === 0) return + + const nextConfig = config as ConfigWithProviderVariants + const provider = (nextConfig.provider ??= {}) + const openai = (provider.openai ??= {}) + const models = (openai.models ??= {}) + + for (const [slug, customModel] of Object.entries(customModels)) { + const target = customModel.targetModel.trim() + const targetEntry = models[target] + if (!targetEntry) { + warn?.( + `[opencode-codex-auth] customModels.${slug}.targetModel points to ${JSON.stringify(target)}, but that model is not available in the current provider config. 
Skipping custom model synthesis.` + ) + delete models[slug] + continue + } + + const nextEntry = cloneConfigValue(targetEntry) + nextEntry.id = slug + nextEntry.slug = slug + nextEntry.model = slug + if (customModel.name) { + nextEntry.name = customModel.name + nextEntry.displayName = customModel.name + nextEntry.display_name = customModel.name + } + + const nextApi = + typeof nextEntry.api === "object" && nextEntry.api !== null && !Array.isArray(nextEntry.api) + ? (nextEntry.api as Record) + : {} + nextApi.id = target + nextEntry.api = nextApi + + const baseVariants = + typeof nextEntry.variants === "object" && nextEntry.variants !== null && !Array.isArray(nextEntry.variants) + ? (nextEntry.variants as Record>) + : {} + const overlayVariants = Object.fromEntries( + Object.entries(customModel.variants ?? {}).map(([variantName, variantValue]) => [ + variantName, + cloneConfigValue(variantValue ?? {}) + ]) + ) + nextEntry.variants = { + ...baseVariants, + ...Object.fromEntries( + Object.entries(overlayVariants).map(([variantName, variantValue]) => [ + variantName, + { + ...(baseVariants[variantName] ?? 
{}), + ...variantValue + } + ]) + ) + } + + models[slug] = nextEntry + } +} + export async function CodexAuthPlugin(input: PluginInput, opts: CodexAuthPluginOptions = {}): Promise { opts.log?.debug("codex-native init") const codexCompactionSummaryPrefixSessions = new Set() @@ -291,7 +373,9 @@ export async function CodexAuthPlugin(input: PluginInput, opts: CodexAuthPluginO applyCodexCatalogToProviderModels({ providerModels: providerModelsForCatalogSync, catalogModels: activeCatalogModels, - personality: opts.personality + personality: opts.personality, + customModels: opts.customModels, + warn: (message) => console.warn(message) }) } const setCatalogModels = (scopeKey: string | undefined, models: CodexModelInfo[] | undefined): void => { @@ -309,7 +393,9 @@ export async function CodexAuthPlugin(input: PluginInput, opts: CodexAuthPluginO applyCodexCatalogToProviderModels({ providerModels: providerModelsForCatalogSync, catalogModels: activeCatalogModels, - personality: opts.personality + personality: opts.personality, + customModels: opts.customModels, + warn: (message) => console.warn(message) }) } const getCatalogModels = (scopeKey?: string): CodexModelInfo[] | undefined => { @@ -371,6 +457,7 @@ export async function CodexAuthPlugin(input: PluginInput, opts: CodexAuthPluginO onEvent: (event) => opts.log?.debug("codex model catalog", event) }) applyCatalogVariantOverridesToConfig(config, catalogModels) + applyCustomModelsToConfig(config, opts.customModels, (message) => console.warn(message)) } catch (error) { if (error instanceof Error) { opts.log?.debug("config variant override failed", { error: error.message }) @@ -422,6 +509,7 @@ export async function CodexAuthPlugin(input: PluginInput, opts: CodexAuthPluginO projectPath: typeof input.worktree === "string" && input.worktree.trim() ? 
input.worktree : process.cwd(), remapDeveloperMessagesToUserEnabled, behaviorSettings: opts.behaviorSettings, + customModels: opts.customModels, personality: opts.personality, log: opts.log, quietMode: opts.quietMode === true, @@ -430,6 +518,7 @@ export async function CodexAuthPlugin(input: PluginInput, opts: CodexAuthPluginO headerTransformDebug: opts.headerTransformDebug === true, compatInputSanitizerEnabled: opts.compatInputSanitizer === true, internalCatalogScopeHeader: INTERNAL_CATALOG_SCOPE_HEADER, + internalSelectedModelHeader: INTERNAL_SELECTED_MODEL_HEADER, internalCollaborationModeHeader: INTERNAL_COLLABORATION_MODE_HEADER, internalCollaborationAgentHeader: INTERNAL_COLLABORATION_AGENT_HEADER, requestSnapshots, @@ -517,6 +606,7 @@ export async function CodexAuthPlugin(input: PluginInput, opts: CodexAuthPluginO spoofMode, requestCatalogScopeKey, internalCatalogScopeHeader: INTERNAL_CATALOG_SCOPE_HEADER, + internalSelectedModelHeader: INTERNAL_SELECTED_MODEL_HEADER, internalCollaborationModeHeader: INTERNAL_COLLABORATION_MODE_HEADER, internalCollaborationAgentHeader: INTERNAL_COLLABORATION_AGENT_HEADER, collaborationProfileEnabled, diff --git a/lib/codex-native/auth-menu-flow.ts b/lib/codex-native/auth-menu-flow.ts index e9d28ad..4cb0b7d 100644 --- a/lib/codex-native/auth-menu-flow.ts +++ b/lib/codex-native/auth-menu-flow.ts @@ -57,7 +57,7 @@ export async function runInteractiveAuthMenu(input: RunInteractiveAuthMenuInput) }, onConfigureModels: async () => { process.stdout.write( - "\nConfigure provider models in opencode.json and runtime flags in codex-config.json.\n\n" + "\nConfigure provider models in opencode.json and runtime flags in codex-config.jsonc.\n\n" ) }, onTransfer: async () => { diff --git a/lib/codex-native/chat-hooks.ts b/lib/codex-native/chat-hooks.ts index d3d5c14..dfa4e3b 100644 --- a/lib/codex-native/chat-hooks.ts +++ b/lib/codex-native/chat-hooks.ts @@ -6,10 +6,18 @@ import { getRuntimeDefaultsForModel, resolveInstructionsForModel } 
from "../mode import { applyCodexRuntimeDefaultsToParams, findCatalogModelForCandidates, + getCustomModelIncludeOverride, + getCustomModelParallelToolCallsOverride, + getCustomModelReasoningEffortOverride, + getCustomModelReasoningSummaryOverride, + getCustomModelTextVerbosityOverride, getModelLookupCandidates, - getModelThinkingSummariesOverride, - getModelVerbosityEnabledOverride, - getModelVerbosityOverride, + getModelIncludeOverride, + getModelParallelToolCallsOverride, + getModelReasoningEffortOverride, + getModelReasoningSummaryOverride, + getSelectedModelLookupCandidates, + getModelTextVerbosityOverride, getVariantLookupCandidates, resolvePersonalityForModel } from "./request-transform-model.js" @@ -33,10 +41,16 @@ import { resolveSubagentHeaderValue } from "./collaboration.js" -function normalizeVerbositySetting(value: unknown): "default" | "low" | "medium" | "high" | undefined { +function normalizeVerbositySetting(value: unknown): "default" | "low" | "medium" | "high" | "none" | undefined { if (typeof value !== "string") return undefined const normalized = value.trim().toLowerCase() - if (normalized === "default" || normalized === "low" || normalized === "medium" || normalized === "high") { + if ( + normalized === "default" || + normalized === "low" || + normalized === "medium" || + normalized === "high" || + normalized === "none" + ) { return normalized } return undefined @@ -77,6 +91,9 @@ export async function handleChatParamsHook(input: { }): Promise { if (input.hookInput.model.providerID !== "openai") return const modelOptions = isRecord(input.hookInput.model.options) ? 
input.hookInput.model.options : {} + const selectedModelCandidates = getSelectedModelLookupCandidates({ + id: input.hookInput.model.id + }) const modelCandidates = getModelLookupCandidates({ id: input.hookInput.model.id, api: { id: input.hookInput.model.api?.id } @@ -88,25 +105,55 @@ export async function handleChatParamsHook(input: { const catalogModelFallback = findCatalogModelForCandidates(input.lastCatalogModels, modelCandidates) const effectivePersonality = resolvePersonalityForModel({ behaviorSettings: input.behaviorSettings, - modelCandidates, + modelOptions, + modelCandidates: selectedModelCandidates, variantCandidates, fallback: input.fallbackPersonality }) - const modelThinkingSummariesOverride = getModelThinkingSummariesOverride( + const customModelReasoningEffortOverride = getCustomModelReasoningEffortOverride(modelOptions, variantCandidates) + const customModelReasoningSummaryOverride = getCustomModelReasoningSummaryOverride(modelOptions, variantCandidates) + const customModelTextVerbosityOverride = getCustomModelTextVerbosityOverride(modelOptions, variantCandidates) + const customModelIncludeOverride = getCustomModelIncludeOverride(modelOptions, variantCandidates) + const customModelParallelToolCallsOverride = getCustomModelParallelToolCallsOverride(modelOptions, variantCandidates) + const modelReasoningEffortOverride = getModelReasoningEffortOverride( input.behaviorSettings, - modelCandidates, + selectedModelCandidates, variantCandidates ) - const modelVerbosityEnabledOverride = getModelVerbosityEnabledOverride( + const modelReasoningSummaryOverride = getModelReasoningSummaryOverride( input.behaviorSettings, - modelCandidates, + selectedModelCandidates, + variantCandidates + ) + const modelTextVerbosityOverride = getModelTextVerbosityOverride( + input.behaviorSettings, + selectedModelCandidates, + variantCandidates + ) + const modelIncludeOverride = getModelIncludeOverride( + input.behaviorSettings, + selectedModelCandidates, + variantCandidates + ) 
+ const modelParallelToolCallsOverride = getModelParallelToolCallsOverride( + input.behaviorSettings, + selectedModelCandidates, variantCandidates ) - const modelVerbosityOverride = getModelVerbosityOverride(input.behaviorSettings, modelCandidates, variantCandidates) const globalBehavior = input.behaviorSettings?.global - const globalVerbosityEnabled = - typeof globalBehavior?.verbosityEnabled === "boolean" ? globalBehavior.verbosityEnabled : undefined - const globalVerbosity = normalizeVerbositySetting(globalBehavior?.verbosity) + const globalReasoningSummary = + typeof globalBehavior?.reasoningSummary === "string" + ? globalBehavior.reasoningSummary + : typeof globalBehavior?.reasoningSummaries === "boolean" + ? globalBehavior.reasoningSummaries + ? "auto" + : "none" + : undefined + const globalTextVerbosity = + normalizeVerbositySetting(globalBehavior?.textVerbosity) ?? + (typeof globalBehavior?.verbosityEnabled === "boolean" && globalBehavior.verbosityEnabled === false + ? "none" + : normalizeVerbositySetting(globalBehavior?.verbosity)) const catalogModelFromOptions = isRecord(modelOptions.codexCatalogModel) ? (modelOptions.codexCatalogModel as CodexModelInfo) : undefined @@ -139,10 +186,11 @@ export async function handleChatParamsHook(input: { if (asString(input.output.options.serviceTier) === undefined) { const resolvedServiceTier = resolveServiceTierForModel({ behaviorSettings: input.behaviorSettings, - modelCandidates, + modelOptions, + modelCandidates: selectedModelCandidates, variantCandidates }) - if (resolvedServiceTier && resolvedServiceTier !== "default") { + if (resolvedServiceTier && resolvedServiceTier !== "auto") { input.output.options.serviceTier = resolvedServiceTier } } @@ -153,10 +201,17 @@ export async function handleChatParamsHook(input: { applyCodexRuntimeDefaultsToParams({ modelOptions, modelToolCallCapable: input.hookInput.model.capabilities?.toolcall, - thinkingSummariesOverride: modelThinkingSummariesOverride ?? 
globalBehavior?.thinkingSummaries, - verbosityEnabledOverride: modelVerbosityEnabledOverride ?? globalVerbosityEnabled, - verbosityOverride: modelVerbosityOverride ?? globalVerbosity, + resolvedBehavior: { + reasoningEffort: + modelReasoningEffortOverride ?? customModelReasoningEffortOverride ?? globalBehavior?.reasoningEffort, + reasoningSummary: modelReasoningSummaryOverride ?? customModelReasoningSummaryOverride ?? globalReasoningSummary, + textVerbosity: modelTextVerbosityOverride ?? customModelTextVerbosityOverride ?? globalTextVerbosity, + include: modelIncludeOverride ?? customModelIncludeOverride ?? globalBehavior?.include, + parallelToolCalls: + modelParallelToolCallsOverride ?? customModelParallelToolCallsOverride ?? globalBehavior?.parallelToolCalls + }, preferCodexInstructions: input.spoofMode === "codex" && !preserveOrchestratorInstructions, + modelId: input.hookInput.model.id, output: input.output }) @@ -185,11 +240,12 @@ export async function handleChatParamsHook(input: { } export async function handleChatHeadersHook(input: { - hookInput: { model: { providerID?: string }; sessionID: string; agent?: unknown } + hookInput: { model: { providerID?: string; id?: string }; sessionID: string; agent?: unknown } output: { headers: Record } spoofMode: CodexSpoofMode requestCatalogScopeKey?: string internalCatalogScopeHeader: string + internalSelectedModelHeader: string internalCollaborationModeHeader: string internalCollaborationAgentHeader: string collaborationProfileEnabled: boolean @@ -200,6 +256,11 @@ export async function handleChatHeadersHook(input: { input.output.headers.originator = originator input.output.headers["User-Agent"] = resolveRequestUserAgent(input.spoofMode, originator) input.output.headers.session_id = input.hookInput.sessionID + if (typeof input.hookInput.model.id === "string" && input.hookInput.model.id.trim()) { + input.output.headers[input.internalSelectedModelHeader] = input.hookInput.model.id + } else { + delete 
input.output.headers[input.internalSelectedModelHeader] + } delete input.output.headers["OpenAI-Beta"] delete input.output.headers.conversation_id if (input.requestCatalogScopeKey) { diff --git a/lib/codex-native/openai-loader-fetch.ts b/lib/codex-native/openai-loader-fetch.ts index c69a9cf..4c69509 100644 --- a/lib/codex-native/openai-loader-fetch.ts +++ b/lib/codex-native/openai-loader-fetch.ts @@ -3,7 +3,13 @@ import { PluginFatalError, isPluginFatalError, toSyntheticErrorResponse } from " import type { Logger } from "../logger.js" import type { CodexModelInfo } from "../model-catalog.js" import type { RotationStrategy } from "../types.js" -import type { BehaviorSettings, CodexSpoofMode, PersonalityOption, PromptCacheKeyStrategy } from "../config.js" +import type { + BehaviorSettings, + CodexSpoofMode, + CustomModelConfig, + PersonalityOption, + PromptCacheKeyStrategy +} from "../config.js" import type { OpenAIAuthMode } from "../types.js" import type { QuotaThresholdTrackerState } from "../quota-threshold-alerts.js" import { acquireOpenAIAuth } from "./acquire-auth.js" @@ -16,6 +22,7 @@ import { type OutboundRequestPayloadTransformResult, transformOutboundRequestPayload } from "./request-transform-payload.js" +import { toReasoningSummaryPluginFatalError } from "./reasoning-summary.js" import type { SessionAffinityRuntimeState } from "./session-affinity-state.js" import { scheduleQuotaRefresh } from "./openai-loader-fetch-quota.js" import { @@ -36,6 +43,7 @@ export type CreateOpenAIFetchHandlerInput = { spoofMode: CodexSpoofMode remapDeveloperMessagesToUserEnabled: boolean behaviorSettings?: BehaviorSettings + customModels?: Record personality?: PersonalityOption promptCacheKeyStrategy?: PromptCacheKeyStrategy projectPath?: string @@ -46,6 +54,7 @@ export type CreateOpenAIFetchHandlerInput = { headerTransformDebug: boolean compatInputSanitizerEnabled: boolean internalCatalogScopeHeader?: string + internalSelectedModelHeader?: string 
internalCollaborationModeHeader: string internalCollaborationAgentHeader?: string requestSnapshots: SnapshotRecorder @@ -67,6 +76,7 @@ export type CreateOpenAIFetchHandlerInput = { export function createOpenAIFetchHandler(input: CreateOpenAIFetchHandlerInput) { const internalCatalogScopeHeader = input.internalCatalogScopeHeader ?? "x-opencode-catalog-scope-key" + const internalSelectedModelHeader = input.internalSelectedModelHeader ?? "x-opencode-selected-model-slug" const internalCollaborationAgentHeader = input.internalCollaborationAgentHeader ?? "x-opencode-collaboration-agent-kind" const quotaTrackerByIdentity = new Map() @@ -92,7 +102,9 @@ export function createOpenAIFetchHandler(input: CreateOpenAIFetchHandlerInput) { message: "Outbound request validation failed before preparing OpenAI request.", status: 400, type: "disallowed_outbound_request", - param: "request" + param: "request", + source: "request.url", + hint: "Check the outbound URL and request target before calling the OpenAI provider." }) ) } @@ -106,7 +118,9 @@ export function createOpenAIFetchHandler(input: CreateOpenAIFetchHandlerInput) { message: "Outbound request could not be prepared for OpenAI backend.", status: 400, type: "disallowed_outbound_request", - param: "request" + param: "request", + source: "request", + hint: "Ensure the outbound request can be constructed with a valid URL, method, headers, and body." 
}) ) } @@ -249,8 +263,12 @@ export function createOpenAIFetchHandler(input: CreateOpenAIFetchHandlerInput) { maxRedirects: 3, showToast: input.showToast, onAttemptRequest: async ({ attempt, maxAttempts, attemptReasonCode, request, auth, sessionKey }) => { + const selectedModelSlug = request.headers.get(internalSelectedModelHeader)?.trim() || undefined const requestCatalogScopeKey = request.headers.get(internalCatalogScopeHeader)?.trim() || selectedPreviousCatalogScopeKey + if (request.headers.has(internalSelectedModelHeader)) { + request.headers.delete(internalSelectedModelHeader) + } if (request.headers.has(internalCatalogScopeHeader)) { request.headers.delete(internalCatalogScopeHeader) } @@ -261,6 +279,7 @@ export function createOpenAIFetchHandler(input: CreateOpenAIFetchHandlerInput) { (selectedCatalogModels === undefined && Boolean(requestCatalogModels)) const payloadTransform: OutboundRequestPayloadTransformResult = await transformOutboundRequestPayload({ request, + selectedModelSlug, stripReasoningReplayEnabled: true, remapDeveloperMessagesToUserEnabled: input.remapDeveloperMessagesToUserEnabled, compatInputSanitizerEnabled: input.compatInputSanitizerEnabled, @@ -270,9 +289,14 @@ export function createOpenAIFetchHandler(input: CreateOpenAIFetchHandlerInput) { previousCatalogModels: requestCatalogModels, requestCatalogScopeChanged, fallbackPersonality: input.personality, - behaviorSettings: input.behaviorSettings + behaviorSettings: input.behaviorSettings, + customModels: input.customModels }) + if (payloadTransform.reasoningSummaryValidation) { + throw toReasoningSummaryPluginFatalError(payloadTransform.reasoningSummaryValidation) + } + if (input.headerTransformDebug) { await input.requestSnapshots.captureRequest("after-header-transform", payloadTransform.request, { spoofMode: input.spoofMode, @@ -367,7 +391,9 @@ export function createOpenAIFetchHandler(input: CreateOpenAIFetchHandlerInput) { message: "Outbound request validation failed before sending to 
OpenAI backend.", status: 400, type: "disallowed_outbound_request", - param: "request" + param: "request", + source: "request.url", + hint: "Check the rewritten outbound URL and request target before the request is sent." }) ) } @@ -392,7 +418,9 @@ export function createOpenAIFetchHandler(input: CreateOpenAIFetchHandlerInput) { message: "OpenAI request failed unexpectedly. Retry once, and if it persists run `opencode auth login`.", status: 502, type: "plugin_fetch_failed", - param: "request" + param: "request", + source: "request", + hint: "If retries keep failing, refresh auth state with `opencode auth login` and inspect plugin debug logs." }) ) } diff --git a/lib/codex-native/reasoning-summary.ts b/lib/codex-native/reasoning-summary.ts new file mode 100644 index 0000000..b089964 --- /dev/null +++ b/lib/codex-native/reasoning-summary.ts @@ -0,0 +1,132 @@ +import { PluginFatalError } from "../fatal-errors.js" +export const SUPPORTED_REASONING_SUMMARY_VALUES = ["auto", "concise", "detailed"] as const + +export type ReasoningSummaryValue = (typeof SUPPORTED_REASONING_SUMMARY_VALUES)[number] + +export type ReasoningSummaryValidationDiagnostic = { + actual: string + model?: string + source: string + sourceType: "request_option" | "catalog_default" +} + +function asString(value: unknown): string | undefined { + if (typeof value !== "string") return undefined + const trimmed = value.trim() + return trimmed ? 
trimmed : undefined +} + +export function inspectReasoningSummaryValue(input: unknown): { + state: "absent" | "disabled" | "valid" | "invalid" + raw?: string + value?: ReasoningSummaryValue +} { + const raw = asString(input) + const normalized = raw?.toLowerCase() + if (!normalized) return { state: "absent" } + if (normalized === "none") return { state: "disabled", raw } + if ( + normalized === SUPPORTED_REASONING_SUMMARY_VALUES[0] || + normalized === SUPPORTED_REASONING_SUMMARY_VALUES[1] || + normalized === SUPPORTED_REASONING_SUMMARY_VALUES[2] + ) { + return { state: "valid", raw, value: normalized } + } + return { state: "invalid", raw } +} + +export function resolveReasoningSummaryValue(input: { + explicitValue?: unknown + explicitSource: string + hasReasoning: boolean + configuredValue?: unknown + configuredSource?: string + supportsReasoningSummaries?: boolean + defaultReasoningSummaryFormat?: string + defaultReasoningSummarySource: string + model?: string +}): { value?: ReasoningSummaryValue; diagnostic?: ReasoningSummaryValidationDiagnostic } { + const explicit = inspectReasoningSummaryValue(input.explicitValue) + if (explicit.state === "invalid" && explicit.raw) { + return { + diagnostic: { + actual: explicit.raw, + source: input.explicitSource, + sourceType: "request_option" + } + } + } + if (explicit.state === "valid") { + return { value: explicit.value } + } + if (explicit.state === "disabled") { + return {} + } + + if (!input.hasReasoning) { + return {} + } + + const configured = inspectReasoningSummaryValue(input.configuredValue) + if (configured.state === "invalid" && configured.raw) { + return { + diagnostic: { + actual: configured.raw, + source: input.configuredSource ?? 
"config.reasoningSummary", + sourceType: "request_option" + } + } + } + if (configured.state === "valid") { + return { value: configured.value } + } + if (configured.state === "disabled") { + return {} + } + + if (input.supportsReasoningSummaries !== true) { + return {} + } + + const defaultValue = inspectReasoningSummaryValue(input.defaultReasoningSummaryFormat) + if (defaultValue.state === "invalid" && defaultValue.raw) { + return { + diagnostic: { + actual: defaultValue.raw, + model: input.model, + source: input.defaultReasoningSummarySource, + sourceType: "catalog_default" + } + } + } + if (defaultValue.state === "valid") { + return { value: defaultValue.value } + } + if (defaultValue.state === "disabled") { + return {} + } + + return { value: "auto" } +} + +export function toReasoningSummaryPluginFatalError(diagnostic: ReasoningSummaryValidationDiagnostic): PluginFatalError { + const supportedValues = [...SUPPORTED_REASONING_SUMMARY_VALUES, "none"].map((value) => `\`${value}\``).join(", ") + const subject = + diagnostic.sourceType === "catalog_default" + ? `selected model catalog default \`${diagnostic.source}\`${diagnostic.model ? ` for \`${diagnostic.model}\`` : ""}` + : `request setting \`${diagnostic.source}\`` + + const hint = + diagnostic.sourceType === "catalog_default" + ? 'This source is internal, not a user config key. Disable summaries with `reasoningSummary: "none"` if you need a workaround.' + : "Update the request to a supported reasoning summary value." + + return new PluginFatalError({ + message: `Invalid reasoning summary setting source: ${subject} is \`${diagnostic.actual}\`. 
Supported values are ${supportedValues}.`, + status: 400, + type: "invalid_reasoning_summary", + param: "reasoning.summary", + source: diagnostic.source, + hint + }) +} diff --git a/lib/codex-native/request-routing.ts b/lib/codex-native/request-routing.ts index 8657d0c..8dcc733 100644 --- a/lib/codex-native/request-routing.ts +++ b/lib/codex-native/request-routing.ts @@ -41,7 +41,9 @@ export function assertAllowedOutboundUrl(url: URL): void { "This plugin only proxies HTTPS requests to OpenAI/ChatGPT backends.", status: 400, type: "disallowed_outbound_protocol", - param: "request" + param: "request", + source: "request.url.protocol", + hint: "Use an https:// OpenAI or ChatGPT backend URL." }) } @@ -52,7 +54,9 @@ export function assertAllowedOutboundUrl(url: URL): void { "This plugin only proxies OpenAI/ChatGPT backend traffic without URL credentials.", status: 400, type: "disallowed_outbound_credentials", - param: "request" + param: "request", + source: "request.url", + hint: "Remove username/password credentials from the request URL." }) } @@ -64,7 +68,9 @@ export function assertAllowedOutboundUrl(url: URL): void { "This plugin only proxies OpenAI/ChatGPT backend traffic over the default HTTPS port.", status: 400, type: "disallowed_outbound_port", - param: "request" + param: "request", + source: "request.url.port", + hint: "Use the default HTTPS port or omit the explicit port." }) } @@ -75,6 +81,8 @@ export function assertAllowedOutboundUrl(url: URL): void { `Blocked outbound request to "${url.hostname}". ` + "This plugin only proxies OpenAI/ChatGPT backend traffic.", status: 400, type: "disallowed_outbound_host", - param: "request" + param: "request", + source: "request.url.host", + hint: "Use an OpenAI or ChatGPT backend host such as api.openai.com or chatgpt.com." 
}) } diff --git a/lib/codex-native/request-transform-model-service-tier.ts b/lib/codex-native/request-transform-model-service-tier.ts index 077f109..4f2449a 100644 --- a/lib/codex-native/request-transform-model-service-tier.ts +++ b/lib/codex-native/request-transform-model-service-tier.ts @@ -1,4 +1,5 @@ -import type { BehaviorSettings, ServiceTierOption } from "../config.js" +import type { BehaviorSettings, ModelBehaviorOverride, ServiceTierOption } from "../config.js" +import type { CustomModelBehaviorConfig } from "../model-catalog.js" import { isRecord } from "../util.js" const EFFORT_SUFFIX_REGEX = /-(none|minimal|low|medium|high|xhigh)$/i @@ -30,7 +31,10 @@ function resolveCaseInsensitiveEntry(entries: Record | undefined, function normalizeServiceTierSetting(value: unknown): ServiceTierOption | undefined { const normalized = asString(value)?.toLowerCase() - if (normalized === "default" || normalized === "priority" || normalized === "flex") { + if (normalized === "default" || normalized === "auto") { + return "auto" + } + if (normalized === "priority" || normalized === "flex") { return normalized } return undefined @@ -40,6 +44,35 @@ function stripEffortSuffix(value: string): string { return value.replace(EFFORT_SUFFIX_REGEX, "") } +function readCustomModelConfig(options: Record): CustomModelBehaviorConfig | undefined { + const raw = options.codexCustomModelConfig + if (!isRecord(raw)) return undefined + const targetModel = asString(raw.targetModel) + if (!targetModel) return undefined + return { + targetModel, + ...(asString(raw.name) ? { name: asString(raw.name) } : {}), + ...(typeof raw.serviceTier === "string" ? { serviceTier: raw.serviceTier as ServiceTierOption } : {}), + ...(isRecord(raw.variants) ? 
{ variants: raw.variants as Record } : {}) + } +} + +export function getCustomModelServiceTierOverride( + modelOptions: Record, + variantCandidates: string[] +): ServiceTierOption | undefined { + const customModel = readCustomModelConfig(modelOptions) + if (!customModel) return undefined + + for (const variantCandidate of variantCandidates) { + const variantEntry = resolveCaseInsensitiveEntry(customModel.variants, variantCandidate) + const variantServiceTier = normalizeServiceTierSetting(variantEntry?.serviceTier) + if (variantServiceTier) return variantServiceTier + } + + return normalizeServiceTierSetting(customModel.serviceTier) +} + export function getRequestBodyVariantCandidates(input: { body: Record; modelSlug: string }): string[] { const out: string[] = [] const seen = new Set() @@ -102,6 +135,7 @@ export function getModelServiceTierOverride( export function resolveServiceTierForModel(input: { behaviorSettings?: BehaviorSettings + modelOptions?: Record modelCandidates: string[] variantCandidates: string[] }): ServiceTierOption | undefined { @@ -112,5 +146,10 @@ export function resolveServiceTierForModel(input: { ) if (modelOverride) return modelOverride + const customModelOverride = input.modelOptions + ? 
getCustomModelServiceTierOverride(input.modelOptions, input.variantCandidates) + : undefined + if (customModelOverride) return customModelOverride + return normalizeServiceTierSetting(input.behaviorSettings?.global?.serviceTier) } diff --git a/lib/codex-native/request-transform-model.ts b/lib/codex-native/request-transform-model.ts index f2721f4..f7963b8 100644 --- a/lib/codex-native/request-transform-model.ts +++ b/lib/codex-native/request-transform-model.ts @@ -1,6 +1,7 @@ -import type { BehaviorSettings, PersonalityOption } from "../config.js" -import type { CodexModelInfo } from "../model-catalog.js" +import type { BehaviorSettings, CustomModelConfig, ModelBehaviorOverride, PersonalityOption } from "../config.js" +import type { CodexModelInfo, CustomModelBehaviorConfig } from "../model-catalog.js" import { isRecord } from "../util.js" +import { resolveReasoningSummaryValue } from "./reasoning-summary.js" const EFFORT_SUFFIX_REGEX = /-(none|minimal|low|medium|high|xhigh)$/i @@ -15,11 +16,9 @@ function asStringArray(value: unknown): string[] | undefined { return value.filter((item): item is string => typeof item === "string" && item.trim().length > 0) } -function normalizeReasoningSummaryOption(value: unknown): "auto" | "concise" | "detailed" | undefined { - const normalized = asString(value)?.toLowerCase() - if (!normalized || normalized === "none") return undefined - if (normalized === "auto" || normalized === "concise" || normalized === "detailed") return normalized - return undefined +function normalizeCustomIncludeOptions(value: unknown): CustomModelBehaviorConfig["include"] | undefined { + const include = asStringArray(value) + return include as CustomModelBehaviorConfig["include"] | undefined } function normalizeTextVerbosity(value: unknown): "low" | "medium" | "high" | undefined { @@ -29,10 +28,16 @@ function normalizeTextVerbosity(value: unknown): "low" | "medium" | "high" | und return undefined } -function normalizeVerbositySetting(value: unknown): 
"default" | "low" | "medium" | "high" | undefined { +function normalizeVerbositySetting(value: unknown): "default" | "low" | "medium" | "high" | "none" | undefined { const normalized = asString(value)?.toLowerCase() if (!normalized) return undefined - if (normalized === "default" || normalized === "low" || normalized === "medium" || normalized === "high") { + if ( + normalized === "default" || + normalized === "low" || + normalized === "medium" || + normalized === "high" || + normalized === "none" + ) { return normalized } return undefined @@ -94,6 +99,82 @@ function normalizePersonalityKey(value: unknown): string | undefined { return normalized } +function readCustomModelConfig(options: Record): CustomModelBehaviorConfig | undefined { + const raw = options.codexCustomModelConfig + if (!isRecord(raw)) return undefined + const targetModel = asString(raw.targetModel) + if (!targetModel) return undefined + const variants = isRecord(raw.variants) ? (raw.variants as Record) : undefined + + return { + targetModel, + ...(asString(raw.name) ? { name: asString(raw.name) } : {}), + ...(normalizePersonalityKey(raw.personality) ? { personality: normalizePersonalityKey(raw.personality) } : {}), + ...(asString(raw.reasoningEffort) ? { reasoningEffort: asString(raw.reasoningEffort) } : {}), + ...(normalizeVerbositySetting(raw.textVerbosity) + ? { textVerbosity: normalizeVerbositySetting(raw.textVerbosity) } + : {}), + ...(typeof raw.serviceTier === "string" && + (raw.serviceTier === "auto" || raw.serviceTier === "priority" || raw.serviceTier === "flex") + ? { serviceTier: raw.serviceTier } + : {}), + ...(Array.isArray(raw.include) ? { include: normalizeCustomIncludeOptions(raw.include) } : {}), + ...(typeof raw.parallelToolCalls === "boolean" ? 
{ parallelToolCalls: raw.parallelToolCalls } : {}), + ...(typeof raw.reasoningSummary === "string" && + (raw.reasoningSummary === "auto" || + raw.reasoningSummary === "concise" || + raw.reasoningSummary === "detailed" || + raw.reasoningSummary === "none") + ? { reasoningSummary: raw.reasoningSummary } + : {}), + ...(variants ? { variants } : {}) + } +} + +function getCustomModelBehaviorOverrideValue( + options: Record, + variantCandidates: string[], + selector: (entry: ModelBehaviorOverride) => T | undefined +): T | undefined { + const config = readCustomModelConfig(options) + if (!config) return undefined + + for (const variantCandidate of variantCandidates) { + const variantEntry = resolveCaseInsensitiveEntry(config.variants, variantCandidate) + if (!variantEntry) continue + const variantValue = selector(variantEntry) + if (variantValue !== undefined) return variantValue + } + + return selector(config) +} + +export function getConfiguredCustomModelBehaviorOverrideValue( + customModels: Record | undefined, + modelCandidates: string[], + variantCandidates: string[], + selector: (entry: ModelBehaviorOverride) => T | undefined +): T | undefined { + if (!customModels) return undefined + + for (const candidate of getModelLookupCandidatesWithEffortFallback(modelCandidates)) { + const entry = resolveCaseInsensitiveEntry(customModels, candidate) + if (!entry) continue + + for (const variantCandidate of variantCandidates) { + const variantEntry = resolveCaseInsensitiveEntry(entry.variants, variantCandidate) + if (!variantEntry) continue + const variantValue = selector(variantEntry) + if (variantValue !== undefined) return variantValue + } + + const modelValue = selector(entry) + if (modelValue !== undefined) return modelValue + } + + return undefined +} + export function getModelLookupCandidates(model: { id?: string; api?: { id?: string } }): string[] { const out: string[] = [] const seen = new Set() @@ -113,6 +194,23 @@ export function getModelLookupCandidates(model: { 
id?: string; api?: { id?: stri return out } +export function getSelectedModelLookupCandidates(model: { id?: string }): string[] { + const out: string[] = [] + const seen = new Set() + const add = (value: string | undefined) => { + const trimmed = value?.trim() + if (!trimmed) return + if (seen.has(trimmed)) return + seen.add(trimmed) + out.push(trimmed) + } + + add(model.id) + add(model.id?.split("/").pop()) + + return out +} + export function getVariantLookupCandidates(input: { message?: unknown; modelCandidates: string[] }): string[] { const out: string[] = [] const seen = new Set() @@ -181,83 +279,117 @@ function resolveCaseInsensitiveEntry(entries: Record | undefined, return undefined } -function getModelPersonalityOverride( +function getModelLookupCandidatesWithEffortFallback(modelCandidates: string[]): string[] { + const out: string[] = [] + const seen = new Set() + const add = (value: string | undefined) => { + const trimmed = value?.trim() + if (!trimmed || seen.has(trimmed)) return + seen.add(trimmed) + out.push(trimmed) + } + + for (const candidate of modelCandidates) { + add(candidate) + add(stripEffortSuffix(candidate)) + } + + return out +} + +function getModelBehaviorOverrideValue( behaviorSettings: BehaviorSettings | undefined, modelCandidates: string[], - variantCandidates: string[] -): string | undefined { + variantCandidates: string[], + selector: (entry: ModelBehaviorOverride) => T | undefined +): T | undefined { const models = behaviorSettings?.perModel if (!models) return undefined - for (const candidate of modelCandidates) { + for (const candidate of getModelLookupCandidatesWithEffortFallback(modelCandidates)) { const entry = resolveCaseInsensitiveEntry(models, candidate) if (!entry) continue for (const variantCandidate of variantCandidates) { const variantEntry = resolveCaseInsensitiveEntry(entry.variants, variantCandidate) - const variantPersonality = normalizePersonalityKey(variantEntry?.personality) - if (variantPersonality) return 
variantPersonality + if (!variantEntry) continue + const variantValue = selector(variantEntry) + if (variantValue !== undefined) return variantValue } - const modelPersonality = normalizePersonalityKey(entry.personality) - if (modelPersonality) return modelPersonality + const modelValue = selector(entry) + if (modelValue !== undefined) return modelValue } return undefined } -export function getModelThinkingSummariesOverride( +function getModelPersonalityOverride( behaviorSettings: BehaviorSettings | undefined, modelCandidates: string[], variantCandidates: string[] -): boolean | undefined { - const models = behaviorSettings?.perModel - if (!models) return undefined +): string | undefined { + return getModelBehaviorOverrideValue(behaviorSettings, modelCandidates, variantCandidates, (entry) => + normalizePersonalityKey(entry.personality) + ) +} - for (const candidate of modelCandidates) { - const entry = resolveCaseInsensitiveEntry(models, candidate) - if (!entry) continue +export function getModelReasoningEffortOverride( + behaviorSettings: BehaviorSettings | undefined, + modelCandidates: string[], + variantCandidates: string[] +): string | undefined { + return getModelBehaviorOverrideValue(behaviorSettings, modelCandidates, variantCandidates, (entry) => + asString(entry.reasoningEffort) + ) +} - for (const variantCandidate of variantCandidates) { - const variantEntry = resolveCaseInsensitiveEntry(entry.variants, variantCandidate) - if (typeof variantEntry?.thinkingSummaries === "boolean") { - return variantEntry.thinkingSummaries - } +export function getModelReasoningSummaryOverride( + behaviorSettings: BehaviorSettings | undefined, + modelCandidates: string[], + variantCandidates: string[] +): "auto" | "concise" | "detailed" | "none" | undefined { + return getModelBehaviorOverrideValue(behaviorSettings, modelCandidates, variantCandidates, (entry) => { + const normalized = asString(entry.reasoningSummary)?.toLowerCase() + if (normalized === "auto" || normalized === 
"concise" || normalized === "detailed" || normalized === "none") { + return normalized } - - if (typeof entry.thinkingSummaries === "boolean") { - return entry.thinkingSummaries + if (typeof entry.reasoningSummaries === "boolean") { + return entry.reasoningSummaries ? "auto" : "none" } - } - - return undefined + return undefined + }) } -export function getModelVerbosityEnabledOverride( +export function getModelReasoningSummariesOverride( behaviorSettings: BehaviorSettings | undefined, modelCandidates: string[], variantCandidates: string[] ): boolean | undefined { - const models = behaviorSettings?.perModel - if (!models) return undefined - - for (const candidate of modelCandidates) { - const entry = resolveCaseInsensitiveEntry(models, candidate) - if (!entry) continue - - for (const variantCandidate of variantCandidates) { - const variantEntry = resolveCaseInsensitiveEntry(entry.variants, variantCandidate) - if (typeof variantEntry?.verbosityEnabled === "boolean") { - return variantEntry.verbosityEnabled - } - } + const summary = getModelReasoningSummaryOverride(behaviorSettings, modelCandidates, variantCandidates) + return summary === undefined ? 
undefined : summary !== "none" +} - if (typeof entry.verbosityEnabled === "boolean") { - return entry.verbosityEnabled - } - } +export function getModelTextVerbosityOverride( + behaviorSettings: BehaviorSettings | undefined, + modelCandidates: string[], + variantCandidates: string[] +): "default" | "low" | "medium" | "high" | "none" | undefined { + return getModelBehaviorOverrideValue(behaviorSettings, modelCandidates, variantCandidates, (entry) => { + const textVerbosity = normalizeVerbositySetting(entry.textVerbosity) + if (textVerbosity) return textVerbosity + if (typeof entry.verbosityEnabled === "boolean" && entry.verbosityEnabled === false) return "none" + return normalizeVerbositySetting(entry.verbosity) + }) +} - return undefined +export function getModelVerbosityEnabledOverride( + behaviorSettings: BehaviorSettings | undefined, + modelCandidates: string[], + variantCandidates: string[] +): boolean | undefined { + const textVerbosity = getModelTextVerbosityOverride(behaviorSettings, modelCandidates, variantCandidates) + return textVerbosity === undefined ? 
undefined : textVerbosity !== "none" } export function getModelVerbosityOverride( @@ -265,28 +397,112 @@ export function getModelVerbosityOverride( modelCandidates: string[], variantCandidates: string[] ): "default" | "low" | "medium" | "high" | undefined { - const models = behaviorSettings?.perModel - if (!models) return undefined + const textVerbosity = getModelTextVerbosityOverride(behaviorSettings, modelCandidates, variantCandidates) + if (!textVerbosity || textVerbosity === "none") return undefined + return textVerbosity +} - for (const candidate of modelCandidates) { - const entry = resolveCaseInsensitiveEntry(models, candidate) - if (!entry) continue +export function getModelIncludeOverride( + behaviorSettings: BehaviorSettings | undefined, + modelCandidates: string[], + variantCandidates: string[] +): string[] | undefined { + return getModelBehaviorOverrideValue(behaviorSettings, modelCandidates, variantCandidates, (entry) => { + const include = asStringArray(entry.include) + return include && include.length > 0 ? include : undefined + }) +} - for (const variantCandidate of variantCandidates) { - const variantEntry = resolveCaseInsensitiveEntry(entry.variants, variantCandidate) - const variantVerbosity = normalizeVerbositySetting(variantEntry?.verbosity) - if (variantVerbosity) return variantVerbosity +export function getModelParallelToolCallsOverride( + behaviorSettings: BehaviorSettings | undefined, + modelCandidates: string[], + variantCandidates: string[] +): boolean | undefined { + return getModelBehaviorOverrideValue(behaviorSettings, modelCandidates, variantCandidates, (entry) => + typeof entry.parallelToolCalls === "boolean" ? 
entry.parallelToolCalls : undefined + ) +} + +export function getCustomModelReasoningEffortOverride( + options: Record, + variantCandidates: string[] +): string | undefined { + return getCustomModelBehaviorOverrideValue(options, variantCandidates, (entry) => asString(entry.reasoningEffort)) +} + +export function getCustomModelReasoningSummaryOverride( + options: Record, + variantCandidates: string[] +): "auto" | "concise" | "detailed" | "none" | undefined { + return getCustomModelBehaviorOverrideValue(options, variantCandidates, (entry) => { + const normalized = asString(entry.reasoningSummary)?.toLowerCase() + if (normalized === "auto" || normalized === "concise" || normalized === "detailed" || normalized === "none") { + return normalized } + return undefined + }) +} - const modelVerbosity = normalizeVerbositySetting(entry.verbosity) - if (modelVerbosity) return modelVerbosity - } +export function getConfiguredCustomModelReasoningSummaryOverride( + customModels: Record | undefined, + modelCandidates: string[], + variantCandidates: string[] +): "auto" | "concise" | "detailed" | "none" | undefined { + return getConfiguredCustomModelBehaviorOverrideValue(customModels, modelCandidates, variantCandidates, (entry) => { + const normalized = asString(entry.reasoningSummary)?.toLowerCase() + if (normalized === "auto" || normalized === "concise" || normalized === "detailed" || normalized === "none") { + return normalized + } + if (typeof entry.reasoningSummaries === "boolean") { + return entry.reasoningSummaries ? 
"auto" : "none" + } + return undefined + }) +} - return undefined +export function getCustomModelTextVerbosityOverride( + options: Record, + variantCandidates: string[] +): "default" | "low" | "medium" | "high" | "none" | undefined { + return getCustomModelBehaviorOverrideValue(options, variantCandidates, (entry) => { + const textVerbosity = normalizeVerbositySetting(entry.textVerbosity) + if (textVerbosity) return textVerbosity + if (typeof entry.verbosityEnabled === "boolean" && entry.verbosityEnabled === false) return "none" + return normalizeVerbositySetting(entry.verbosity) + }) +} + +export function getCustomModelIncludeOverride( + options: Record, + variantCandidates: string[] +): string[] | undefined { + return getCustomModelBehaviorOverrideValue(options, variantCandidates, (entry) => { + const include = asStringArray(entry.include) + return include && include.length > 0 ? include : undefined + }) +} + +export function getCustomModelParallelToolCallsOverride( + options: Record, + variantCandidates: string[] +): boolean | undefined { + return getCustomModelBehaviorOverrideValue(options, variantCandidates, (entry) => + typeof entry.parallelToolCalls === "boolean" ? entry.parallelToolCalls : undefined + ) +} + +export function getCustomModelPersonalityOverride( + options: Record, + variantCandidates: string[] +): string | undefined { + return getCustomModelBehaviorOverrideValue(options, variantCandidates, (entry) => + normalizePersonalityKey(entry.personality) + ) } export function resolvePersonalityForModel(input: { behaviorSettings?: BehaviorSettings + modelOptions?: Record modelCandidates: string[] variantCandidates: string[] fallback?: PersonalityOption @@ -298,6 +514,11 @@ export function resolvePersonalityForModel(input: { ) if (modelOverride) return modelOverride + const customModelOverride = input.modelOptions + ? 
getCustomModelPersonalityOverride(input.modelOptions, input.variantCandidates) + : undefined + if (customModelOverride) return customModelOverride + const globalOverride = normalizePersonalityKey(input.behaviorSettings?.global?.personality) if (globalOverride) return globalOverride @@ -317,9 +538,14 @@ export function applyResolvedCodexRuntimeDefaults(input: { supportsVerbosity?: boolean } modelToolCallCapable: boolean | undefined - thinkingSummariesOverride: boolean | undefined - verbosityEnabledOverride: boolean | undefined - verbosityOverride: "default" | "low" | "medium" | "high" | undefined + resolvedBehavior: { + reasoningEffort?: string + reasoningSummary?: "auto" | "concise" | "detailed" | "none" + textVerbosity?: "default" | "low" | "medium" | "high" | "none" + include?: string[] + parallelToolCalls?: boolean + } + modelId?: string preferCodexInstructions: boolean }): void { const options = input.options @@ -330,34 +556,35 @@ export function applyResolvedCodexRuntimeDefaults(input: { options.instructions = codexInstructions } - if (asString(options.reasoningEffort) === undefined && defaults.defaultReasoningEffort) { - options.reasoningEffort = defaults.defaultReasoningEffort + if (asString(options.reasoningEffort) === undefined) { + if (input.resolvedBehavior.reasoningEffort) { + options.reasoningEffort = input.resolvedBehavior.reasoningEffort + } else if (defaults.defaultReasoningEffort) { + options.reasoningEffort = defaults.defaultReasoningEffort + } } const reasoningEffort = asString(options.reasoningEffort) const hasReasoning = reasoningEffort !== undefined && reasoningEffort !== "none" const rawReasoningSummary = asString(options.reasoningSummary) - const hadExplicitReasoningSummary = rawReasoningSummary !== undefined - const currentReasoningSummary = normalizeReasoningSummaryOption(rawReasoningSummary) - if (rawReasoningSummary !== undefined) { - if (currentReasoningSummary) { - options.reasoningSummary = currentReasoningSummary - } else { - delete 
options.reasoningSummary - } - } - if (!hadExplicitReasoningSummary && currentReasoningSummary === undefined) { - if (hasReasoning && (defaults.supportsReasoningSummaries === true || input.thinkingSummariesOverride === true)) { - if (input.thinkingSummariesOverride === false) { - delete options.reasoningSummary - } else { - if (defaults.reasoningSummaryFormat?.toLowerCase() === "none") { - delete options.reasoningSummary - } else { - options.reasoningSummary = defaults.reasoningSummaryFormat ?? "auto" - } - } - } + const reasoningSummary = resolveReasoningSummaryValue({ + explicitValue: rawReasoningSummary, + explicitSource: "options.reasoningSummary", + hasReasoning, + configuredValue: input.resolvedBehavior.reasoningSummary, + configuredSource: "config.reasoningSummary", + supportsReasoningSummaries: defaults.supportsReasoningSummaries, + defaultReasoningSummaryFormat: defaults.reasoningSummaryFormat, + defaultReasoningSummarySource: "codexRuntimeDefaults.reasoningSummaryFormat", + model: input.modelId + }) + if (reasoningSummary.value) { + options.reasoningSummary = reasoningSummary.value + } else if ( + rawReasoningSummary?.trim().toLowerCase() === "none" || + input.resolvedBehavior.reasoningSummary === "none" + ) { + delete options.reasoningSummary } const rawTextVerbosity = asString(options.textVerbosity) @@ -366,11 +593,10 @@ export function applyResolvedCodexRuntimeDefaults(input: { delete options.textVerbosity } - const verbosityEnabled = input.verbosityEnabledOverride ?? true - const verbositySetting = input.verbosityOverride ?? "default" const supportsVerbosity = defaults.supportsVerbosity !== false + const verbositySetting = input.resolvedBehavior.textVerbosity ?? 
"default" - if (!supportsVerbosity || !verbosityEnabled) { + if (!supportsVerbosity || verbositySetting === "none") { delete options.textVerbosity } else if (normalizeTextVerbosity(options.textVerbosity) === undefined) { if (verbositySetting === "default") { @@ -387,13 +613,21 @@ export function applyResolvedCodexRuntimeDefaults(input: { } if (typeof options.parallelToolCalls !== "boolean") { - if (defaults.supportsParallelToolCalls !== undefined) { + if (input.resolvedBehavior.parallelToolCalls !== undefined) { + options.parallelToolCalls = input.resolvedBehavior.parallelToolCalls + } else if (defaults.supportsParallelToolCalls !== undefined) { options.parallelToolCalls = defaults.supportsParallelToolCalls } else if (input.modelToolCallCapable !== undefined) { options.parallelToolCalls = input.modelToolCallCapable } } + const configuredInclude = input.resolvedBehavior.include ?? [] + if (configuredInclude.length > 0) { + const include = asStringArray(options.include) ?? [] + options.include = mergeUnique([...include, ...configuredInclude]) + } + const shouldIncludeReasoning = hasReasoning && ((asString(options.reasoningSummary) !== undefined && @@ -409,10 +643,15 @@ export function applyResolvedCodexRuntimeDefaults(input: { export function applyCodexRuntimeDefaultsToParams(input: { modelOptions: Record modelToolCallCapable: boolean | undefined - thinkingSummariesOverride: boolean | undefined - verbosityEnabledOverride: boolean | undefined - verbosityOverride: "default" | "low" | "medium" | "high" | undefined + resolvedBehavior: { + reasoningEffort?: string + reasoningSummary?: "auto" | "concise" | "detailed" | "none" + textVerbosity?: "default" | "low" | "medium" | "high" | "none" + include?: string[] + parallelToolCalls?: boolean + } preferCodexInstructions: boolean + modelId?: string output: ChatParamsOutput }): void { const modelOptions = input.modelOptions @@ -421,9 +660,8 @@ export function applyCodexRuntimeDefaultsToParams(input: { codexInstructions: 
asString(modelOptions.codexInstructions), defaults: readModelRuntimeDefaults(modelOptions), modelToolCallCapable: input.modelToolCallCapable, - thinkingSummariesOverride: input.thinkingSummariesOverride, - verbosityEnabledOverride: input.verbosityEnabledOverride, - verbosityOverride: input.verbosityOverride, + resolvedBehavior: input.resolvedBehavior, + modelId: input.modelId, preferCodexInstructions: input.preferCodexInstructions }) } diff --git a/lib/codex-native/request-transform-payload.ts b/lib/codex-native/request-transform-payload.ts index 9068efc..d04815d 100644 --- a/lib/codex-native/request-transform-payload.ts +++ b/lib/codex-native/request-transform-payload.ts @@ -1,13 +1,16 @@ -import type { BehaviorSettings, PersonalityOption } from "../config.js" +import type { BehaviorSettings, CustomModelConfig, PersonalityOption } from "../config.js" import type { CodexModelInfo } from "../model-catalog.js" import { getRuntimeDefaultsForModel, resolveInstructionsForModel } from "../model-catalog.js" import { sanitizeRequestPayloadForCompat } from "../compat-sanitizer.js" import { isRecord } from "../util.js" import { findCatalogModelForCandidates, + getConfiguredCustomModelReasoningSummaryOverride, getModelLookupCandidates, + getModelReasoningSummaryOverride, resolvePersonalityForModel } from "./request-transform-model.js" +import { type ReasoningSummaryValidationDiagnostic, resolveReasoningSummaryValue } from "./reasoning-summary.js" import { getRequestBodyVariantCandidates } from "./request-transform-model-service-tier.js" import { type CompatSanitizerTransformResult, @@ -83,6 +86,7 @@ export function applyGpt54LongContextClampsToPayload(payload: Record } export type OutboundRequestPayloadTransformResult = { @@ -104,6 +109,7 @@ export type OutboundRequestPayloadTransformResult = { promptCacheKey: PromptCacheKeyTransformResult compatSanitizer: CompatSanitizerTransformResult serviceTier: ServiceTierTransformResult + reasoningSummaryValidation?: 
ReasoningSummaryValidationDiagnostic } export type ServiceTierTransformResult = { @@ -175,7 +181,8 @@ export async function transformOutboundRequestPayload( compatSanitizer: input.compatInputSanitizerEnabled ? { ...disabledCompatSanitizer, reason: "non_post" } : disabledCompatSanitizer, - serviceTier: disabledServiceTier + serviceTier: disabledServiceTier, + reasoningSummaryValidation: undefined } } @@ -196,7 +203,8 @@ export async function transformOutboundRequestPayload( compatSanitizer: input.compatInputSanitizerEnabled ? { ...disabledCompatSanitizer, reason: "invalid_json" } : disabledCompatSanitizer, - serviceTier: disabledServiceTier + serviceTier: disabledServiceTier, + reasoningSummaryValidation: undefined } } @@ -214,7 +222,8 @@ export async function transformOutboundRequestPayload( compatSanitizer: input.compatInputSanitizerEnabled ? { ...disabledCompatSanitizer, reason: "empty_body" } : disabledCompatSanitizer, - serviceTier: disabledServiceTier + serviceTier: disabledServiceTier, + reasoningSummaryValidation: undefined } } @@ -235,7 +244,8 @@ export async function transformOutboundRequestPayload( compatSanitizer: input.compatInputSanitizerEnabled ? { ...disabledCompatSanitizer, reason: "invalid_json" } : disabledCompatSanitizer, - serviceTier: disabledServiceTier + serviceTier: disabledServiceTier, + reasoningSummaryValidation: undefined } } @@ -253,7 +263,8 @@ export async function transformOutboundRequestPayload( compatSanitizer: input.compatInputSanitizerEnabled ? { ...disabledCompatSanitizer, reason: "non_object_body" } : disabledCompatSanitizer, - serviceTier: disabledServiceTier + serviceTier: disabledServiceTier, + reasoningSummaryValidation: undefined } } @@ -290,6 +301,13 @@ export async function transformOutboundRequestPayload( const gpt54LongContextClampChanged = input.gpt54LongContextClampEnabled !== false ? 
applyGpt54LongContextClampsToPayload(finalPayload) : false const serviceTier = disabledServiceTier + const reasoningSummaryValidation = validateReasoningSummaryPayload({ + payload: finalPayload, + selectedModelSlug: input.selectedModelSlug, + catalogModels: input.catalogModels, + behaviorSettings: input.behaviorSettings, + customModels: input.customModels + }) changed = changed || compatSanitizer.changed || @@ -305,7 +323,8 @@ export async function transformOutboundRequestPayload( developerRoleRemap, promptCacheKey, compatSanitizer, - serviceTier: { ...serviceTier, request: input.request } + serviceTier: { ...serviceTier, request: input.request }, + reasoningSummaryValidation } } @@ -319,7 +338,8 @@ export async function transformOutboundRequestPayload( serviceTier: { ...serviceTier, request: input.request - } + }, + reasoningSummaryValidation } } @@ -456,6 +476,69 @@ function syncReasoningEncryptedContentInclude(input: { return false } +function validateReasoningSummaryPayload(input: { + payload: Record + selectedModelSlug?: string + catalogModels?: CodexModelInfo[] + behaviorSettings?: BehaviorSettings + customModels?: Record +}): ReasoningSummaryValidationDiagnostic | undefined { + const modelSlug = asString(input.payload.model) + const selectedModelSlug = asString(input.selectedModelSlug) + if (!modelSlug && !selectedModelSlug) return undefined + + const modelCandidates = getModelLookupCandidates({ + id: modelSlug, + api: { id: modelSlug } + }) + const configuredModelCandidates = selectedModelSlug + ? getModelLookupCandidates({ + id: selectedModelSlug, + api: { id: modelSlug } + }) + : modelCandidates + const variantCandidates = getRequestBodyVariantCandidates({ + body: input.payload, + modelSlug: modelSlug ?? selectedModelSlug ?? "" + }) + const reasoning = isRecord(input.payload.reasoning) ? 
input.payload.reasoning : undefined + const reasoningEffort = asString(reasoning?.effort) + const reasoningSummary = asString(reasoning?.summary) + const globalBehavior = input.behaviorSettings?.global + const catalogModel = findCatalogModelForCandidates(input.catalogModels, modelCandidates) + const defaults = catalogModel ? getRuntimeDefaultsForModel(catalogModel) : undefined + const modelReasoningSummaryOverride = getModelReasoningSummaryOverride( + input.behaviorSettings, + configuredModelCandidates, + variantCandidates + ) + const customModelReasoningSummaryOverride = getConfiguredCustomModelReasoningSummaryOverride( + input.customModels, + configuredModelCandidates, + variantCandidates + ) + const globalReasoningSummary = + typeof globalBehavior?.reasoningSummary === "string" + ? globalBehavior.reasoningSummary + : typeof globalBehavior?.reasoningSummaries === "boolean" + ? globalBehavior.reasoningSummaries + ? "auto" + : "none" + : undefined + + return resolveReasoningSummaryValue({ + explicitValue: reasoningSummary, + explicitSource: "request.reasoning.summary", + hasReasoning: reasoningEffort !== undefined && reasoningEffort !== "none", + configuredValue: modelReasoningSummaryOverride ?? customModelReasoningSummaryOverride ?? globalReasoningSummary, + configuredSource: "config.reasoningSummary", + supportsReasoningSummaries: defaults?.supportsReasoningSummaries, + defaultReasoningSummaryFormat: defaults?.reasoningSummaryFormat, + defaultReasoningSummarySource: "codexRuntimeDefaults.reasoningSummaryFormat", + model: modelSlug ?? 
selectedModelSlug + }).diagnostic +} + export async function remapDeveloperMessagesToUserOnRequest(input: { request: Request; enabled: boolean }): Promise<{ request: Request changed: boolean diff --git a/lib/config.ts b/lib/config.ts index 38fdef5..9636214 100644 --- a/lib/config.ts +++ b/lib/config.ts @@ -1,16 +1,21 @@ export { CONFIG_FILE, + type CustomModelConfig, DEFAULT_CODEX_CONFIG, DEFAULT_CODEX_CONFIG_TEMPLATE, + LEGACY_CONFIG_FILE, type BehaviorSettings, type CodexSpoofMode, + type IncludeOption, type ModelBehaviorOverride, type ModelConfigOverride, type PersonalityOption, type PluginConfig, type PluginRuntimeMode, type PromptCacheKeyStrategy, + type ReasoningSummaryOption, type ServiceTierOption, + type TextVerbosityOption, type VerbosityOption } from "./config/types.js" @@ -20,6 +25,7 @@ export { loadConfigFile, normalizePersonalityOption, normalizeServiceTierOption, + normalizeTextVerbosityOption, normalizeVerbosityOption, parseConfigFileObject, parseConfigJsonWithComments, @@ -30,6 +36,7 @@ export { parseRuntimeMode, parseSpoofMode, resolveDefaultConfigPath, + resolveLegacyDefaultConfigPath, type EnsureDefaultConfigFileResult, validateConfigFileObject } from "./config/file.js" @@ -41,6 +48,7 @@ export { getCodexCompactionOverrideEnabled, getCollaborationProfileEnabled, getCompatInputSanitizerEnabled, + getCustomModels, getDebugEnabled, getHeaderSnapshotBodiesEnabled, getHeaderSnapshotsEnabled, @@ -53,6 +61,8 @@ export { getProactiveRefreshEnabled, getPromptCacheKeyStrategy, getQuietMode, + getReasoningSummaryOverride, + getReasoningSummariesOverride, getRemapDeveloperMessagesToUserEnabled, getRotationStrategy, getSpoofMode, diff --git a/lib/config/file.ts b/lib/config/file.ts index 74be14b..763c352 100644 --- a/lib/config/file.ts +++ b/lib/config/file.ts @@ -8,13 +8,18 @@ import type { RotationStrategy } from "../types.js" import { CONFIG_FILE, DEFAULT_CODEX_CONFIG_TEMPLATE, + LEGACY_CONFIG_FILE, type BehaviorSettings, + type CustomModelConfig, + type 
IncludeOption, type ModelConfigOverride, type PersonalityOption, type PluginConfig, type PluginRuntimeMode, type PromptCacheKeyStrategy, + type ReasoningSummaryOption, type ServiceTierOption, + type TextVerbosityOption, type VerbosityOption } from "./types.js" @@ -30,10 +35,20 @@ export type EnsureDefaultConfigFileResult = { type ModelBehaviorSettings = { personality?: PersonalityOption - thinkingSummaries?: boolean + reasoningEffort?: string + reasoningSummary?: ReasoningSummaryOption + reasoningSummaries?: boolean verbosityEnabled?: boolean verbosity?: VerbosityOption + textVerbosity?: TextVerbosityOption serviceTier?: ServiceTierOption + include?: IncludeOption[] + parallelToolCalls?: boolean +} + +type ParsedConfigFile = { + config: Partial + deprecatedKeys: string[] } function describeValueType(value: unknown): string { @@ -42,6 +57,21 @@ function describeValueType(value: unknown): string { return typeof value } +function describeValuePreview(value: unknown): string { + if (typeof value === "string") return JSON.stringify(value) + if (typeof value === "number" || typeof value === "boolean" || value === null || value === undefined) { + return String(value) + } + if (Array.isArray(value)) { + return `array(${value.length})` + } + if (typeof value === "object") { + const keys = Object.keys(value as Record) + return keys.length > 0 ? 
`object(${keys.slice(0, 3).join(", ")})` : "object" + } + return String(value) +} + function pushValidationIssue( issues: string[], input: { @@ -50,7 +80,9 @@ function pushValidationIssue( actual: unknown } ): void { - issues.push(`${input.path}: expected ${input.expected}, got ${describeValueType(input.actual)}`) + issues.push( + `${input.path}: expected ${input.expected}, found ${describeValueType(input.actual)} (${describeValuePreview(input.actual)})` + ) } export function parseEnvBoolean(value: string | undefined): boolean | undefined { @@ -135,6 +167,91 @@ function stripJsonComments(raw: string): string { return out } +const SUPPORTED_INCLUDE_OPTIONS = [ + "reasoning.encrypted_content", + "file_search_call.results", + "message.output_text.logprobs" +] as const satisfies readonly IncludeOption[] + +type NormalizedServiceTierInput = { + value?: ServiceTierOption + usedDeprecatedDefaultAlias: boolean +} + +function normalizeNonEmptyString(value: unknown): string | undefined { + if (typeof value !== "string") return undefined + const trimmed = value.trim() + return trimmed.length > 0 ? 
trimmed : undefined +} + +function deriveReasoningSummaryAlias(value: ReasoningSummaryOption | undefined): boolean | undefined { + if (value === undefined) return undefined + return value !== "none" +} + +function deriveVerbosityEnabledAlias(value: TextVerbosityOption | undefined): boolean | undefined { + if (value === undefined) return undefined + return value !== "none" +} + +function deriveVerbosityAlias(value: TextVerbosityOption | undefined): VerbosityOption | undefined { + if (value === undefined || value === "none") return undefined + return value +} + +function normalizeReasoningSummaryOption(value: unknown): ReasoningSummaryOption | undefined { + if (typeof value !== "string") return undefined + const normalized = value.trim().toLowerCase() + if (normalized === "auto" || normalized === "concise" || normalized === "detailed" || normalized === "none") { + return normalized + } + return undefined +} + +export function normalizeTextVerbosityOption(value: unknown): TextVerbosityOption | undefined { + if (typeof value !== "string") return undefined + const normalized = value.trim().toLowerCase() + if ( + normalized === "default" || + normalized === "low" || + normalized === "medium" || + normalized === "high" || + normalized === "none" + ) { + return normalized + } + return undefined +} + +function normalizeIncludeOptions(value: unknown): IncludeOption[] | undefined { + if (!Array.isArray(value)) return undefined + const out: IncludeOption[] = [] + const seen = new Set() + for (const entry of value) { + if (typeof entry !== "string") continue + const normalized = entry.trim().toLowerCase() as IncludeOption + if (!SUPPORTED_INCLUDE_OPTIONS.includes(normalized)) continue + if (seen.has(normalized)) continue + seen.add(normalized) + out.push(normalized) + } + return out.length > 0 ? 
out : undefined +} + +function normalizeServiceTierInput(value: unknown): NormalizedServiceTierInput { + if (typeof value !== "string") { + return { value: undefined, usedDeprecatedDefaultAlias: false } + } + const normalized = value.trim().toLowerCase() + if (normalized === "default") { + return { value: "auto", usedDeprecatedDefaultAlias: true } + } + if (normalized === "auto" || normalized === "priority" || normalized === "flex") { + return { value: normalized, usedDeprecatedDefaultAlias: false } + } + return { value: undefined, usedDeprecatedDefaultAlias: false } +} + function normalizeModelBehaviorSettings(raw: unknown): ModelBehaviorSettings | undefined { if (!isRecord(raw)) return undefined const out: ModelBehaviorSettings = {} @@ -142,21 +259,57 @@ function normalizeModelBehaviorSettings(raw: unknown): ModelBehaviorSettings | u const personality = normalizePersonalityOption(raw.personality) if (personality) out.personality = personality - if (typeof raw.thinkingSummaries === "boolean") out.thinkingSummaries = raw.thinkingSummaries - if (typeof raw.verbosityEnabled === "boolean") out.verbosityEnabled = raw.verbosityEnabled + const reasoningEffort = normalizeNonEmptyString(raw.reasoningEffort) + if (reasoningEffort) out.reasoningEffort = reasoningEffort + + const reasoningSummary = normalizeReasoningSummaryOption(raw.reasoningSummary) + if (reasoningSummary) { + out.reasoningSummary = reasoningSummary + } else if (typeof raw.reasoningSummaries === "boolean") { + out.reasoningSummary = raw.reasoningSummaries ? "auto" : "none" + } else if (typeof raw.thinkingSummaries === "boolean") { + out.reasoningSummary = raw.thinkingSummaries ? "auto" : "none" + } + out.reasoningSummaries = deriveReasoningSummaryAlias(out.reasoningSummary) + + const textVerbosity = normalizeTextVerbosityOption(raw.textVerbosity) + if (textVerbosity) { + out.textVerbosity = textVerbosity + } else { + const verbosityEnabled = typeof raw.verbosityEnabled === "boolean" ? 
raw.verbosityEnabled : undefined + const verbosity = normalizeVerbosityOption(raw.verbosity) + if (verbosityEnabled === false) { + out.textVerbosity = "none" + } else if (verbosity) { + out.textVerbosity = verbosity + } else if (verbosityEnabled === true) { + out.textVerbosity = "default" + } + } + out.verbosityEnabled = deriveVerbosityEnabledAlias(out.textVerbosity) + out.verbosity = deriveVerbosityAlias(out.textVerbosity) + + const serviceTier = normalizeServiceTierInput(raw.serviceTier) + if (serviceTier.value) out.serviceTier = serviceTier.value - const verbosity = normalizeVerbosityOption(raw.verbosity) - if (verbosity) out.verbosity = verbosity + const include = normalizeIncludeOptions(raw.include) + if (include) out.include = include - const serviceTier = normalizeServiceTierOption(raw.serviceTier) - if (serviceTier) out.serviceTier = serviceTier + if (typeof raw.parallelToolCalls === "boolean") { + out.parallelToolCalls = raw.parallelToolCalls + } if ( !out.personality && - out.thinkingSummaries === undefined && + !out.reasoningEffort && + out.reasoningSummary === undefined && + out.reasoningSummaries === undefined && + out.textVerbosity === undefined && out.verbosityEnabled === undefined && out.verbosity === undefined && - out.serviceTier === undefined + out.serviceTier === undefined && + out.include === undefined && + out.parallelToolCalls === undefined ) { return undefined } @@ -178,10 +331,15 @@ function normalizeModelConfigOverride(raw: unknown): ModelConfigOverride | undef if (!normalized) continue variantMap[variantName] = { ...(normalized.personality ? { personality: normalized.personality } : {}), - ...(normalized.thinkingSummaries !== undefined ? { thinkingSummaries: normalized.thinkingSummaries } : {}), + ...(normalized.reasoningEffort ? { reasoningEffort: normalized.reasoningEffort } : {}), + ...(normalized.reasoningSummary ? { reasoningSummary: normalized.reasoningSummary } : {}), + ...(normalized.reasoningSummaries !== undefined ? 
{ reasoningSummaries: normalized.reasoningSummaries } : {}), + ...(normalized.textVerbosity ? { textVerbosity: normalized.textVerbosity } : {}), ...(normalized.verbosityEnabled !== undefined ? { verbosityEnabled: normalized.verbosityEnabled } : {}), ...(normalized.verbosity ? { verbosity: normalized.verbosity } : {}), - ...(normalized.serviceTier ? { serviceTier: normalized.serviceTier } : {}) + ...(normalized.serviceTier ? { serviceTier: normalized.serviceTier } : {}), + ...(normalized.include ? { include: normalized.include } : {}), + ...(normalized.parallelToolCalls !== undefined ? { parallelToolCalls: normalized.parallelToolCalls } : {}) } } if (Object.keys(variantMap).length > 0) { @@ -195,14 +353,36 @@ function normalizeModelConfigOverride(raw: unknown): ModelConfigOverride | undef return { ...(modelBehavior?.personality ? { personality: modelBehavior.personality } : {}), - ...(modelBehavior?.thinkingSummaries !== undefined ? { thinkingSummaries: modelBehavior.thinkingSummaries } : {}), + ...(modelBehavior?.reasoningEffort ? { reasoningEffort: modelBehavior.reasoningEffort } : {}), + ...(modelBehavior?.reasoningSummary ? { reasoningSummary: modelBehavior.reasoningSummary } : {}), + ...(modelBehavior?.reasoningSummaries !== undefined + ? { reasoningSummaries: modelBehavior.reasoningSummaries } + : {}), + ...(modelBehavior?.textVerbosity ? { textVerbosity: modelBehavior.textVerbosity } : {}), ...(modelBehavior?.verbosityEnabled !== undefined ? { verbosityEnabled: modelBehavior.verbosityEnabled } : {}), ...(modelBehavior?.verbosity ? { verbosity: modelBehavior.verbosity } : {}), ...(modelBehavior?.serviceTier ? { serviceTier: modelBehavior.serviceTier } : {}), + ...(modelBehavior?.include ? { include: modelBehavior.include } : {}), + ...(modelBehavior?.parallelToolCalls !== undefined ? { parallelToolCalls: modelBehavior.parallelToolCalls } : {}), ...(variants ? 
{ variants } : {}) } } +function normalizeCustomModelConfig(raw: unknown): CustomModelConfig | undefined { + if (!isRecord(raw)) return undefined + const behavior = normalizeModelConfigOverride(raw) + const targetModel = normalizeNonEmptyString(raw.targetModel) + const name = normalizeNonEmptyString(raw.name) + if (!targetModel && !behavior && !name) return undefined + if (!targetModel) return undefined + + return { + targetModel, + ...(name ? { name } : {}), + ...(behavior ?? {}) + } +} + function normalizeNewBehaviorSections(raw: Record): BehaviorSettings | undefined { const global = normalizeModelBehaviorSettings(raw.global) const perModelRaw = isRecord(raw.perModel) ? raw.perModel : undefined @@ -228,6 +408,21 @@ function normalizeNewBehaviorSections(raw: Record): BehaviorSet } } +function normalizeCustomModels(raw: Record): Record | undefined { + const customModelsRaw = isRecord(raw.customModels) ? raw.customModels : undefined + if (!customModelsRaw) return undefined + + const out: Record = {} + for (const [slug, value] of Object.entries(customModelsRaw)) { + const normalizedSlug = normalizeNonEmptyString(slug)?.toLowerCase() + if (!normalizedSlug) continue + const normalized = normalizeCustomModelConfig(value) + if (!normalized) continue + out[normalizedSlug] = normalized + } + return Object.keys(out).length > 0 ? 
out : undefined +} + function validateModelBehaviorShape(value: unknown, pathPrefix: string, issues: string[]): void { if (!isRecord(value)) { pushValidationIssue(issues, { path: pathPrefix, expected: "object", actual: value }) @@ -237,6 +432,31 @@ function validateModelBehaviorShape(value: unknown, pathPrefix: string, issues: if ("personality" in value && typeof value.personality !== "string") { pushValidationIssue(issues, { path: `${pathPrefix}.personality`, expected: "string", actual: value.personality }) } + if ("reasoningEffort" in value && typeof value.reasoningEffort !== "string") { + pushValidationIssue(issues, { + path: `${pathPrefix}.reasoningEffort`, + expected: "string", + actual: value.reasoningEffort + }) + } + if ("reasoningSummary" in value) { + const reasoningSummary = value.reasoningSummary + const normalized = typeof reasoningSummary === "string" ? reasoningSummary.trim().toLowerCase() : "" + if (!(normalized === "auto" || normalized === "concise" || normalized === "detailed" || normalized === "none")) { + pushValidationIssue(issues, { + path: `${pathPrefix}.reasoningSummary`, + expected: '"auto" | "concise" | "detailed" | "none"', + actual: reasoningSummary + }) + } + } + if ("reasoningSummaries" in value && typeof value.reasoningSummaries !== "boolean") { + pushValidationIssue(issues, { + path: `${pathPrefix}.reasoningSummaries`, + expected: "boolean", + actual: value.reasoningSummaries + }) + } if ("thinkingSummaries" in value && typeof value.thinkingSummaries !== "boolean") { pushValidationIssue(issues, { path: `${pathPrefix}.thinkingSummaries`, @@ -262,17 +482,85 @@ function validateModelBehaviorShape(value: unknown, pathPrefix: string, issues: }) } } + if ("textVerbosity" in value) { + const textVerbosity = value.textVerbosity + const normalized = typeof textVerbosity === "string" ? 
textVerbosity.trim().toLowerCase() : "" + if ( + !( + normalized === "default" || + normalized === "low" || + normalized === "medium" || + normalized === "high" || + normalized === "none" + ) + ) { + pushValidationIssue(issues, { + path: `${pathPrefix}.textVerbosity`, + expected: '"default" | "low" | "medium" | "high" | "none"', + actual: textVerbosity + }) + } + } if ("serviceTier" in value) { const serviceTier = value.serviceTier const normalized = typeof serviceTier === "string" ? serviceTier.trim().toLowerCase() : "" - if (!(normalized === "default" || normalized === "priority" || normalized === "flex")) { + if (!(normalized === "default" || normalized === "auto" || normalized === "priority" || normalized === "flex")) { pushValidationIssue(issues, { path: `${pathPrefix}.serviceTier`, - expected: '"default" | "priority" | "flex"', + expected: '"auto" | "priority" | "flex" (deprecated alias: "default")', actual: serviceTier }) } } + if ("include" in value) { + if (!Array.isArray(value.include)) { + pushValidationIssue(issues, { + path: `${pathPrefix}.include`, + expected: "array", + actual: value.include + }) + } else { + for (const entry of value.include) { + const normalized = typeof entry === "string" ? 
entry.trim().toLowerCase() : "" + if (!SUPPORTED_INCLUDE_OPTIONS.includes(normalized as IncludeOption)) { + pushValidationIssue(issues, { + path: `${pathPrefix}.include`, + expected: SUPPORTED_INCLUDE_OPTIONS.map((item) => `"${item}"`).join(" | "), + actual: entry + }) + } + } + } + } + if ("parallelToolCalls" in value && typeof value.parallelToolCalls !== "boolean") { + pushValidationIssue(issues, { + path: `${pathPrefix}.parallelToolCalls`, + expected: "boolean", + actual: value.parallelToolCalls + }) + } +} + +function validateCustomModelShape(value: unknown, pathPrefix: string, issues: string[]): void { + validateModelBehaviorShape(value, pathPrefix, issues) + if (!isRecord(value)) return + if ("targetModel" in value && typeof value.targetModel !== "string") { + pushValidationIssue(issues, { + path: `${pathPrefix}.targetModel`, + expected: "string", + actual: value.targetModel + }) + } + if (!("targetModel" in value)) { + issues.push(`${pathPrefix}.targetModel: expected string, found missing (custom models require targetModel)`) + } + if ("name" in value && typeof value.name !== "string") { + pushValidationIssue(issues, { + path: `${pathPrefix}.name`, + expected: "string", + actual: value.name + }) + } } export function parseConfigJsonWithComments(raw: string): unknown { @@ -331,12 +619,7 @@ export function normalizeVerbosityOption(value: unknown): VerbosityOption | unde } export function normalizeServiceTierOption(value: unknown): ServiceTierOption | undefined { - if (typeof value !== "string") return undefined - const normalized = value.trim().toLowerCase() - if (normalized === "default" || normalized === "priority" || normalized === "flex") { - return normalized - } - return undefined + return normalizeServiceTierInput(value).value } export function validateConfigFileObject(raw: unknown): ConfigValidationResult { @@ -454,13 +737,99 @@ export function validateConfigFileObject(raw: unknown): ConfigValidationResult { } } + if ("customModels" in raw) { + if 
(!isRecord(raw.customModels)) { + pushValidationIssue(issues, { path: "customModels", expected: "object", actual: raw.customModels }) + } else { + for (const [slug, value] of Object.entries(raw.customModels)) { + validateCustomModelShape(value, `customModels.${slug}`, issues) + if (!isRecord(value) || !("variants" in value)) continue + const variants = value.variants + if (!isRecord(variants)) { + pushValidationIssue(issues, { + path: `customModels.${slug}.variants`, + expected: "object", + actual: variants + }) + continue + } + for (const [variantName, variantValue] of Object.entries(variants)) { + validateModelBehaviorShape(variantValue, `customModels.${slug}.variants.${variantName}`, issues) + } + } + } + } + return { valid: issues.length === 0, issues } } export function parseConfigFileObject(raw: unknown): Partial { - if (!isRecord(raw)) return {} + return parseConfigFileObjectWithMetadata(raw).config +} + +function collectDeprecatedModelBehaviorKeys(raw: unknown): string[] { + if (!isRecord(raw)) return [] + + const keys: string[] = [] + const collectBehaviorAlias = (value: unknown, pathPrefix: string) => { + if (!isRecord(value)) return + if (typeof value.reasoningSummaries === "boolean") { + keys.push(`${pathPrefix}.reasoningSummaries`) + } + if (typeof value.thinkingSummaries === "boolean") { + keys.push(`${pathPrefix}.thinkingSummaries`) + } + if ("verbosityEnabled" in value) { + keys.push(`${pathPrefix}.verbosityEnabled`) + } + if ("verbosity" in value) { + keys.push(`${pathPrefix}.verbosity`) + } + if (typeof value.serviceTier === "string" && value.serviceTier.trim().toLowerCase() === "default") { + keys.push(`${pathPrefix}.serviceTier="default"`) + } + } + + collectBehaviorAlias(raw.global, "global") + + if (isRecord(raw.perModel)) { + for (const [modelName, modelValue] of Object.entries(raw.perModel)) { + collectBehaviorAlias(modelValue, `perModel.${modelName}`) + + if (!isRecord(modelValue)) continue + const variants = isRecord(modelValue.variants) ? 
modelValue.variants : undefined + if (!variants) continue + for (const [variantName, variantValue] of Object.entries(variants)) { + collectBehaviorAlias(variantValue, `perModel.${modelName}.variants.${variantName}`) + } + } + } + + if (isRecord(raw.customModels)) { + for (const [slug, modelValue] of Object.entries(raw.customModels)) { + collectBehaviorAlias(modelValue, `customModels.${slug}`) + if (!isRecord(modelValue)) continue + const variants = isRecord(modelValue.variants) ? modelValue.variants : undefined + if (!variants) continue + for (const [variantName, variantValue] of Object.entries(variants)) { + collectBehaviorAlias(variantValue, `customModels.${slug}.variants.${variantName}`) + } + } + } + + return keys +} + +function parseConfigFileObjectWithMetadata(raw: unknown): ParsedConfigFile { + if (!isRecord(raw)) { + return { + config: {}, + deprecatedKeys: [] + } + } const behaviorSettings = normalizeNewBehaviorSections(raw) + const customModels = normalizeCustomModels(raw) const personalityFromBehavior = behaviorSettings?.global?.personality const runtime = isRecord(raw.runtime) ? raw.runtime : undefined @@ -491,28 +860,32 @@ export function parseConfigFileObject(raw: unknown): Partial { typeof runtime?.orchestratorSubagents === "boolean" ? 
runtime.orchestratorSubagents : undefined return { - debug, - proactiveRefresh, - proactiveRefreshBufferMs, - quiet: quietMode, - quietMode, - pidOffsetEnabled, - personality: personalityFromBehavior, - mode, - rotationStrategy, - promptCacheKeyStrategy, - spoofMode, - compatInputSanitizer, - remapDeveloperMessagesToUser, - codexCompactionOverride, - headerSnapshots, - headerSnapshotBodies, - headerTransformDebug, - collaborationProfile: collaborationProfileEnabled, - collaborationProfileEnabled, - orchestratorSubagents: orchestratorSubagentsEnabled, - orchestratorSubagentsEnabled, - behaviorSettings + config: { + debug, + proactiveRefresh, + proactiveRefreshBufferMs, + quiet: quietMode, + quietMode, + pidOffsetEnabled, + personality: personalityFromBehavior, + mode, + rotationStrategy, + promptCacheKeyStrategy, + spoofMode, + compatInputSanitizer, + remapDeveloperMessagesToUser, + codexCompactionOverride, + headerSnapshots, + headerSnapshotBodies, + headerTransformDebug, + collaborationProfile: collaborationProfileEnabled, + collaborationProfileEnabled, + orchestratorSubagents: orchestratorSubagentsEnabled, + orchestratorSubagentsEnabled, + behaviorSettings, + customModels + }, + deprecatedKeys: collectDeprecatedModelBehaviorKeys(raw) } } @@ -524,11 +897,53 @@ export function resolveDefaultConfigPath(env: Record return path.join(os.homedir(), ".config", "opencode", CONFIG_FILE) } +export function resolveLegacyDefaultConfigPath(env: Record): string { + const xdgRoot = env.XDG_CONFIG_HOME?.trim() + if (xdgRoot) { + return path.join(xdgRoot, "opencode", LEGACY_CONFIG_FILE) + } + return path.join(os.homedir(), ".config", "opencode", LEGACY_CONFIG_FILE) +} + +function quarantineLegacyConfigSync(filePath: string): string | undefined { + try { + const quarantineDir = path.join(path.dirname(filePath), "quarantine") + fs.mkdirSync(quarantineDir, { recursive: true }) + const dest = path.join(quarantineDir, `${path.basename(filePath)}.${Date.now()}.quarantine.json`) + 
fs.renameSync(filePath, dest) + return dest + } catch { + return undefined + } +} + +function resolveDefaultConfigCandidates(env: Record): string[] { + const filePath = resolveDefaultConfigPath(env) + const legacyPath = resolveLegacyDefaultConfigPath(env) + const hasFile = fs.existsSync(filePath) + const hasLegacy = fs.existsSync(legacyPath) + + if (hasFile && hasLegacy) { + return [filePath, legacyPath] + } + if (hasFile) return [filePath] + if (hasLegacy) return [legacyPath] + return [filePath] +} + export async function ensureDefaultConfigFile( input: { env?: Record; filePath?: string; overwrite?: boolean } = {} ): Promise { const env = input.env ?? process.env - const filePath = input.filePath ?? resolveDefaultConfigPath(env) + const filePath = + input.filePath ?? + (() => { + const canonicalPath = resolveDefaultConfigPath(env) + const legacyPath = resolveLegacyDefaultConfigPath(env) + if (fs.existsSync(canonicalPath)) return canonicalPath + if (fs.existsSync(legacyPath)) return legacyPath + return canonicalPath + })() const overwrite = input.overwrite === true if (!overwrite && fs.existsSync(filePath)) { @@ -552,7 +967,16 @@ export function loadConfigFile( ): Partial { const env = input.env ?? process.env const explicitPath = input.filePath ?? env.OPENCODE_OPENAI_MULTI_CONFIG_PATH?.trim() - const candidates = explicitPath ? [explicitPath] : [resolveDefaultConfigPath(env)] + const candidates = explicitPath ? [explicitPath] : resolveDefaultConfigCandidates(env) + const canonicalPath = explicitPath ? undefined : resolveDefaultConfigPath(env) + const legacyPath = explicitPath ? 
undefined : resolveLegacyDefaultConfigPath(env) + const shouldQuarantineLegacyAfterCanonicalLoad = + !explicitPath && + canonicalPath !== undefined && + legacyPath !== undefined && + candidates.length > 1 && + fs.existsSync(canonicalPath) && + fs.existsSync(legacyPath) for (const filePath of candidates) { if (!filePath || !fs.existsSync(filePath)) continue @@ -564,7 +988,25 @@ export function loadConfigFile( console.warn(`[opencode-codex-auth] Invalid codex-config at ${filePath}. ${validation.issues.join("; ")}`) continue } - return parseConfigFileObject(parsed) + const result = parseConfigFileObjectWithMetadata(parsed) + if (result.deprecatedKeys.length > 0) { + console.warn( + `[opencode-codex-auth] Deprecated config key(s) in ${filePath}: ${result.deprecatedKeys.join(", ")}. Use reasoningSummary, textVerbosity, and serviceTier: "auto" instead.` + ) + } + if ( + shouldQuarantineLegacyAfterCanonicalLoad && + filePath === canonicalPath && + legacyPath && + fs.existsSync(legacyPath) + ) { + const quarantinedPath = quarantineLegacyConfigSync(legacyPath) + const suffix = quarantinedPath ? ` Quarantined legacy file to ${quarantinedPath}.` : "" + console.warn( + `[opencode-codex-auth] Found both ${CONFIG_FILE} and ${LEGACY_CONFIG_FILE}. Using ${CONFIG_FILE}.${suffix}` + ) + } + return result.config } catch (error) { const detail = error instanceof Error ? error.message : String(error) console.warn(`[opencode-codex-auth] Failed to read codex-config at ${filePath}. 
${detail}`) diff --git a/lib/config/resolve.ts b/lib/config/resolve.ts index b17db69..06e4069 100644 --- a/lib/config/resolve.ts +++ b/lib/config/resolve.ts @@ -2,6 +2,7 @@ import type { RotationStrategy } from "../types.js" import { normalizePersonalityOption, normalizeServiceTierOption, + normalizeTextVerbosityOption, normalizeVerbosityOption, parseEnvBoolean, parseEnvNumber, @@ -13,6 +14,7 @@ import { import type { BehaviorSettings, CodexSpoofMode, + CustomModelConfig, ModelBehaviorOverride, PersonalityOption, PluginConfig, @@ -25,6 +27,23 @@ function cloneBehaviorOverride>(input: T | und return { ...input } } +function cloneCustomModelConfig(input: CustomModelConfig | undefined): CustomModelConfig | undefined { + if (!input) return undefined + return { + ...input, + ...(input.variants + ? { + variants: Object.fromEntries( + Object.entries(input.variants).map(([variantKey, variantValue]) => [ + variantKey, + cloneBehaviorOverride(variantValue) ?? {} + ]) + ) + } + : {}) + } +} + export function cloneBehaviorSettings(input: BehaviorSettings | undefined): BehaviorSettings | undefined { if (!input) return undefined return { @@ -59,9 +78,10 @@ export function cloneBehaviorSettings(input: BehaviorSettings | undefined): Beha export function buildResolvedBehaviorSettings(input: { fileBehavior: BehaviorSettings | undefined envPersonality: PersonalityOption | undefined - envThinkingSummaries: boolean | undefined + envReasoningSummaries: boolean | undefined envVerbosityEnabled: boolean | undefined envVerbosity: ModelBehaviorOverride["verbosity"] + envTextVerbosity: ModelBehaviorOverride["textVerbosity"] envServiceTier: ModelBehaviorOverride["serviceTier"] }): BehaviorSettings | undefined { const behaviorSettings = cloneBehaviorSettings(input.fileBehavior) ?? 
{} @@ -72,14 +92,28 @@ export function buildResolvedBehaviorSettings(input: { if (input.envPersonality) { globalBehavior.personality = input.envPersonality } - if (input.envThinkingSummaries !== undefined) { - globalBehavior.thinkingSummaries = input.envThinkingSummaries + if (input.envReasoningSummaries !== undefined) { + globalBehavior.reasoningSummary = input.envReasoningSummaries ? "auto" : "none" + globalBehavior.reasoningSummaries = input.envReasoningSummaries } - if (input.envVerbosityEnabled !== undefined) { - globalBehavior.verbosityEnabled = input.envVerbosityEnabled - } - if (input.envVerbosity) { - globalBehavior.verbosity = input.envVerbosity + if (input.envTextVerbosity) { + globalBehavior.textVerbosity = input.envTextVerbosity + globalBehavior.verbosityEnabled = input.envTextVerbosity !== "none" + globalBehavior.verbosity = input.envTextVerbosity === "none" ? undefined : input.envTextVerbosity + } else { + if (input.envVerbosityEnabled === false) { + globalBehavior.textVerbosity = "none" + globalBehavior.verbosityEnabled = false + globalBehavior.verbosity = undefined + } else if (input.envVerbosity) { + globalBehavior.textVerbosity = input.envVerbosity + globalBehavior.verbosityEnabled = true + globalBehavior.verbosity = input.envVerbosity + } else if (input.envVerbosityEnabled === true) { + globalBehavior.textVerbosity = "default" + globalBehavior.verbosityEnabled = true + globalBehavior.verbosity = "default" + } } if (input.envServiceTier) { globalBehavior.serviceTier = input.envServiceTier @@ -87,7 +121,9 @@ export function buildResolvedBehaviorSettings(input: { if ( globalBehavior.personality !== undefined || - globalBehavior.thinkingSummaries !== undefined || + globalBehavior.reasoningSummary !== undefined || + globalBehavior.reasoningSummaries !== undefined || + globalBehavior.textVerbosity !== undefined || globalBehavior.verbosityEnabled !== undefined || globalBehavior.verbosity !== undefined || globalBehavior.serviceTier !== undefined @@ 
-98,6 +134,13 @@ export function buildResolvedBehaviorSettings(input: { return behaviorSettings.global !== undefined || behaviorSettings.perModel !== undefined ? behaviorSettings : undefined } +export function getCustomModels(cfg: PluginConfig): Record | undefined { + if (!cfg.customModels) return undefined + return Object.fromEntries( + Object.entries(cfg.customModels).map(([slug, config]) => [slug, cloneCustomModelConfig(config) ?? config]) + ) +} + export function resolveConfig(input: { env: Record file?: Partial @@ -118,9 +161,25 @@ export function resolveConfig(input: { parsePromptCacheKeyStrategy(env.OPENCODE_OPENAI_MULTI_PROMPT_CACHE_KEY_STRATEGY) ?? file.promptCacheKeyStrategy const envPersonality = normalizePersonalityOption(env.OPENCODE_OPENAI_MULTI_PERSONALITY) - const envThinkingSummaries = parseEnvBoolean(env.OPENCODE_OPENAI_MULTI_THINKING_SUMMARIES) + if ( + env.OPENCODE_OPENAI_MULTI_THINKING_SUMMARIES !== undefined && + env.OPENCODE_OPENAI_MULTI_REASONING_SUMMARIES === undefined + ) { + console.warn( + "[opencode-codex-auth] Deprecated env var `OPENCODE_OPENAI_MULTI_THINKING_SUMMARIES` is set. Use `OPENCODE_OPENAI_MULTI_REASONING_SUMMARIES` instead." + ) + } + const envReasoningSummaries = + parseEnvBoolean(env.OPENCODE_OPENAI_MULTI_REASONING_SUMMARIES) ?? + parseEnvBoolean(env.OPENCODE_OPENAI_MULTI_THINKING_SUMMARIES) const envVerbosityEnabled = parseEnvBoolean(env.OPENCODE_OPENAI_MULTI_VERBOSITY_ENABLED) const envVerbosity = normalizeVerbosityOption(env.OPENCODE_OPENAI_MULTI_VERBOSITY) + const envTextVerbosity = normalizeTextVerbosityOption(env.OPENCODE_OPENAI_MULTI_TEXT_VERBOSITY) + if (env.OPENCODE_OPENAI_MULTI_SERVICE_TIER?.trim().toLowerCase() === "default") { + console.warn( + "[opencode-codex-auth] Deprecated env value `OPENCODE_OPENAI_MULTI_SERVICE_TIER=default` is set. Use `auto` instead." 
+ ) + } const envServiceTier = normalizeServiceTierOption(env.OPENCODE_OPENAI_MULTI_SERVICE_TIER) const spoofModeFromEnv = parseSpoofMode(env.OPENCODE_OPENAI_MULTI_SPOOF_MODE) const modeFromEnv = parseRuntimeMode(env.OPENCODE_OPENAI_MULTI_MODE) @@ -139,9 +198,10 @@ export function resolveConfig(input: { const resolvedBehaviorSettings = buildResolvedBehaviorSettings({ fileBehavior, envPersonality, - envThinkingSummaries, + envReasoningSummaries, envVerbosityEnabled, envVerbosity, + envTextVerbosity, envServiceTier }) @@ -279,6 +339,16 @@ export function getBehaviorSettings(cfg: PluginConfig): BehaviorSettings | undef return cfg.behaviorSettings } +export function getReasoningSummariesOverride(cfg: PluginConfig): boolean | undefined { + const summary = cfg.behaviorSettings?.global?.reasoningSummary + if (summary !== undefined) return summary !== "none" + return cfg.behaviorSettings?.global?.reasoningSummaries +} + export function getThinkingSummariesOverride(cfg: PluginConfig): boolean | undefined { - return cfg.behaviorSettings?.global?.thinkingSummaries + return getReasoningSummariesOverride(cfg) +} + +export function getReasoningSummaryOverride(cfg: PluginConfig): ModelBehaviorOverride["reasoningSummary"] | undefined { + return cfg.behaviorSettings?.global?.reasoningSummary } diff --git a/lib/config/types.ts b/lib/config/types.ts index 143e286..d3ddfb9 100644 --- a/lib/config/types.ts +++ b/lib/config/types.ts @@ -4,21 +4,35 @@ export type PersonalityOption = string export type CodexSpoofMode = "native" | "codex" export type PluginRuntimeMode = "native" | "codex" export type VerbosityOption = "default" | "low" | "medium" | "high" -export type ServiceTierOption = "default" | "priority" | "flex" +export type TextVerbosityOption = VerbosityOption | "none" +export type ReasoningSummaryOption = "auto" | "concise" | "detailed" | "none" +export type IncludeOption = "reasoning.encrypted_content" | "file_search_call.results" | "message.output_text.logprobs" +export type 
ServiceTierOption = "auto" | "priority" | "flex" export type PromptCacheKeyStrategy = "default" | "project" export type ModelBehaviorOverride = { personality?: PersonalityOption + reasoningEffort?: string + reasoningSummary?: ReasoningSummaryOption + reasoningSummaries?: boolean thinkingSummaries?: boolean + textVerbosity?: TextVerbosityOption verbosityEnabled?: boolean verbosity?: VerbosityOption serviceTier?: ServiceTierOption + include?: IncludeOption[] + parallelToolCalls?: boolean } export type ModelConfigOverride = ModelBehaviorOverride & { variants?: Record } +export type CustomModelConfig = ModelConfigOverride & { + targetModel: string + name?: string +} + export type BehaviorSettings = { global?: ModelBehaviorOverride perModel?: Record @@ -47,9 +61,11 @@ export type PluginConfig = { orchestratorSubagents?: boolean orchestratorSubagentsEnabled?: boolean behaviorSettings?: BehaviorSettings + customModels?: Record } -export const CONFIG_FILE = "codex-config.json" +export const CONFIG_FILE = "codex-config.jsonc" +export const LEGACY_CONFIG_FILE = "codex-config.json" export const DEFAULT_CODEX_CONFIG = { $schema: "https://schemas.iam-brain.dev/opencode-codex-auth/codex-config.schema.json", @@ -72,9 +88,11 @@ export const DEFAULT_CODEX_CONFIG = { }, global: { personality: "pragmatic", - verbosityEnabled: true, - verbosity: "default" + reasoningEffort: "high", + reasoningSummary: "auto", + textVerbosity: "default" }, + customModels: {}, perModel: {} } as const @@ -131,6 +149,11 @@ export const DEFAULT_CODEX_CONFIG_TEMPLATE = `{ // default: "default" "promptCacheKeyStrategy": "default", + // Codex-rs compaction/profile override. + // options: true | false + // mode default: false in "native", true in "codex" + // "codexCompactionOverride": true, + // Write request header snapshots to plugin logs. 
// options: true | false // default: false @@ -146,14 +169,20 @@ export const DEFAULT_CODEX_CONFIG_TEMPLATE = `{ // default: false "headerTransformDebug": false, + // Collaboration profile toggles. + // options: true | false + // mode default: false in "native", true in "codex" + // "collaborationProfile": true, + + // Subagent header hints. + // options: true | false + // default: inherits collaborationProfile + // "orchestratorSubagents": true, + // Session-aware offset for account selection. // options: true | false // default: false "pidOffset": false - - // Experimental collaboration controls (optional): - // "collaborationProfile": true, - // "orchestratorSubagents": true }, "global": { @@ -163,29 +192,59 @@ export const DEFAULT_CODEX_CONFIG_TEMPLATE = `{ // default: "pragmatic" "personality": "pragmatic", - // Thinking summaries behavior: - // true => force on - // false => force off - // omit => use model default from catalog cache (recommended) - // "thinkingSummaries": true + // Reasoning effort override. + // examples: "minimal", "low", "medium", "high" + // omit => use the selected model/catalog default + "reasoningEffort": "high", - // Text verbosity behavior: - // verbosityEnabled: true => apply verbosity setting/default - // verbosityEnabled: false => do not send textVerbosity - // default: true - "verbosityEnabled": true, - - // options: "default" | "low" | "medium" | "high" - // "default" uses each model's catalog default verbosity. - // default: "default" - "verbosity": "default" + // Reasoning summary format sent upstream as reasoning.summary. + // options: "auto" | "concise" | "detailed" | "none" + // "none" disables reasoning summaries entirely. 
+ // deprecated aliases: reasoningSummaries, thinkingSummaries + "reasoningSummary": "auto", - // Service tier / fast-mode behavior: - // "default" => do not force a service_tier override + // Fast Mode behavior (serviceTier): + // "auto" => do not force a service_tier override // "priority" => fast mode for GPT-5.4* requests only // "flex" => pass through service_tier: "flex" // omit => leave request body unchanged (recommended) - // "serviceTier": "priority" + // "serviceTier": "priority", + + // Text verbosity behavior sent upstream as text.verbosity. + // options: "default" | "low" | "medium" | "high" | "none" + // "default" uses each model's catalog default verbosity. + // "none" disables text verbosity entirely. + "textVerbosity": "default" + + // Optional extra response includes. + // allowed: "reasoning.encrypted_content" | "file_search_call.results" | "message.output_text.logprobs" + // "include": ["file_search_call.results"], + + // Whether to allow multiple tool calls in parallel. + // options: true | false + // omit => use the selected model/catalog default + // "parallelToolCalls": true + }, + + // Optional custom selectable model aliases. + // The config key becomes the model slug users select, while targetModel stays the backend-facing model id. + "customModels": { + // "my-fast-codex": { + // "targetModel": "gpt-5.3-codex", + // "name": "My Fast Codex", + // "reasoningEffort": "low", + // "reasoningSummary": "concise", + // "textVerbosity": "medium", + // "serviceTier": "auto", + // "include": ["file_search_call.results"], + // "parallelToolCalls": true, + // "variants": { + // "high": { + // "reasoningEffort": "high", + // "reasoningSummary": "detailed" + // } + // } + // } }, // Optional model-specific overrides. 
@@ -193,16 +252,17 @@ export const DEFAULT_CODEX_CONFIG_TEMPLATE = `{ "perModel": { // "gpt-5.3-codex": { // "personality": "friendly", - // "thinkingSummaries": true, - // "verbosityEnabled": true, - // "verbosity": "default", + // "reasoningEffort": "medium", + // "reasoningSummary": "concise", + // "textVerbosity": "medium", // "serviceTier": "priority", + // "include": ["file_search_call.results"], + // "parallelToolCalls": false, // "variants": { // "high": { // "personality": "pragmatic", - // "thinkingSummaries": false, - // "verbosityEnabled": true, - // "verbosity": "high", + // "reasoningSummary": "detailed", + // "textVerbosity": "high", // "serviceTier": "flex" // } // } diff --git a/lib/fatal-errors.ts b/lib/fatal-errors.ts index d8e8929..b44b6d6 100644 --- a/lib/fatal-errors.ts +++ b/lib/fatal-errors.ts @@ -3,12 +3,16 @@ export type PluginFatalErrorInput = { status?: number type?: string param?: string + source?: string + hint?: string } export class PluginFatalError extends Error { readonly status: number readonly type: string readonly param?: string + readonly source?: string + readonly hint?: string constructor(input: PluginFatalErrorInput) { super(input.message) @@ -16,6 +20,8 @@ export class PluginFatalError extends Error { this.status = input.status ?? 400 this.type = input.type ?? 
"hard_stop" this.param = input.param + this.source = input.source + this.hint = input.hint } } @@ -27,9 +33,11 @@ export function createSyntheticErrorResponse( message: string, status = 400, type = "hard_stop", - param?: string + param?: string, + source?: string, + hint?: string ): Response { - const errorPayload: { error: { message: string; type: string; param?: string } } = { + const errorPayload: { error: { message: string; type: string; param?: string; source?: string; hint?: string } } = { error: { message, type @@ -39,6 +47,12 @@ export function createSyntheticErrorResponse( if (param) { errorPayload.error.param = param } + if (source) { + errorPayload.error.source = source + } + if (hint) { + errorPayload.error.hint = hint + } return new Response(JSON.stringify(errorPayload), { status, @@ -47,7 +61,7 @@ export function createSyntheticErrorResponse( } export function toSyntheticErrorResponse(error: PluginFatalError): Response { - return createSyntheticErrorResponse(error.message, error.status, error.type, error.param) + return createSyntheticErrorResponse(error.message, error.status, error.type, error.param, error.source, error.hint) } export function formatWaitTime(ms: number): string { diff --git a/lib/fetch-orchestrator.ts b/lib/fetch-orchestrator.ts index e414a01..90bfd3d 100644 --- a/lib/fetch-orchestrator.ts +++ b/lib/fetch-orchestrator.ts @@ -1,5 +1,5 @@ import { computeBackoffMs, parseRetryAfterMs } from "./rate-limit.js" -import { createSyntheticErrorResponse, formatWaitTime } from "./fatal-errors.js" +import { createSyntheticErrorResponse, formatWaitTime, isPluginFatalError } from "./fatal-errors.js" import { DEFAULT_ACCOUNT_SWITCH_TOAST_DEBOUNCE_MS, DEFAULT_RATE_LIMIT_TOAST_DEBOUNCE_MS, @@ -325,6 +325,9 @@ export class FetchOrchestrator { request = maybeRequest } } catch (error) { + if (isPluginFatalError(error)) { + throw error + } if (error instanceof Error) { // Snapshot/debug hooks should never block request execution. 
} diff --git a/lib/model-catalog.ts b/lib/model-catalog.ts index 7da8fee..262da83 100644 --- a/lib/model-catalog.ts +++ b/lib/model-catalog.ts @@ -2,6 +2,7 @@ export { type ApplyCodexCatalogInput, CACHE_TTL_MS, type CodexModelCatalogEvent, + type CustomModelBehaviorConfig, type CodexModelInfo, type CodexModelRuntimeDefaults, type CodexModelsCache, diff --git a/lib/model-catalog/provider.ts b/lib/model-catalog/provider.ts index 8331e51..4ab379a 100644 --- a/lib/model-catalog/provider.ts +++ b/lib/model-catalog/provider.ts @@ -1,6 +1,7 @@ import { resolveCustomPersonalityDescription } from "../personalities.js" import { type ApplyCodexCatalogInput, + type CustomModelBehaviorConfig, type CodexModelInfo, type CodexModelRuntimeDefaults, compareModelSlugs, @@ -154,6 +155,38 @@ function buildVariants(model: CodexModelInfo): Record [effort, { reasoningEffort: effort }])) } +function cloneValue(value: T): T { + if (Array.isArray(value)) { + return value.map((entry) => cloneValue(entry)) as T + } + if (typeof value === "object" && value !== null) { + return Object.fromEntries( + Object.entries(value as Record).map(([key, entry]) => [key, cloneValue(entry)]) + ) as T + } + return value +} + +function mergeVariantMaps( + baseVariants: Record> | undefined, + overlayVariants: CustomModelBehaviorConfig["variants"] | undefined +): Record> | undefined { + const nextVariants: Record> = {} + + for (const [variantName, variantValue] of Object.entries(baseVariants ?? {})) { + nextVariants[variantName] = cloneValue(variantValue) + } + + for (const [variantName, variantValue] of Object.entries(overlayVariants ?? {})) { + nextVariants[variantName] = { + ...(nextVariants[variantName] ?? {}), + ...cloneValue(variantValue ?? {}) + } + } + + return Object.keys(nextVariants).length > 0 ? 
nextVariants : undefined +} + function buildProviderModelFromCatalog( model: CodexModelInfo, providerModels: Record>, @@ -215,6 +248,43 @@ function buildProviderModelFromCatalog( } } +function buildCustomProviderModel(input: { + slug: string + config: CustomModelBehaviorConfig + targetModel: Record +}): Record { + const nextModel = cloneValue(input.targetModel) + nextModel.id = input.slug + nextModel.slug = input.slug + nextModel.model = input.slug + + if (input.config.name) { + nextModel.name = input.config.name + nextModel.displayName = input.config.name + nextModel.display_name = input.config.name + } + + const api = + typeof nextModel.api === "object" && nextModel.api !== null && !Array.isArray(nextModel.api) + ? (nextModel.api as Record) + : {} + api.id = input.config.targetModel + nextModel.api = api + + nextModel.variants = mergeVariantMaps( + asRecord(nextModel.variants) as Record> | undefined, + input.config.variants + ) + + const options = ensureModelOptions(nextModel) + options.codexCustomModelConfig = cloneValue({ + slug: input.slug, + ...input.config + }) + + return nextModel +} + function resolvePersonalityText(model: CodexModelInfo, personality: PersonalityOption | undefined): string | undefined { const vars = model.model_messages?.instructions_variables @@ -391,6 +461,7 @@ export function applyCodexCatalogToProviderModels(input: ApplyCodexCatalogInput) const allowedSlugs = Array.from(new Set(catalogModels.map((model) => model.slug))).sort(compareModelSlugs) const allowed = new Set(allowedSlugs) const bySlug = new Map(catalogModels.map((model) => [model.slug, model])) + const customTargetBySlug = new Map() for (const slug of allowedSlugs) { const catalogModel = bySlug.get(slug) @@ -424,6 +495,26 @@ export function applyCodexCatalogToProviderModels(input: ApplyCodexCatalogInput) } } + for (const [slug, customModel] of Object.entries(input.customModels ?? 
{})) { + const targetSlug = customModel.targetModel.trim().toLowerCase() + const targetModel = input.providerModels[targetSlug] + if (!targetModel) { + input.warn?.( + `[opencode-codex-auth] customModels.${slug}.targetModel points to ${JSON.stringify(customModel.targetModel)}, but that model was not present in the active Codex catalog. Skipping custom model synthesis.` + ) + delete input.providerModels[slug] + continue + } + + input.providerModels[slug] = buildCustomProviderModel({ + slug, + config: customModel, + targetModel + }) + allowed.add(slug) + customTargetBySlug.set(slug, targetSlug) + } + for (const modelId of Object.keys(input.providerModels)) { if (!allowed.has(modelId)) { delete input.providerModels[modelId] @@ -431,8 +522,8 @@ export function applyCodexCatalogToProviderModels(input: ApplyCodexCatalogInput) } const orderedModelIds = Object.keys(input.providerModels).sort((a, b) => { - const aPriority = bySlug.get(a)?.priority - const bPriority = bySlug.get(b)?.priority + const aPriority = bySlug.get(a)?.priority ?? bySlug.get(customTargetBySlug.get(a) ?? "")?.priority + const bPriority = bySlug.get(b)?.priority ?? bySlug.get(customTargetBySlug.get(b) ?? "")?.priority const normalizedAPriority = typeof aPriority === "number" && Number.isFinite(aPriority) ? 
aPriority : Number.POSITIVE_INFINITY const normalizedBPriority = diff --git a/lib/model-catalog/shared.ts b/lib/model-catalog/shared.ts index 1ab9229..17cf620 100644 --- a/lib/model-catalog/shared.ts +++ b/lib/model-catalog/shared.ts @@ -1,4 +1,27 @@ export type PersonalityOption = string +export type CustomModelBehaviorConfig = { + targetModel: string + name?: string + personality?: string + reasoningEffort?: string + reasoningSummary?: "auto" | "concise" | "detailed" | "none" + textVerbosity?: "default" | "low" | "medium" | "high" | "none" + serviceTier?: "auto" | "priority" | "flex" + include?: Array<"reasoning.encrypted_content" | "file_search_call.results" | "message.output_text.logprobs"> + parallelToolCalls?: boolean + variants?: Record< + string, + { + personality?: string + reasoningEffort?: string + reasoningSummary?: "auto" | "concise" | "detailed" | "none" + textVerbosity?: "default" | "low" | "medium" | "high" | "none" + serviceTier?: "auto" | "priority" | "flex" + include?: Array<"reasoning.encrypted_content" | "file_search_call.results" | "message.output_text.logprobs"> + parallelToolCalls?: boolean + } + > +} type ModelInstructionsVariables = { personality?: string | null @@ -90,6 +113,8 @@ export type ApplyCodexCatalogInput = { providerModels: Record> catalogModels?: CodexModelInfo[] personality?: PersonalityOption + customModels?: Record + warn?: (message: string) => void } export const CODEX_MODELS_ENDPOINT = "https://chatgpt.com/backend-api/codex/models" diff --git a/lib/personality-command.ts b/lib/personality-command.ts index d37632b..e515911 100644 --- a/lib/personality-command.ts +++ b/lib/personality-command.ts @@ -32,7 +32,7 @@ Workflow: - \`name\`, \`sourceText\`, \`targetStyle\`, \`voiceFidelity\`, \`competenceStrictness\`, \`domain\` - Then persist file with \`scope\` and \`overwrite\` as needed. 4. 
Confirm the resulting key + path, then show how to activate: - - set \`global.personality\` in \`codex-config.json\` + - set \`global.personality\` in \`codex-config.jsonc\` - or set \`perModel..personality\` Initial user context (if any): diff --git a/lib/personality-skill.ts b/lib/personality-skill.ts index 823024b..af106eb 100644 --- a/lib/personality-skill.ts +++ b/lib/personality-skill.ts @@ -33,7 +33,7 @@ Use this skill when a user wants to create or refine a personality profile for O 2. Keep the personality grounded in terminal coding-agent behavior. 3. If a source document is provided, read \`references/personality-patterns.md\` and map voice cues into constraints. 4. Call \`create-personality\` with structured fields when ready. -5. Confirm resulting key/path and activation path in \`codex-config.json\`. +5. Confirm resulting key/path and activation path in \`codex-config.jsonc\`. ## Tool Contract diff --git a/lib/ui/auth-menu.ts b/lib/ui/auth-menu.ts index 15002c6..5e27d33 100644 --- a/lib/ui/auth-menu.ts +++ b/lib/ui/auth-menu.ts @@ -173,7 +173,7 @@ export function buildAuthMenuItems( { label: "Add new account", value: { type: "add" } }, { label: "Check quotas", value: { type: "check" } }, { label: "Manage accounts (enable/disable)", value: { type: "manage" } }, - { label: "Configure models in codex-config.json", value: { type: "configure-models" } }, + { label: "Configure models in codex-config.jsonc", value: { type: "configure-models" } }, ...(options.allowTransfer ? 
[ { diff --git a/package.json b/package.json index e3be559..7df32fe 100644 --- a/package.json +++ b/package.json @@ -40,6 +40,8 @@ "patch:plugin-dts": "node scripts/patch-opencode-plugin-dts.js", "typecheck": "npm run patch:plugin-dts && tsc --noEmit", "typecheck:test": "npm run patch:plugin-dts && tsc --noEmit -p tsconfig.test.json", + "verify:local": "node scripts/enforce-local-verify.mjs manual", + "prepush": "npm run verify:local", "test": "vitest run", "test:coverage": "vitest run --coverage.enabled true --coverage.provider=v8", "test:anti-mock": "node scripts/check-test-mocking.mjs", @@ -54,6 +56,7 @@ "perf:profile:compare": "npm run build && node dist/scripts/perf-profile.js 300", "check:upstream": "node scripts/check-upstream-watch.js", "check:upstream:update": "node scripts/check-upstream-watch.js --update", + "hooks:install": "node scripts/install-git-hooks.mjs", "prepack": "npm run build", "release": "node scripts/release.js", "release:patch": "npm run release -- patch", diff --git a/schemas/codex-config.schema.json b/schemas/codex-config.schema.json index 806ea52..fa9bda1 100644 --- a/schemas/codex-config.schema.json +++ b/schemas/codex-config.schema.json @@ -2,7 +2,7 @@ "$schema": "https://json-schema.org/draft/2020-12/schema", "$id": "https://schemas.iam-brain.dev/opencode-codex-auth/codex-config.schema.json", "title": "OpenCode Codex Auth Config", - "description": "Schema for ~/.config/opencode/codex-config.json", + "description": "Schema for ~/.config/opencode/codex-config.jsonc", "type": "object", "additionalProperties": false, "properties": { @@ -76,6 +76,12 @@ "global": { "$ref": "#/$defs/modelBehavior" }, + "customModels": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/customModel" + } + }, "perModel": { "type": "object", "additionalProperties": { @@ -89,6 +95,30 @@ "minLength": 1, "pattern": "^(?!.*(?:\\.\\.|/|\\\\)).+$" }, + "reasoningSummary": { + "type": "string", + "enum": ["auto", "concise", "detailed", "none"], + 
"description": "Reasoning summary format sent upstream as reasoning.summary. Use `none` to disable summaries." + }, + "textVerbosity": { + "type": "string", + "enum": ["default", "low", "medium", "high", "none"], + "description": "Text verbosity override. `default` uses the selected model's catalog default. `none` disables text verbosity injection." + }, + "serviceTier": { + "type": "string", + "enum": ["auto", "priority", "flex", "default"], + "description": "Fast Mode preference. `priority` enables GPT-5.4 fast mode, `flex` passes through service_tier:flex, and `auto` leaves the request unchanged. `default` is a deprecated alias for `auto`." + }, + "include": { + "type": "array", + "items": { + "type": "string", + "enum": ["reasoning.encrypted_content", "file_search_call.results", "message.output_text.logprobs"] + }, + "uniqueItems": true, + "description": "Extra include values forwarded to the OpenAI responses API." + }, "modelBehavior": { "type": "object", "additionalProperties": false, @@ -96,19 +126,43 @@ "personality": { "$ref": "#/$defs/personality" }, + "reasoningEffort": { + "type": "string", + "minLength": 1, + "description": "Reasoning effort override forwarded upstream." + }, + "reasoningSummary": { + "$ref": "#/$defs/reasoningSummary" + }, + "reasoningSummaries": { + "type": "boolean", + "description": "Deprecated alias for reasoningSummary. true => auto, false => none." + }, "thinkingSummaries": { - "type": "boolean" + "type": "boolean", + "description": "Deprecated alias for reasoningSummary. true => auto, false => none." + }, + "textVerbosity": { + "$ref": "#/$defs/textVerbosity" }, "verbosityEnabled": { - "type": "boolean" + "type": "boolean", + "description": "Deprecated alias for textVerbosity. false => none, true without verbosity => default." }, "verbosity": { "type": "string", - "enum": ["default", "low", "medium", "high"] + "enum": ["default", "low", "medium", "high"], + "description": "Deprecated alias for textVerbosity." 
}, "serviceTier": { - "type": "string", - "enum": ["default", "priority", "flex"] + "$ref": "#/$defs/serviceTier" + }, + "include": { + "$ref": "#/$defs/include" + }, + "parallelToolCalls": { + "type": "boolean", + "description": "Whether to allow multiple tool calls in parallel." } } }, @@ -124,19 +178,43 @@ "personality": { "$ref": "#/$defs/personality" }, + "reasoningEffort": { + "type": "string", + "minLength": 1, + "description": "Reasoning effort override forwarded upstream." + }, + "reasoningSummary": { + "$ref": "#/$defs/reasoningSummary" + }, + "reasoningSummaries": { + "type": "boolean", + "description": "Deprecated alias for reasoningSummary. true => auto, false => none." + }, "thinkingSummaries": { - "type": "boolean" + "type": "boolean", + "description": "Deprecated alias for reasoningSummary. true => auto, false => none." + }, + "textVerbosity": { + "$ref": "#/$defs/textVerbosity" }, "verbosityEnabled": { - "type": "boolean" + "type": "boolean", + "description": "Deprecated alias for textVerbosity. false => none, true without verbosity => default." }, "verbosity": { "type": "string", - "enum": ["default", "low", "medium", "high"] + "enum": ["default", "low", "medium", "high"], + "description": "Deprecated alias for textVerbosity." }, "serviceTier": { - "type": "string", - "enum": ["default", "priority", "flex"] + "$ref": "#/$defs/serviceTier" + }, + "include": { + "$ref": "#/$defs/include" + }, + "parallelToolCalls": { + "type": "boolean", + "description": "Whether to allow multiple tool calls in parallel." }, "variants": { "type": "object", @@ -147,6 +225,70 @@ } } ] + }, + "customModel": { + "type": "object", + "additionalProperties": false, + "required": ["targetModel"], + "properties": { + "targetModel": { + "type": "string", + "minLength": 1, + "description": "Backend-facing target model id inherited by this selectable custom model slug." 
+ }, + "name": { + "type": "string", + "minLength": 1, + "description": "Optional display name shown for the custom selectable model." + }, + "personality": { + "$ref": "#/$defs/personality" + }, + "reasoningEffort": { + "type": "string", + "minLength": 1, + "description": "Reasoning effort override forwarded upstream." + }, + "reasoningSummary": { + "$ref": "#/$defs/reasoningSummary" + }, + "reasoningSummaries": { + "type": "boolean", + "description": "Deprecated alias for reasoningSummary. true => auto, false => none." + }, + "thinkingSummaries": { + "type": "boolean", + "description": "Deprecated alias for reasoningSummary. true => auto, false => none." + }, + "textVerbosity": { + "$ref": "#/$defs/textVerbosity" + }, + "verbosityEnabled": { + "type": "boolean", + "description": "Deprecated alias for textVerbosity. false => none, true without verbosity => default." + }, + "verbosity": { + "type": "string", + "enum": ["default", "low", "medium", "high"], + "description": "Deprecated alias for textVerbosity." + }, + "serviceTier": { + "$ref": "#/$defs/serviceTier" + }, + "include": { + "$ref": "#/$defs/include" + }, + "parallelToolCalls": { + "type": "boolean", + "description": "Whether to allow multiple tool calls in parallel." 
+ }, + "variants": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/modelBehavior" + } + } + } } } } diff --git a/scripts/coverage-ratchet.baseline.json b/scripts/coverage-ratchet.baseline.json index c996d4d..9680fee 100644 --- a/scripts/coverage-ratchet.baseline.json +++ b/scripts/coverage-ratchet.baseline.json @@ -55,10 +55,10 @@ "statements": 100 }, "lib/codex-native.ts": { - "lines": 96.2, - "branches": 82.19, - "functions": 92.3, - "statements": 96.2 + "lines": 94.88, + "branches": 83.33, + "functions": 88.57, + "statements": 94.88 }, "lib/codex-native/accounts.ts": { "lines": 92.09, @@ -175,10 +175,10 @@ "statements": 69.66 }, "lib/codex-native/openai-loader-fetch.ts": { - "lines": 90.9, - "branches": 85.1, + "lines": 89.72, + "branches": 86.48, "functions": 83.33, - "statements": 90.9 + "statements": 89.72 }, "lib/codex-native/originator.ts": { "lines": 100, @@ -192,6 +192,12 @@ "functions": 100, "statements": 100 }, + "lib/codex-native/reasoning-summary.ts": { + "lines": 100, + "branches": 95.23, + "functions": 100, + "statements": 100 + }, "lib/codex-native/request-routing.ts": { "lines": 98.46, "branches": 88.46, @@ -217,10 +223,10 @@ "statements": 92.94 }, "lib/codex-native/request-transform-model.ts": { - "lines": 91.02, - "branches": 82.95, - "functions": 100, - "statements": 91.02 + "lines": 93.22, + "branches": 89.07, + "functions": 92.85, + "statements": 93.22 }, "lib/codex-native/request-transform-payload-helpers.ts": { "lines": 95.59, @@ -349,10 +355,10 @@ "statements": 88.88 }, "lib/config/resolve.ts": { - "lines": 99.34, - "branches": 96.38, + "lines": 96.83, + "branches": 90.9, "functions": 100, - "statements": 99.34 + "statements": 96.83 }, "lib/config/types.ts": { "lines": 100, @@ -427,10 +433,10 @@ "statements": 91.17 }, "lib/model-catalog/provider.ts": { - "lines": 95.88, - "branches": 88.54, + "lines": 96.32, + "branches": 87.89, "functions": 100, - "statements": 95.88 + "statements": 96.32 }, 
"lib/model-catalog/shared.ts": { "lines": 86.95, diff --git a/scripts/enforce-local-verify.mjs b/scripts/enforce-local-verify.mjs new file mode 100644 index 0000000..f589992 --- /dev/null +++ b/scripts/enforce-local-verify.mjs @@ -0,0 +1,116 @@ +import { createHash } from "node:crypto" +import { existsSync, mkdirSync, readFileSync, writeFileSync } from "node:fs" +import path from "node:path" +import { spawnSync } from "node:child_process" + +function runGit(args, options = {}) { + const result = spawnSync("git", args, { + cwd: options.cwd ?? process.cwd(), + encoding: "utf8", + stdio: ["ignore", "pipe", "pipe"] + }) + + if (result.status !== 0) { + const stderr = result.stderr?.trim() + throw new Error(stderr || `git ${args.join(" ")} failed with status ${result.status ?? "unknown"}`) + } + + return result.stdout +} + +function resolveRepoRoot() { + return runGit(["rev-parse", "--show-toplevel"]).trim() +} + +function resolveGitPath(repoRoot, relativePath) { + return runGit(["rev-parse", "--git-path", relativePath], { cwd: repoRoot }).trim() +} + +function listGitPaths(repoRoot, args) { + return runGit(args, { cwd: repoRoot }) + .split("\0") + .filter(Boolean) + .sort((a, b) => a.localeCompare(b)) +} + +function summarizePaths(paths) { + if (paths.length === 0) return "" + const preview = paths.slice(0, 5).join(", ") + const suffix = paths.length > 5 ? 
` (+${paths.length - 5} more)` : "" + return `${preview}${suffix}` +} + +function resolveHookTarget(repoRoot, hookName) { + const stagedPaths = listGitPaths(repoRoot, ["diff", "--cached", "--name-only", "-z"]) + const unstagedPaths = listGitPaths(repoRoot, ["diff", "--name-only", "-z"]) + const untrackedPaths = listGitPaths(repoRoot, ["ls-files", "--others", "--exclude-standard", "-z"]) + + if (hookName === "pre-push") { + const dirtyPaths = [...new Set([...stagedPaths, ...unstagedPaths, ...untrackedPaths])] + if (dirtyPaths.length > 0) { + throw new Error( + `pre-push verify requires a clean working tree so it validates the pushed commits, not local WIP. Dirty paths: ${summarizePaths(dirtyPaths)}` + ) + } + + const headTree = runGit(["rev-parse", "HEAD^{tree}"], { cwd: repoRoot }).trim() + return `head:${headTree}` + } + + if (unstagedPaths.length > 0 || untrackedPaths.length > 0) { + const dirtyPaths = [...new Set([...unstagedPaths, ...untrackedPaths])] + throw new Error( + `pre-commit verify requires staged-only commit-ready changes with no extra local WIP. Dirty paths: ${summarizePaths(dirtyPaths)}` + ) + } + + const indexTree = runGit(["write-tree"], { cwd: repoRoot }).trim() + const stagedFingerprint = + stagedPaths.length > 0 ? createHash("sha256").update(stagedPaths.join("\0")).digest("hex") : "clean" + return `index:${indexTree}:${stagedFingerprint}` +} + +function readStamp(stampPath) { + if (!existsSync(stampPath)) return undefined + + try { + return JSON.parse(readFileSync(stampPath, "utf8")) + } catch { + return undefined + } +} + +function writeStamp(stampPath, payload) { + mkdirSync(path.dirname(stampPath), { recursive: true }) + writeFileSync(stampPath, `${JSON.stringify(payload, null, 2)}\n`, "utf8") +} + +const repoRoot = resolveRepoRoot() +const hookName = process.argv[2] ?? 
"manual" +const fingerprint = resolveHookTarget(repoRoot, hookName) +const stampPath = resolveGitPath(repoRoot, "opencode-codex-auth/verify-stamp.json") +const stamp = readStamp(stampPath) + +if (stamp?.fingerprint === fingerprint) { + console.log(`Skipping local verify for ${hookName}; current tree already passed npm run verify.`) + process.exit(0) +} + +console.log(`Running npm run verify for ${hookName}...`) + +const verifyResult = spawnSync("npm", ["run", "verify"], { + cwd: repoRoot, + stdio: "inherit" +}) + +if (verifyResult.status !== 0) { + process.exit(verifyResult.status ?? 1) +} + +writeStamp(stampPath, { + fingerprint: resolveHookTarget(repoRoot, hookName), + hookName, + verifiedAt: new Date().toISOString() +}) + +console.log(`Local verify passed for ${hookName}.`) diff --git a/scripts/install-git-hooks.mjs b/scripts/install-git-hooks.mjs new file mode 100644 index 0000000..347c459 --- /dev/null +++ b/scripts/install-git-hooks.mjs @@ -0,0 +1,29 @@ +import { existsSync } from "node:fs" +import { dirname, resolve } from "node:path" +import { fileURLToPath } from "node:url" +import { spawnSync } from "node:child_process" + +const repoRoot = resolve(dirname(fileURLToPath(import.meta.url)), "..") +const gitDir = resolve(repoRoot, ".git") +const hooksDir = resolve(repoRoot, ".githooks") + +if (!existsSync(gitDir)) { + console.error("Not a git repository; skipping hook installation.") + process.exit(1) +} + +if (!existsSync(hooksDir)) { + console.error(`Missing hooks directory: ${hooksDir}`) + process.exit(1) +} + +const result = spawnSync("git", ["config", "core.hooksPath", ".githooks"], { + cwd: repoRoot, + stdio: "inherit" +}) + +if (result.status !== 0) { + process.exit(result.status ?? 
1) +} + +console.log("Installed local git hooks from .githooks/") diff --git a/scripts/test-mocking-allowlist.json b/scripts/test-mocking-allowlist.json index 84cdc6c..37adee5 100644 --- a/scripts/test-mocking-allowlist.json +++ b/scripts/test-mocking-allowlist.json @@ -92,7 +92,7 @@ "stubGlobal": 0 }, "test/openai-loader-fetch.prompt-cache-key.core-behavior.test.ts": { - "doMock": 4, + "doMock": 5, "mock": 0, "stubGlobal": 0 }, diff --git a/test/codex-native-config-variants.test.ts b/test/codex-native-config-variants.test.ts index e1e245e..f1fbe7d 100644 --- a/test/codex-native-config-variants.test.ts +++ b/test/codex-native-config-variants.test.ts @@ -8,15 +8,16 @@ import { resetStubbedGlobals, stubGlobalForTest } from "./helpers/mock-policy" type VariantConfigMap = Record> +type ModelConfigEntry = { + name?: string + api?: { id?: string } + variants: VariantConfigMap +} + type PluginConfigLike = { provider: { openai: { - models: Record< - string, - { - variants: VariantConfigMap - } - > + models: Record } } } @@ -227,4 +228,115 @@ describe("codex-native config variants", () => { expect(config).toEqual(baseline) }) }) + + it("adds selectable custom models to provider config when their targets exist", async () => { + await withIsolatedHome(async () => { + await seedAuthFixture(Date.now() + 60_000) + stubGlobalForTest( + "fetch", + vi.fn(async (url: string | URL | Request) => { + const endpoint = + typeof url === "string" ? url : url instanceof URL ? 
url.toString() : new URL(url.url).toString() + if (endpoint.includes("/backend-api/codex/models")) { + return new Response( + JSON.stringify({ + models: [ + { + slug: "gpt-5.4", + context_window: 272000, + input_modalities: ["text", "image"], + supported_reasoning_levels: [{ effort: "medium" }, { effort: "high" }] + } + ] + }), + { status: 200 } + ) + } + if (endpoint.includes("raw.githubusercontent.com/openai/codex/")) { + return new Response(JSON.stringify({ models: [] }), { status: 200 }) + } + return new Response("ok", { status: 200 }) + }) + ) + + const { CodexAuthPlugin } = await import("../lib/codex-native") + const hooks = await CodexAuthPlugin({} as never, { + customModels: { + "openai/my-fast-codex": { + targetModel: "gpt-5.4", + name: "My Fast Codex", + reasoningSummary: "concise" + } + } + }) + const config = makeConfig() + + await hooks.config?.(config as never) + + expect(config.provider.openai.models["openai/my-fast-codex"]).toBeDefined() + expect(config.provider.openai.models["openai/my-fast-codex"].name).toBe("My Fast Codex") + expect(config.provider.openai.models["openai/my-fast-codex"].api).toMatchObject({ + id: "gpt-5.4" + }) + expect(config.provider.openai.models["openai/my-fast-codex"].variants.high).toEqual({ + reasoningEffort: "high", + reasoningSummary: "auto", + include: ["reasoning.encrypted_content"] + }) + }) + }) + + it("warns and removes stale custom model entries when their target is missing", async () => { + await withIsolatedHome(async () => { + await seedAuthFixture(Date.now() + 60_000) + stubGlobalForTest( + "fetch", + vi.fn(async (url: string | URL | Request) => { + const endpoint = + typeof url === "string" ? url : url instanceof URL ? 
url.toString() : new URL(url.url).toString() + if (endpoint.includes("/backend-api/codex/models")) { + return new Response( + JSON.stringify({ + models: [ + { + slug: "gpt-5.4", + context_window: 272000, + input_modalities: ["text", "image"] + } + ] + }), + { status: 200 } + ) + } + if (endpoint.includes("raw.githubusercontent.com/openai/codex/")) { + return new Response(JSON.stringify({ models: [] }), { status: 200 }) + } + return new Response("ok", { status: 200 }) + }) + ) + + const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {}) + try { + const { CodexAuthPlugin } = await import("../lib/codex-native") + const hooks = await CodexAuthPlugin({} as never, { + customModels: { + "openai/missing-fast-codex": { + targetModel: "gpt-5.3-codex" + } + } + }) + const config = makeConfig() + config.provider.openai.models["openai/missing-fast-codex"] = { variants: {} } + + await hooks.config?.(config as never) + + expect(config.provider.openai.models["openai/missing-fast-codex"]).toBeUndefined() + expect(warnSpy).toHaveBeenCalledWith( + expect.stringContaining("customModels.openai/missing-fast-codex.targetModel") + ) + } finally { + warnSpy.mockRestore() + } + }) + }) }) diff --git a/test/codex-native-request-transform.test.ts b/test/codex-native-request-transform.test.ts index b8efae6..f197069 100644 --- a/test/codex-native-request-transform.test.ts +++ b/test/codex-native-request-transform.test.ts @@ -533,6 +533,113 @@ describe("compat sanitizer wrapper", () => { }) }) +describe("reasoning summary validation diagnostics", () => { + it("reports invalid explicit request reasoning summary sources", async () => { + const request = new Request("https://chatgpt.com/backend-api/codex/responses", { + method: "POST", + headers: { "content-type": "application/json" }, + body: JSON.stringify({ + model: "gpt-5.3-codex", + reasoning: { + effort: "high", + summary: "experimental" + }, + input: "hello" + }) + }) + + const transformed = await 
transformOutboundRequestPayload({ + request, + stripReasoningReplayEnabled: false, + remapDeveloperMessagesToUserEnabled: false, + compatInputSanitizerEnabled: false, + promptCacheKeyOverrideEnabled: false + }) + + expect(transformed.reasoningSummaryValidation).toEqual({ + actual: "experimental", + source: "request.reasoning.summary", + sourceType: "request_option" + }) + }) + + it("reports invalid internal catalog reasoning summary defaults", async () => { + const request = new Request("https://chatgpt.com/backend-api/codex/responses", { + method: "POST", + headers: { "content-type": "application/json" }, + body: JSON.stringify({ + model: "gpt-5.3-codex", + reasoning: { + effort: "high" + }, + input: "hello" + }) + }) + + const transformed = await transformOutboundRequestPayload({ + request, + stripReasoningReplayEnabled: false, + remapDeveloperMessagesToUserEnabled: false, + compatInputSanitizerEnabled: false, + promptCacheKeyOverrideEnabled: false, + catalogModels: [ + { + slug: "gpt-5.3-codex", + default_reasoning_level: "high", + supports_reasoning_summaries: true, + reasoning_summary_format: "experimental" + } + ] + }) + + expect(transformed.reasoningSummaryValidation).toEqual({ + actual: "experimental", + model: "gpt-5.3-codex", + source: "codexRuntimeDefaults.reasoningSummaryFormat", + sourceType: "catalog_default" + }) + }) + + it("honors custom model reasoningSummary overrides during payload validation", async () => { + const request = new Request("https://chatgpt.com/backend-api/codex/responses", { + method: "POST", + headers: { "content-type": "application/json" }, + body: JSON.stringify({ + model: "gpt-5.3-codex", + reasoning: { + effort: "high" + }, + input: "hello" + }) + }) + + const transformed = await transformOutboundRequestPayload({ + request, + selectedModelSlug: "openai/my-fast-codex", + stripReasoningReplayEnabled: false, + remapDeveloperMessagesToUserEnabled: false, + compatInputSanitizerEnabled: false, + promptCacheKeyOverrideEnabled: 
false, + catalogModels: [ + { + slug: "gpt-5.3-codex", + default_reasoning_level: "high", + supports_reasoning_summaries: true, + reasoning_summary_format: "experimental" + } + ], + customModels: { + "openai/my-fast-codex": { + targetModel: "gpt-5.3-codex", + reasoningSummary: "none" + } + } + }) + + expect(transformed.reasoningSummaryValidation).toBeUndefined() + }) +}) + describe("catalog-scoped payload cleanup", () => { const previousCatalogModels = [ { diff --git a/test/codex-native-spoof-mode.test.ts b/test/codex-native-spoof-mode.test.ts index a0d4b89..e973d1f 100644 --- a/test/codex-native-spoof-mode.test.ts +++ b/test/codex-native-spoof-mode.test.ts @@ -62,7 +62,7 @@ describe("codex-native spoof + params hooks", () => { expect(output.options.include).toEqual(["web_search_call.action.sources", "reasoning.encrypted_content"]) }) - it("applies model reasoning summary format default verbatim", async () => { + it("drops invalid model reasoning summary format defaults from chat params", async () => { const hooks = await CodexAuthPlugin({} as never) const chatParams = hooks["chat.params"] expect(chatParams).toBeTypeOf("function") @@ -93,7 +93,7 @@ describe("codex-native spoof + params hooks", () => { } await chatParams?.(input, output) - expect(output.options.reasoningSummary).toBe("experimental") + expect(output.options.reasoningSummary).toBeUndefined() }) it("treats model reasoning summary format none as disabled", async () => { @@ -465,11 +465,11 @@ describe("codex-native spoof + params hooks", () => { expect(output.options.instructions).toBe("Base Strict voice") }) - it("honors thinking_summaries false override", async () => { + it("honors reasoning summaries false override", async () => { const hooks = await CodexAuthPlugin({} as never, { behaviorSettings: { global: { - thinkingSummaries: false + reasoningSummaries: false } } }) @@ -507,15 +507,68 @@ describe("codex-native spoof + params hooks", () => { expect(output.options.reasoningSummary).toBeUndefined() 
}) - it("prefers per-model thinking summaries over global setting", async () => { + it("applies canonical reasoningSummary, textVerbosity, include, and parallelToolCalls config overrides", async () => { const hooks = await CodexAuthPlugin({} as never, { behaviorSettings: { global: { - thinkingSummaries: true + reasoningEffort: "medium", + reasoningSummary: "concise", + textVerbosity: "high", + include: ["file_search_call.results"], + parallelToolCalls: false + } + } + }) + const chatParams = hooks["chat.params"] + expect(chatParams).toBeTypeOf("function") + + const input = { + sessionID: "ses_canonical_behavior", + agent: "default", + provider: {}, + message: {}, + model: { + id: "gpt-5.3-codex", + api: { id: "gpt-5.3-codex" }, + providerID: "openai", + capabilities: { toolcall: true }, + options: { + codexInstructions: "Catalog instructions", + codexRuntimeDefaults: { + defaultReasoningEffort: "high", + supportsReasoningSummaries: true, + defaultVerbosity: "medium", + supportsParallelToolCalls: true + } + } + } + } as unknown as Parameters>[0] + + const output: any = { + temperature: 0, + topP: 1, + topK: 0, + options: {} + } + + await chatParams?.(input, output) + + expect(output.options.reasoningEffort).toBe("medium") + expect(output.options.reasoningSummary).toBe("concise") + expect(output.options.textVerbosity).toBe("high") + expect(output.options.parallelToolCalls).toBe(false) + expect(output.options.include).toEqual(["file_search_call.results", "reasoning.encrypted_content"]) + }) + + it("prefers per-model reasoning summaries over global setting", async () => { + const hooks = await CodexAuthPlugin({} as never, { + behaviorSettings: { + global: { + reasoningSummaries: true }, perModel: { "gpt-5.3-codex": { - thinkingSummaries: false + reasoningSummaries: false } } } @@ -554,17 +607,17 @@ describe("codex-native spoof + params hooks", () => { expect(output.options.reasoningSummary).toBeUndefined() }) - it("prefers per-variant thinking summaries over per-model 
and global", async () => { + it("prefers per-variant reasoning summaries over per-model and global", async () => { const hooks = await CodexAuthPlugin({} as never, { behaviorSettings: { global: { - thinkingSummaries: false + reasoningSummaries: false }, perModel: { "gpt-5.3-codex": { - thinkingSummaries: false, + reasoningSummaries: false, variants: { - high: { thinkingSummaries: true } + high: { reasoningSummaries: true } } } } @@ -604,6 +657,81 @@ describe("codex-native spoof + params hooks", () => { expect(output.options.reasoningSummary).toBe("auto") }) + it("applies custom selectable model defaults and lets perModel custom slug overrides win", async () => { + const hooks = await CodexAuthPlugin({} as never, { + behaviorSettings: { + global: { + reasoningSummary: "auto", + textVerbosity: "low", + parallelToolCalls: true + }, + perModel: { + "openai/my-fast-codex": { + reasoningSummary: "detailed", + textVerbosity: "high" + }, + "gpt-5.3-codex": { + reasoningSummary: "none", + serviceTier: "priority" + } + } + } + }) + const chatParams = hooks["chat.params"] + expect(chatParams).toBeTypeOf("function") + + const input = { + sessionID: "ses_custom_model", + agent: "default", + provider: {}, + message: { variant: "high" }, + model: { + id: "openai/my-fast-codex", + api: { id: "gpt-5.3-codex" }, + providerID: "openai", + capabilities: { toolcall: true }, + options: { + codexRuntimeDefaults: { + defaultReasoningEffort: "medium", + supportsReasoningSummaries: true, + defaultVerbosity: "medium", + supportsParallelToolCalls: false + }, + codexCustomModelConfig: { + slug: "openai/my-fast-codex", + targetModel: "gpt-5.3-codex", + reasoningEffort: "low", + reasoningSummary: "concise", + textVerbosity: "medium", + parallelToolCalls: true, + variants: { + high: { + reasoningEffort: "high", + reasoningSummary: "detailed", + serviceTier: "flex" + } + } + } + } + } + } as unknown as Parameters>[0] + + const output: any = { + temperature: 0, + topP: 1, + topK: 0, + options: {} + 
} + + await chatParams?.(input, output) + + expect(output.options.reasoningEffort).toBe("high") + expect(output.options.reasoningSummary).toBe("detailed") + expect(output.options.textVerbosity).toBe("high") + expect(output.options.parallelToolCalls).toBe(true) + expect(output.options.serviceTier).toBe("flex") + }) + it("applies global verbosity override when enabled", async () => { const hooks = await CodexAuthPlugin({} as never, { behaviorSettings: { diff --git a/test/config-file-loading.test.ts b/test/config-file-loading.test.ts index de7a223..7b6b596 100644 --- a/test/config-file-loading.test.ts +++ b/test/config-file-loading.test.ts @@ -38,7 +38,7 @@ describe("config file loading", () => { orchestratorSubagents: true }, global: { - thinkingSummaries: true, + reasoningSummaries: true, personality: "friendly", verbosityEnabled: true, verbosity: "high", @@ -47,14 +47,14 @@ describe("config file loading", () => { perModel: { "gpt-5.3-codex": { personality: "pirate", - thinkingSummaries: false, + reasoningSummaries: false, verbosityEnabled: false, verbosity: "default", serviceTier: "flex", variants: { high: { personality: "strict", - thinkingSummaries: true, + reasoningSummaries: true, verbosityEnabled: true, verbosity: "medium", serviceTier: "priority" @@ -86,24 +86,102 @@ describe("config file loading", () => { expect(loaded.rotationStrategy).toBe("hybrid") expect(loaded.promptCacheKeyStrategy).toBe("project") expect(loaded.mode).toBe("codex") - expect(loaded.behaviorSettings?.global?.thinkingSummaries).toBe(true) + expect(loaded.behaviorSettings?.global?.reasoningSummary).toBe("auto") + expect(loaded.behaviorSettings?.global?.reasoningSummaries).toBe(true) + expect(loaded.behaviorSettings?.global?.textVerbosity).toBe("high") expect(loaded.behaviorSettings?.global?.verbosityEnabled).toBe(true) expect(loaded.behaviorSettings?.global?.verbosity).toBe("high") expect(loaded.behaviorSettings?.global?.serviceTier).toBe("priority") 
expect(loaded.behaviorSettings?.global?.personality).toBe("friendly") expect(loaded.behaviorSettings?.perModel?.["gpt-5.3-codex"]?.personality).toBe("pirate") - expect(loaded.behaviorSettings?.perModel?.["gpt-5.3-codex"]?.thinkingSummaries).toBe(false) + expect(loaded.behaviorSettings?.perModel?.["gpt-5.3-codex"]?.reasoningSummary).toBe("none") + expect(loaded.behaviorSettings?.perModel?.["gpt-5.3-codex"]?.reasoningSummaries).toBe(false) + expect(loaded.behaviorSettings?.perModel?.["gpt-5.3-codex"]?.textVerbosity).toBe("none") expect(loaded.behaviorSettings?.perModel?.["gpt-5.3-codex"]?.verbosityEnabled).toBe(false) - expect(loaded.behaviorSettings?.perModel?.["gpt-5.3-codex"]?.verbosity).toBe("default") + expect(loaded.behaviorSettings?.perModel?.["gpt-5.3-codex"]?.verbosity).toBeUndefined() expect(loaded.behaviorSettings?.perModel?.["gpt-5.3-codex"]?.serviceTier).toBe("flex") expect(loaded.behaviorSettings?.perModel?.["gpt-5.3-codex"]?.variants?.high?.personality).toBe("strict") - expect(loaded.behaviorSettings?.perModel?.["gpt-5.3-codex"]?.variants?.high?.thinkingSummaries).toBe(true) + expect(loaded.behaviorSettings?.perModel?.["gpt-5.3-codex"]?.variants?.high?.reasoningSummary).toBe("auto") + expect(loaded.behaviorSettings?.perModel?.["gpt-5.3-codex"]?.variants?.high?.reasoningSummaries).toBe(true) + expect(loaded.behaviorSettings?.perModel?.["gpt-5.3-codex"]?.variants?.high?.textVerbosity).toBe("medium") expect(loaded.behaviorSettings?.perModel?.["gpt-5.3-codex"]?.variants?.high?.verbosityEnabled).toBe(true) expect(loaded.behaviorSettings?.perModel?.["gpt-5.3-codex"]?.variants?.high?.verbosity).toBe("medium") expect(loaded.behaviorSettings?.perModel?.["gpt-5.3-codex"]?.variants?.high?.serviceTier).toBe("priority") expect(loaded.personality).toBe("friendly") }) + it("loads canonical codex-style model behavior keys", async () => { + const root = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-codex-auth-config-file-")) + const filePath = path.join(root, 
"codex-config.jsonc") + await fs.writeFile( + filePath, + JSON.stringify({ + global: { + reasoningEffort: "medium", + reasoningSummary: "concise", + textVerbosity: "high", + serviceTier: "auto", + include: ["file_search_call.results"], + parallelToolCalls: false + } + }), + "utf8" + ) + + const loaded = loadConfigFile({ filePath }) + + expect(loaded.behaviorSettings?.global).toMatchObject({ + reasoningEffort: "medium", + reasoningSummary: "concise", + textVerbosity: "high", + serviceTier: "auto", + include: ["file_search_call.results"], + parallelToolCalls: false + }) + }) + + it("loads custom selectable models from codex-config.jsonc", async () => { + const root = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-codex-auth-config-file-")) + const filePath = path.join(root, "codex-config.jsonc") + await fs.writeFile( + filePath, + JSON.stringify({ + customModels: { + "openai/my-fast-codex": { + targetModel: "gpt-5.3-codex", + name: "My Fast Codex", + reasoningSummary: "concise", + textVerbosity: "medium", + variants: { + high: { + reasoningSummary: "detailed" + } + } + } + } + }), + "utf8" + ) + + const loaded = loadConfigFile({ filePath }) + + expect(loaded.customModels?.["openai/my-fast-codex"]).toEqual({ + targetModel: "gpt-5.3-codex", + name: "My Fast Codex", + reasoningSummary: "concise", + reasoningSummaries: true, + textVerbosity: "medium", + verbosityEnabled: true, + verbosity: "medium", + variants: { + high: { + reasoningSummary: "detailed", + reasoningSummaries: true + } + } + }) + }) + it("ignores top-level mode field in config file", async () => { const root = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-codex-auth-config-file-")) const filePath = path.join(root, "codex-config.json") @@ -159,11 +237,11 @@ describe("config file loading", () => { } }) - it("loads codex-config.json from XDG config home", async () => { + it("loads codex-config.jsonc from XDG config home", async () => { const root = await fs.mkdtemp(path.join(os.tmpdir(), 
"opencode-codex-auth-config-file-")) const configDir = path.join(root, "opencode") await fs.mkdir(configDir, { recursive: true }) - const filePath = path.join(configDir, "codex-config.json") + const filePath = path.join(configDir, "codex-config.jsonc") await fs.writeFile(filePath, JSON.stringify({ quiet: true }), "utf8") const loaded = loadConfigFile({ env: { XDG_CONFIG_HOME: root } }) @@ -199,15 +277,15 @@ describe("config file loading", () => { expect(result.created).toBe(true) expect(raw).toContain('// default: "native"') expect(raw).toContain('// default: "sticky"') - expect(raw).toContain("// Thinking summaries behavior:") - expect(raw).toContain("// Text verbosity behavior:") + expect(raw).toContain('// options: "auto" | "concise" | "detailed" | "none"') + expect(raw).toContain('// options: "default" | "low" | "medium" | "high" | "none"') expect(written).toEqual(DEFAULT_CODEX_CONFIG) }) it("does not overwrite existing codex config by default", async () => { const root = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-codex-auth-config-file-")) const configDir = path.join(root, "opencode") - const filePath = path.join(configDir, "codex-config.json") + const filePath = path.join(configDir, "codex-config.jsonc") await fs.mkdir(configDir, { recursive: true }) await fs.writeFile(filePath, JSON.stringify({ debug: true }), "utf8") @@ -221,7 +299,7 @@ describe("config file loading", () => { it("overwrites codex config when requested", async () => { const root = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-codex-auth-config-file-")) const configDir = path.join(root, "opencode") - const filePath = path.join(configDir, "codex-config.json") + const filePath = path.join(configDir, "codex-config.jsonc") await fs.mkdir(configDir, { recursive: true }) await fs.writeFile(filePath, JSON.stringify({ debug: true }), "utf8") @@ -240,7 +318,7 @@ describe("config file loading", () => { it("enforces 0600 mode when overwriting codex config", async () => { const root = await 
fs.mkdtemp(path.join(os.tmpdir(), "opencode-codex-auth-config-file-")) const configDir = path.join(root, "opencode") - const filePath = path.join(configDir, "codex-config.json") + const filePath = path.join(configDir, "codex-config.jsonc") await fs.mkdir(configDir, { recursive: true }) await fs.writeFile(filePath, JSON.stringify({ debug: true }), { encoding: "utf8", mode: 0o644 }) await fs.chmod(filePath, 0o644) @@ -277,6 +355,88 @@ describe("config file loading", () => { expect(loaded.spoofMode).toBe("codex") }) + it("warns and maps deprecated thinkingSummaries keys to reasoningSummaries", async () => { + const root = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-codex-auth-config-file-")) + const filePath = path.join(root, "codex-config.json") + await fs.writeFile( + filePath, + JSON.stringify({ + global: { + thinkingSummaries: true + } + }), + "utf8" + ) + + const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {}) + try { + const loaded = loadConfigFile({ filePath }) + expect(loaded.behaviorSettings?.global?.reasoningSummary).toBe("auto") + expect(loaded.behaviorSettings?.global?.reasoningSummaries).toBe(true) + expect(warnSpy).toHaveBeenCalledWith(expect.stringContaining("Deprecated config key(s)")) + expect(warnSpy).toHaveBeenCalledWith(expect.stringContaining("global.thinkingSummaries")) + } finally { + warnSpy.mockRestore() + } + }) + + it("loads legacy codex-config.json when canonical codex-config.jsonc is absent", async () => { + const root = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-codex-auth-config-file-")) + const configDir = path.join(root, "opencode") + const legacyPath = path.join(configDir, "codex-config.json") + await fs.mkdir(configDir, { recursive: true }) + await fs.writeFile(legacyPath, JSON.stringify({ quiet: true }), "utf8") + + const loaded = loadConfigFile({ env: { XDG_CONFIG_HOME: root } }) + expect(loaded.quietMode).toBe(true) + }) + + it("quarantines legacy codex-config.json when both config files exist", async 
() => { + const root = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-codex-auth-config-file-")) + const configDir = path.join(root, "opencode") + const canonicalPath = path.join(configDir, "codex-config.jsonc") + const legacyPath = path.join(configDir, "codex-config.json") + await fs.mkdir(configDir, { recursive: true }) + await fs.writeFile(canonicalPath, JSON.stringify({ quiet: true }), "utf8") + await fs.writeFile(legacyPath, JSON.stringify({ quiet: false }), "utf8") + + const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {}) + try { + const loaded = loadConfigFile({ env: { XDG_CONFIG_HOME: root } }) + expect(loaded.quietMode).toBe(true) + expect(warnSpy).toHaveBeenCalledWith( + expect.stringContaining("Found both codex-config.jsonc and codex-config.json") + ) + await expect(fs.access(legacyPath)).rejects.toThrow() + const quarantineDir = path.join(configDir, "quarantine") + const quarantined = await fs.readdir(quarantineDir) + expect(quarantined.some((name) => name.startsWith("codex-config.json."))).toBe(true) + } finally { + warnSpy.mockRestore() + } + }) + + it("falls back to legacy codex-config.json when canonical codex-config.jsonc is invalid", async () => { + const root = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-codex-auth-config-file-")) + const configDir = path.join(root, "opencode") + const canonicalPath = path.join(configDir, "codex-config.jsonc") + const legacyPath = path.join(configDir, "codex-config.json") + await fs.mkdir(configDir, { recursive: true }) + await fs.writeFile(canonicalPath, '{"quiet": true,', "utf8") + await fs.writeFile(legacyPath, JSON.stringify({ quiet: false }), "utf8") + + const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {}) + try { + const loaded = loadConfigFile({ env: { XDG_CONFIG_HOME: root } }) + expect(loaded.quietMode).toBe(false) + expect(warnSpy).toHaveBeenCalledWith(expect.stringContaining(`Failed to read codex-config at ${canonicalPath}`)) + await 
expect(fs.access(legacyPath)).resolves.toBeUndefined() + await expect(fs.access(path.join(configDir, "quarantine"))).rejects.toThrow() + } finally { + warnSpy.mockRestore() + } + }) + it("ignores config file when known fields have invalid types", async () => { const root = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-codex-auth-config-file-")) const filePath = path.join(root, "codex-config.json") diff --git a/test/config-getters.test.ts b/test/config-getters.test.ts index ea71fc9..9253245 100644 --- a/test/config-getters.test.ts +++ b/test/config-getters.test.ts @@ -6,9 +6,12 @@ import { getCollaborationProfileEnabled, getCodexCompactionOverrideEnabled, getCompatInputSanitizerEnabled, + getCustomModels, getOrchestratorSubagentsEnabled, getProactiveRefreshBufferMs, getProactiveRefreshEnabled, + getReasoningSummaryOverride, + getThinkingSummariesOverride, getRemapDeveloperMessagesToUserEnabled, resolveConfig } from "../lib/config" @@ -41,7 +44,7 @@ describe("config", () => { const fileBehavior = { global: { personality: "balanced", - thinkingSummaries: false + reasoningSummaries: false } } as const @@ -53,19 +56,102 @@ describe("config", () => { buildResolvedBehaviorSettings({ fileBehavior, envPersonality: "concise", - envThinkingSummaries: undefined, + envReasoningSummaries: undefined, envVerbosityEnabled: undefined, envVerbosity: undefined, + envTextVerbosity: undefined, envServiceTier: undefined }) ).toEqual({ global: { personality: "concise", - thinkingSummaries: false + reasoningSummaries: false } }) }) + it("maps env reasoning summaries, text verbosity, and service tier into canonical behavior settings", () => { + expect( + buildResolvedBehaviorSettings({ + fileBehavior: undefined, + envPersonality: undefined, + envReasoningSummaries: false, + envVerbosityEnabled: undefined, + envVerbosity: undefined, + envTextVerbosity: "high", + envServiceTier: "priority" + }) + ).toEqual({ + global: { + reasoningSummary: "none", + reasoningSummaries: false, + 
textVerbosity: "high", + verbosityEnabled: true, + verbosity: "high", + serviceTier: "priority" + } + }) + }) + + it("maps legacy verbosity env toggles when canonical text verbosity is unset", () => { + expect( + buildResolvedBehaviorSettings({ + fileBehavior: undefined, + envPersonality: undefined, + envReasoningSummaries: undefined, + envVerbosityEnabled: true, + envVerbosity: undefined, + envTextVerbosity: undefined, + envServiceTier: undefined + }) + ).toEqual({ + global: { + textVerbosity: "default", + verbosityEnabled: true, + verbosity: "default" + } + }) + }) + + it("deep-clones custom models and surfaces canonical reasoning summary getters", () => { + const cfg = resolveConfig({ + env: {}, + file: { + behaviorSettings: { + global: { + reasoningSummary: "concise" + } + }, + customModels: { + "openai/my-fast-codex": { + targetModel: "gpt-5.3-codex", + variants: { + high: { + reasoningSummary: "detailed" + } + } + } + } + } + }) + + const customModels = getCustomModels(cfg) + expect(customModels).toEqual({ + "openai/my-fast-codex": { + targetModel: "gpt-5.3-codex", + variants: { + high: { + reasoningSummary: "detailed" + } + } + } + }) + expect(customModels).not.toBe(cfg.customModels) + expect(customModels?.["openai/my-fast-codex"]).not.toBe(cfg.customModels?.["openai/my-fast-codex"]) + expect(getReasoningSummaryOverride(cfg)).toBe("concise") + expect(getThinkingSummariesOverride(cfg)).toBe(true) + }) + it("clamps and floors buffer", () => { expect(getProactiveRefreshBufferMs({ proactiveRefreshBufferMs: -500 })).toBe(0) expect(getProactiveRefreshBufferMs({ proactiveRefreshBufferMs: 1234.56 })).toBe(1234) diff --git a/test/config-loading-resolve.test.ts b/test/config-loading-resolve.test.ts index a4bf95e..d48e66b 100644 --- a/test/config-loading-resolve.test.ts +++ b/test/config-loading-resolve.test.ts @@ -1,10 +1,11 @@ -import { describe, expect, it } from "vitest" +import { describe, expect, it, vi } from "vitest" import { getBehaviorSettings, 
getCollaborationProfileEnabled, getCodexCompactionOverrideEnabled, getCompatInputSanitizerEnabled, + getCustomModels, getDebugEnabled, getHeaderSnapshotBodiesEnabled, getHeaderSnapshotsEnabled, @@ -15,10 +16,10 @@ import { getPidOffsetEnabled, getPromptCacheKeyStrategy, getQuietMode, + getReasoningSummariesOverride, getRemapDeveloperMessagesToUserEnabled, getRotationStrategy, getSpoofMode, - getThinkingSummariesOverride, resolveConfig } from "../lib/config" @@ -236,7 +237,7 @@ describe("config loading", () => { behaviorSettings: { global: { personality: "friendly", - thinkingSummaries: false + reasoningSummaries: false }, perModel: { "gpt-5.3-codex": { @@ -248,7 +249,7 @@ describe("config loading", () => { }) expect(getPersonality(cfg)).toBe("friendly") - expect(getThinkingSummariesOverride(cfg)).toBe(false) + expect(getReasoningSummariesOverride(cfg)).toBe(false) expect(getBehaviorSettings(cfg)?.perModel?.["gpt-5.3-codex"]?.personality).toBe("pragmatic") }) @@ -266,19 +267,82 @@ describe("config loading", () => { expect(getBehaviorSettings(cfg)?.global?.personality).toBe("pirate") }) - it("lets env thinking summaries override file behavior settings", () => { + it("lets env reasoning summaries override file behavior settings", () => { + const cfg = resolveConfig({ + env: { OPENCODE_OPENAI_MULTI_REASONING_SUMMARIES: "1" }, + file: { + behaviorSettings: { + global: { + reasoningSummaries: false + } + } + } + }) + + expect(getReasoningSummariesOverride(cfg)).toBe(true) + }) + + it("preserves custom selectable model definitions from file config", () => { + const cfg = resolveConfig({ + env: {}, + file: { + customModels: { + "openai/my-fast-codex": { + targetModel: "gpt-5.3-codex", + reasoningSummary: "concise", + variants: { + high: { + reasoningSummary: "detailed" + } + } + } + } + } + }) + + expect(getCustomModels(cfg)).toEqual({ + "openai/my-fast-codex": { + targetModel: "gpt-5.3-codex", + reasoningSummary: "concise", + variants: { + high: { + reasoningSummary: 
"detailed" + } + } + } + }) + }) + + it("prefers canonical reasoningSummary over deprecated boolean aliases in the same scope", () => { const cfg = resolveConfig({ - env: { OPENCODE_OPENAI_MULTI_THINKING_SUMMARIES: "1" }, + env: {}, file: { behaviorSettings: { global: { + reasoningSummary: "detailed", + reasoningSummaries: false, thinkingSummaries: false } } } }) - expect(getThinkingSummariesOverride(cfg)).toBe(true) + expect(getBehaviorSettings(cfg)?.global?.reasoningSummary).toBe("detailed") + expect(getReasoningSummariesOverride(cfg)).toBe(true) + }) + + it("accepts deprecated env thinking summaries alias with a warning", () => { + const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {}) + try { + const cfg = resolveConfig({ + env: { OPENCODE_OPENAI_MULTI_THINKING_SUMMARIES: "0" } + }) + + expect(getReasoningSummariesOverride(cfg)).toBe(false) + expect(warnSpy).toHaveBeenCalledWith(expect.stringContaining("OPENCODE_OPENAI_MULTI_THINKING_SUMMARIES")) + } finally { + warnSpy.mockRestore() + } }) it("parses verbosity overrides from env", () => { @@ -289,8 +353,9 @@ describe("config loading", () => { } }) + expect(getBehaviorSettings(cfg)?.global?.textVerbosity).toBe("none") expect(getBehaviorSettings(cfg)?.global?.verbosityEnabled).toBe(false) - expect(getBehaviorSettings(cfg)?.global?.verbosity).toBe("low") + expect(getBehaviorSettings(cfg)?.global?.verbosity).toBeUndefined() }) it("parses service tier override from env", () => { diff --git a/test/config-schema.test.ts b/test/config-schema.test.ts index 3e73bf9..fa0b0a0 100644 --- a/test/config-schema.test.ts +++ b/test/config-schema.test.ts @@ -8,21 +8,48 @@ describe("codex config schema", () => { it("includes serviceTier for model behavior and model configs", () => { const schema = JSON.parse(readFileSync(schemaPath, "utf8")) as { $defs?: { + serviceTier?: { + enum?: string[] + } modelBehavior?: { - properties?: Record + properties?: Record } modelConfig?: { allOf?: Array<{ - properties?: Record + 
properties?: Record }> } } } - const behaviorServiceTier = schema.$defs?.modelBehavior?.properties?.serviceTier?.enum - const modelConfigServiceTier = schema.$defs?.modelConfig?.allOf?.[1]?.properties?.serviceTier?.enum + const behaviorServiceTier = + schema.$defs?.serviceTier?.enum ?? schema.$defs?.modelBehavior?.properties?.serviceTier?.enum + const modelConfigServiceTier = + schema.$defs?.serviceTier?.enum ?? schema.$defs?.modelConfig?.allOf?.[1]?.properties?.serviceTier?.enum + + expect(behaviorServiceTier).toEqual(["auto", "priority", "flex", "default"]) + expect(modelConfigServiceTier).toEqual(["auto", "priority", "flex", "default"]) + }) + + it("defines customModels with required targetModel", () => { + const schema = JSON.parse(readFileSync(schemaPath, "utf8")) as { + properties?: { + customModels?: { + additionalProperties?: { + $ref?: string + } + } + } + $defs?: { + customModel?: { + required?: string[] + properties?: Record + } + } + } - expect(behaviorServiceTier).toEqual(["default", "priority", "flex"]) - expect(modelConfigServiceTier).toEqual(["default", "priority", "flex"]) + expect(schema.properties?.customModels?.additionalProperties?.$ref).toBe("#/$defs/customModel") + expect(schema.$defs?.customModel?.required).toContain("targetModel") + expect(schema.$defs?.customModel?.properties).toHaveProperty("variants") }) }) diff --git a/test/config-validation.test.ts b/test/config-validation.test.ts index ce5d5d8..0ee3514 100644 --- a/test/config-validation.test.ts +++ b/test/config-validation.test.ts @@ -1,6 +1,11 @@ import { describe, expect, it } from "vitest" -import { validateConfigFileObject } from "../lib/config" +import { + parseConfigFileObject, + resolveDefaultConfigPath, + resolveLegacyDefaultConfigPath, + validateConfigFileObject +} from "../lib/config" describe("config validation", () => { it("returns actionable issues for invalid known fields", () => { @@ -17,4 +22,94 @@ describe("config validation", () => { 
expect(result.issues[0]).toContain("runtime.promptCacheKeyStrategy") expect(result.issues[1]).toContain("global.serviceTier") }) + + it("reports precise custom model validation issues", () => { + const result = validateConfigFileObject({ + customModels: { + "openai/my-fast-codex": { + targetModel: 42, + name: false, + include: ["bad_include"], + parallelToolCalls: "yes", + variants: [] + } + } + }) + + expect(result.valid).toBe(false) + expect(result.issues).toEqual( + expect.arrayContaining([ + expect.stringContaining("customModels.openai/my-fast-codex.targetModel"), + expect.stringContaining("customModels.openai/my-fast-codex.name"), + expect.stringContaining("customModels.openai/my-fast-codex.include"), + expect.stringContaining("customModels.openai/my-fast-codex.parallelToolCalls"), + expect.stringContaining("customModels.openai/my-fast-codex.variants") + ]) + ) + }) + + it("normalizes canonical config fields and custom model aliases", () => { + const parsed = parseConfigFileObject({ + global: { + textVerbosity: "HIGH", + serviceTier: "default", + include: ["FILE_SEARCH_CALL.RESULTS", "bad"], + parallelToolCalls: false + }, + customModels: { + " OpenAI/My-Fast-Codex ": { + targetModel: " gpt-5.3-codex ", + thinkingSummaries: false, + verbosityEnabled: true, + verbosity: "medium", + serviceTier: "default", + include: ["MESSAGE.OUTPUT_TEXT.LOGPROBS", "bad"], + parallelToolCalls: true, + variants: { + high: { + reasoningSummaries: true, + verbosityEnabled: false + } + } + } + } + }) + + expect(parsed.behaviorSettings?.global).toEqual({ + textVerbosity: "high", + verbosityEnabled: true, + verbosity: "high", + serviceTier: "auto", + include: ["file_search_call.results"], + parallelToolCalls: false + }) + expect(parsed.customModels?.["openai/my-fast-codex"]).toEqual({ + targetModel: "gpt-5.3-codex", + reasoningSummary: "none", + reasoningSummaries: false, + textVerbosity: "medium", + verbosityEnabled: true, + verbosity: "medium", + serviceTier: "auto", + include: 
["message.output_text.logprobs"], + parallelToolCalls: true, + variants: { + high: { + reasoningSummary: "auto", + reasoningSummaries: true, + textVerbosity: "none", + verbosityEnabled: false + } + } + }) + }) + + it("resolves canonical and legacy config paths from XDG config home", () => { + expect(resolveDefaultConfigPath({ XDG_CONFIG_HOME: "/tmp/config-root" })).toBe( + "/tmp/config-root/opencode/codex-config.jsonc" + ) + expect(resolveLegacyDefaultConfigPath({ XDG_CONFIG_HOME: "/tmp/config-root" })).toBe( + "/tmp/config-root/opencode/codex-config.json" + ) + }) }) diff --git a/test/fetch-orchestrator.snapshots-and-redirects.test.ts b/test/fetch-orchestrator.snapshots-and-redirects.test.ts index e229ea2..ac7d857 100644 --- a/test/fetch-orchestrator.snapshots-and-redirects.test.ts +++ b/test/fetch-orchestrator.snapshots-and-redirects.test.ts @@ -1,4 +1,5 @@ import { afterEach, describe, expect, it, vi } from "vitest" +import { PluginFatalError } from "../lib/fatal-errors" import { FetchOrchestrator } from "../lib/fetch-orchestrator" import { resetStubbedGlobals, stubGlobalForTest } from "./helpers/mock-policy" @@ -100,6 +101,60 @@ describe("FetchOrchestrator snapshots and redirect policy", () => { expect(fetchMock).toHaveBeenCalledTimes(1) }) + it("propagates PluginFatalError from onAttemptRequest", async () => { + const acquireAuth = vi.fn(async () => ({ + access: "token_abc", + identityKey: "id1", + accountId: "acc1" + })) + const setCooldown = vi.fn<(identityKey: string, cooldownUntil: number) => Promise>(async () => {}) + const fetchMock = vi.fn(async () => new Response("OK", { status: 200 })) + stubGlobalForTest("fetch", fetchMock) + + const orch = new FetchOrchestrator({ + acquireAuth, + setCooldown, + onAttemptRequest: async () => { + throw new PluginFatalError({ + message: "fatal request prep", + type: "fatal_request_prep", + param: "request.reasoning.summary", + source: "request.reasoning.summary" + }) + } + }) + + await 
expect(orch.execute("https://api.com")).rejects.toMatchObject({
+      message: "fatal request prep",
+      type: "fatal_request_prep",
+      source: "request.reasoning.summary"
+    })
+    expect(fetchMock).not.toHaveBeenCalled()
+  })
+
+  it("continues request execution when onAttemptRequest throws a non-fatal error", async () => {
+    const acquireAuth = vi.fn(async () => ({
+      access: "token_abc",
+      identityKey: "id1",
+      accountId: "acc1"
+    }))
+    const setCooldown = vi.fn<(identityKey: string, cooldownUntil: number) => Promise<void>>(async () => {})
+    const fetchMock = vi.fn(async () => new Response("OK", { status: 200 }))
+    stubGlobalForTest("fetch", fetchMock)
+
+    const orch = new FetchOrchestrator({
+      acquireAuth,
+      setCooldown,
+      onAttemptRequest: async () => {
+        throw new Error("debug hook failed")
+      }
+    })
+
+    const response = await orch.execute("https://api.com")
+    expect(response.status).toBe(200)
+    expect(fetchMock).toHaveBeenCalledTimes(1)
+  })
+
   it("provides standardized failover reason codes to attempt hooks", async () => {
     const auths = [
       { access: "a1", identityKey: "id1", accountId: "acc1" },
diff --git a/test/installer-cli.test.ts b/test/installer-cli.test.ts
index 6ba862d..e9d2435 100644
--- a/test/installer-cli.test.ts
+++ b/test/installer-cli.test.ts
@@ -53,7 +53,7 @@ describe("installer cli", () => {
     const config = JSON.parse(await fs.readFile(configPath, "utf8")) as { plugin: string[] }
     expect(config.plugin).toContain("@iam-brain/opencode-codex-auth@latest")
     const codexConfig = parseConfigJsonWithComments(
-      await fs.readFile(path.join(root, "opencode", "codex-config.json"), "utf8")
+      await fs.readFile(path.join(root, "opencode", "codex-config.jsonc"), "utf8")
     ) as { runtime?: { mode?: string } }
     expect(codexConfig.runtime?.mode).toBe("native")
diff --git a/test/mode-smoke.test.ts b/test/mode-smoke.test.ts
index 0240ed2..5a4a901 100644
--- a/test/mode-smoke.test.ts
+++ b/test/mode-smoke.test.ts
@@ -96,8 +96,8 @@ describe("mode smoke: native vs codex", () => {
expect(codexWithHost.options.instructions).toBe("Catalog Codex Instructions") expect(nativeNoHost.options.instructions).toBe("Catalog Codex Instructions") expect(codexNoHost.options.instructions).toBe("Catalog Codex Instructions") - expect(nativeNoHost.options.reasoningSummary).toBe("experimental") - expect(codexNoHost.options.reasoningSummary).toBe("experimental") + expect(nativeNoHost.options.reasoningSummary).toBeUndefined() + expect(codexNoHost.options.reasoningSummary).toBeUndefined() expect(nativeNoHost.options.textVerbosity).toBe("medium") expect(codexNoHost.options.textVerbosity).toBe("medium") expect(nativeNoHost.options.serviceTier).toBe("priority") diff --git a/test/model-catalog.provider-models.test.ts b/test/model-catalog.provider-models.test.ts index 8327838..fb3a630 100644 --- a/test/model-catalog.provider-models.test.ts +++ b/test/model-catalog.provider-models.test.ts @@ -1,11 +1,12 @@ import { readFile } from "node:fs/promises" -import { describe, expect, it } from "vitest" +import { describe, expect, it, vi } from "vitest" import { applyCodexCatalogToProviderModels, getRuntimeDefaultsForSlug, - parseCatalogResponse + parseCatalogResponse, + resolveInstructionsForModel } from "../lib/model-catalog" function makeBaselineModel(id: string): Record { @@ -231,6 +232,113 @@ describe("model catalog provider model mapping", () => { } }) + it("synthesizes selectable custom models from active catalog targets", () => { + const providerModels: Record> = {} + + applyCodexCatalogToProviderModels({ + providerModels, + catalogModels: [ + { + slug: "gpt-5.3-codex", + display_name: "GPT-5.3 Codex", + context_window: 272000, + input_modalities: ["text", "image"] as const, + model_messages: { + instructions_template: "Base {{ personality }}", + instructions_variables: { personality_default: "Default voice" } + }, + default_reasoning_level: "medium", + supported_reasoning_levels: [{ effort: "low" }, { effort: "high" }], + supports_reasoning_summaries: true, + 
reasoning_summary_format: "auto", + supports_parallel_tool_calls: false, + support_verbosity: true, + default_verbosity: "high" + } + ], + customModels: { + "openai/my-fast-codex": { + targetModel: "gpt-5.3-codex", + name: "My Fast Codex", + reasoningSummary: "concise", + variants: { + high: { + reasoningSummary: "detailed" + } + } + } + } + }) + + expect(providerModels["gpt-5.3-codex"]).toBeDefined() + expect(providerModels["openai/my-fast-codex"]).toMatchObject({ + id: "openai/my-fast-codex", + slug: "openai/my-fast-codex", + model: "openai/my-fast-codex", + name: "My Fast Codex", + displayName: "My Fast Codex", + display_name: "My Fast Codex", + api: { + id: "gpt-5.3-codex" + } + }) + expect(providerModels["openai/my-fast-codex"].options).toMatchObject({ + codexCatalogModel: { + slug: "gpt-5.3-codex" + }, + codexInstructions: "Base Default voice", + codexRuntimeDefaults: { + defaultReasoningEffort: "medium", + supportedReasoningEfforts: ["low", "high"], + supportsReasoningSummaries: true, + reasoningSummaryFormat: "auto", + supportsParallelToolCalls: false, + supportsVerbosity: true, + defaultVerbosity: "high" + }, + codexCustomModelConfig: { + slug: "openai/my-fast-codex", + targetModel: "gpt-5.3-codex", + reasoningSummary: "concise" + } + }) + expect(providerModels["openai/my-fast-codex"].instructions).toBe("Base Default voice") + expect(providerModels["openai/my-fast-codex"].limit).toEqual(providerModels["gpt-5.3-codex"].limit) + expect(providerModels["openai/my-fast-codex"].capabilities).toEqual(providerModels["gpt-5.3-codex"].capabilities) + expect(providerModels["openai/my-fast-codex"].codexRuntimeDefaults).toEqual( + providerModels["gpt-5.3-codex"].codexRuntimeDefaults + ) + expect(providerModels["openai/my-fast-codex"].variants).toEqual({ + low: { reasoningEffort: "low" }, + high: { reasoningEffort: "high", reasoningSummary: "detailed" } + }) + }) + + it("warns and skips custom models whose target is missing from the active catalog", () => { + const 
providerModels: Record> = {} + const warn = vi.fn() + + applyCodexCatalogToProviderModels({ + providerModels, + catalogModels: [ + { + slug: "gpt-5.3-codex", + context_window: 272000, + input_modalities: ["text"] + } + ], + customModels: { + "openai/missing-target": { + targetModel: "gpt-5.4" + } + }, + warn + }) + + expect(providerModels["openai/missing-target"]).toBeUndefined() + expect(warn).toHaveBeenCalledWith(expect.stringContaining("customModels.openai/missing-target.targetModel")) + }) + it("uses richer catalog display names when provided", () => { const providerModels: Record> = { "gpt-5.4": makeBaselineModel("gpt-5.4") @@ -326,6 +434,27 @@ describe("model catalog provider model mapping", () => { expect((providerModels["gpt-5.4"].options as Record).codexInstructions).toBeUndefined() }) + it("falls back to safe base instructions when rendered templates contain stale bridge markers", () => { + expect( + resolveInstructionsForModel( + { + slug: "gpt-5.4", + context_window: 272000, + base_instructions: "Use the safe base", + model_messages: { + instructions_template: "Use {{ personality }} with multi_tool_use.parallel", + instructions_variables: { + personalities: { + default: "Default tone" + } + } + } + }, + undefined + ) + ).toBe("Use the safe base") + }) + it("clears provider models instead of synthesizing a fallback model set when no catalog is available", () => { const providerModels: Record> = { "gpt-5.4": makeBaselineModel("gpt-5.4"), diff --git a/test/openai-loader-fetch.prompt-cache-key.core-behavior.test.ts b/test/openai-loader-fetch.prompt-cache-key.core-behavior.test.ts index f3ce568..b16e2de 100644 --- a/test/openai-loader-fetch.prompt-cache-key.core-behavior.test.ts +++ b/test/openai-loader-fetch.prompt-cache-key.core-behavior.test.ts @@ -110,6 +110,88 @@ describe("openai loader fetch prompt cache key (core behavior)", () => { expect(outboundAttemptMeta?.selectionRefreshLeaseCount).toBeUndefined() }) + it("returns a synthetic plugin error when a 
selected catalog default has an invalid reasoning summary format", async () => { + vi.resetModules() + + const auth = { + access: "access-token", + accountId: "acc_123", + identityKey: "acc_123|user@example.com|plus", + email: "user@example.com", + plan: "plus", + accountLabel: "user@example.com (plus)" + } + + const acquireOpenAIAuth = vi.fn(async () => auth) + vi.doMock("../lib/codex-native/acquire-auth", () => ({ + acquireOpenAIAuth + })) + + const { createOpenAIFetchHandler } = await import("../lib/codex-native/openai-loader-fetch") + const { createFetchOrchestratorState } = await import("../lib/fetch-orchestrator") + const { createStickySessionState } = await import("../lib/rotation") + const fetchMock = vi.fn(async () => new Response("ok", { status: 200 })) + stubGlobalForTest("fetch", fetchMock) + + const handler = createOpenAIFetchHandler({ + authMode: "native", + spoofMode: "native", + remapDeveloperMessagesToUserEnabled: false, + quietMode: true, + pidOffsetEnabled: false, + headerTransformDebug: false, + compatInputSanitizerEnabled: false, + internalCollaborationModeHeader: "x-opencode-collaboration-mode-kind", + requestSnapshots: { + captureRequest: async () => {}, + captureResponse: async () => {} + }, + sessionAffinityState: { + orchestratorState: createFetchOrchestratorState(), + stickySessionState: createStickySessionState(), + hybridSessionState: createStickySessionState(), + persistSessionAffinityState: () => {} + }, + getCatalogModels: () => [ + { + slug: "gpt-5.3-codex", + default_reasoning_level: "high", + supports_reasoning_summaries: true, + reasoning_summary_format: "experimental" + } + ], + syncCatalogFromAuth: async () => undefined, + setCooldown: async () => {}, + showToast: async () => {} + }) + + const response = await handler("https://api.openai.com/v1/responses", { + method: "POST", + headers: { + "content-type": "application/json", + session_id: "ses_reasoning_validation" + }, + body: JSON.stringify({ + model: "gpt-5.3-codex", + 
reasoning: { effort: "high" }, + input: "hello" + }) + }) + + expect(response.status).toBe(400) + expect(fetchMock).not.toHaveBeenCalled() + await expect(response.json()).resolves.toEqual({ + error: { + message: + "Invalid reasoning summary setting source: selected model catalog default `codexRuntimeDefaults.reasoningSummaryFormat` for `gpt-5.3-codex` is `experimental`. Supported values are `auto`, `concise`, `detailed`, `none`.", + type: "invalid_reasoning_summary", + param: "reasoning.summary", + source: "codexRuntimeDefaults.reasoningSummaryFormat", + hint: 'This source is internal, not a user config key. Disable summaries with `reasoningSummary: "none"` if you need a workaround.' + } + }) + }) + it("does not mutate shared affinity maps for subagent-marked requests", async () => { vi.resetModules() diff --git a/test/reasoning-summary.test.ts b/test/reasoning-summary.test.ts new file mode 100644 index 0000000..4e30ad6 --- /dev/null +++ b/test/reasoning-summary.test.ts @@ -0,0 +1,105 @@ +import { describe, expect, it } from "vitest" + +import { + inspectReasoningSummaryValue, + resolveReasoningSummaryValue, + toReasoningSummaryPluginFatalError +} from "../lib/codex-native/reasoning-summary.js" + +describe("reasoning summary helpers", () => { + it("classifies absent, disabled, valid, and invalid values", () => { + expect(inspectReasoningSummaryValue(undefined)).toEqual({ state: "absent" }) + expect(inspectReasoningSummaryValue(" none ")).toEqual({ state: "disabled", raw: "none" }) + expect(inspectReasoningSummaryValue("CONCISE")).toEqual({ + state: "valid", + raw: "CONCISE", + value: "concise" + }) + expect(inspectReasoningSummaryValue("experimental")).toEqual({ + state: "invalid", + raw: "experimental" + }) + }) + + it("returns request-option diagnostics for invalid explicit and configured values", () => { + expect( + resolveReasoningSummaryValue({ + explicitValue: "experimental", + explicitSource: "request.reasoning.summary", + hasReasoning: true, + 
defaultReasoningSummarySource: "codexRuntimeDefaults.reasoningSummaryFormat" + }) + ).toEqual({ + diagnostic: { + actual: "experimental", + source: "request.reasoning.summary", + sourceType: "request_option" + } + }) + + expect( + resolveReasoningSummaryValue({ + explicitSource: "request.reasoning.summary", + hasReasoning: true, + configuredValue: "invalid", + configuredSource: "config.reasoningSummary", + defaultReasoningSummarySource: "codexRuntimeDefaults.reasoningSummaryFormat" + }) + ).toEqual({ + diagnostic: { + actual: "invalid", + source: "config.reasoningSummary", + sourceType: "request_option" + } + }) + }) + + it("returns catalog diagnostics for invalid runtime defaults and defaults to auto otherwise", () => { + expect( + resolveReasoningSummaryValue({ + explicitSource: "request.reasoning.summary", + hasReasoning: true, + supportsReasoningSummaries: true, + defaultReasoningSummaryFormat: "experimental", + defaultReasoningSummarySource: "codexRuntimeDefaults.reasoningSummaryFormat", + model: "gpt-5.3-codex" + }) + ).toEqual({ + diagnostic: { + actual: "experimental", + model: "gpt-5.3-codex", + source: "codexRuntimeDefaults.reasoningSummaryFormat", + sourceType: "catalog_default" + } + }) + + expect( + resolveReasoningSummaryValue({ + explicitSource: "request.reasoning.summary", + hasReasoning: true, + supportsReasoningSummaries: true, + defaultReasoningSummarySource: "codexRuntimeDefaults.reasoningSummaryFormat" + }) + ).toEqual({ value: "auto" }) + }) + + it("builds source-aware plugin errors for request and catalog failures", () => { + const requestError = toReasoningSummaryPluginFatalError({ + actual: "experimental", + source: "request.reasoning.summary", + sourceType: "request_option" + }) + expect(requestError.message).toContain("request setting `request.reasoning.summary`") + expect(requestError.hint).toContain("Update the request") + + const catalogError = toReasoningSummaryPluginFatalError({ + actual: "experimental", + model: "gpt-5.3-codex", + 
source: "codexRuntimeDefaults.reasoningSummaryFormat", + sourceType: "catalog_default" + }) + expect(catalogError.message).toContain("selected model catalog default") + expect(catalogError.message).toContain("gpt-5.3-codex") + expect(catalogError.hint).toContain('reasoningSummary: "none"') + }) +}) diff --git a/test/release-hygiene.test.ts b/test/release-hygiene.test.ts index 1220097..488e0ab 100644 --- a/test/release-hygiene.test.ts +++ b/test/release-hygiene.test.ts @@ -1,6 +1,7 @@ import { describe, it, expect } from "vitest" -import { existsSync, readFileSync } from "node:fs" +import { execFileSync } from "node:child_process" +import { existsSync, readFileSync, statSync } from "node:fs" import { join } from "node:path" const REQUIRED_RELEASE_RUNTIME_CI_JOBS = [ @@ -10,12 +11,16 @@ const REQUIRED_RELEASE_RUNTIME_CI_JOBS = [ "Security Audit" ] const REQUIRED_WORKFLOW_STATIC_JOB_NAMES = ["Package Smoke Test", "Windows Compatibility Smoke", "Security Audit"] +const REQUIRED_PR_CI_JOB_NAMES = ["Verify (Node.js 22.x)", "Package Smoke Test", "Windows Compatibility Smoke"] describe("release hygiene", () => { it("package.json has verify script", () => { const pkgPath = join(process.cwd(), "package.json") const pkg = JSON.parse(readFileSync(pkgPath, "utf-8")) const verifyScript = String(pkg.scripts?.verify ?? 
"") + expect(pkg.scripts?.["verify:local"]).toBe("node scripts/enforce-local-verify.mjs manual") + expect(pkg.scripts?.prepush).toBe("npm run verify:local") + expect(pkg.scripts?.["hooks:install"]).toBe("node scripts/install-git-hooks.mjs") const verifyOrder = [ "npm run check:esm-imports", "npm run lint", @@ -48,10 +53,34 @@ describe("release hygiene", () => { expect(pkg.scripts?.build).toBe("npm run patch:plugin-dts && npm run clean:dist && tsc") expect(pkg.scripts?.["clean:dist"]).toBe("node scripts/clean-dist.js") expect(existsSync(join(process.cwd(), "scripts", "clean-dist.js"))).toBe(true) + expect(existsSync(join(process.cwd(), "scripts", "enforce-local-verify.mjs"))).toBe(true) + expect(existsSync(join(process.cwd(), "scripts", "install-git-hooks.mjs"))).toBe(true) expect(existsSync(join(process.cwd(), "scripts", "check-esm-import-specifiers.mjs"))).toBe(true) expect(existsSync(join(process.cwd(), "scripts", "check-dist-esm-import-specifiers.mjs"))).toBe(false) expect(existsSync(join(process.cwd(), "scripts", "check-file-size.mjs"))).toBe(false) expect(existsSync(join(process.cwd(), "scripts", "file-size-allowlist.json"))).toBe(false) + expect(existsSync(join(process.cwd(), ".githooks", "pre-commit"))).toBe(true) + expect(existsSync(join(process.cwd(), ".githooks", "pre-push"))).toBe(true) + expect((statSync(join(process.cwd(), ".githooks", "pre-commit")).mode & 0o111) !== 0).toBe(true) + expect((statSync(join(process.cwd(), ".githooks", "pre-push")).mode & 0o111) !== 0).toBe(true) + expect(readFileSync(join(process.cwd(), ".githooks", "pre-commit"), "utf-8")).toContain( + "node scripts/enforce-local-verify.mjs pre-commit" + ) + expect(readFileSync(join(process.cwd(), ".githooks", "pre-push"), "utf-8")).toContain( + "node scripts/enforce-local-verify.mjs pre-push" + ) + expect( + execFileSync("git", ["ls-files", "--error-unmatch", ".githooks/pre-commit"], { + cwd: process.cwd(), + encoding: "utf-8" + }).trim() + ).toBe(".githooks/pre-commit") + expect( + 
execFileSync("git", ["ls-files", "--error-unmatch", ".githooks/pre-push"], { + cwd: process.cwd(), + encoding: "utf-8" + }).trim() + ).toBe(".githooks/pre-push") }) it("declares the Node engine aligned with CI", () => { @@ -101,6 +130,35 @@ describe("release hygiene", () => { } }) + it("keeps PR CI lean while retaining security audit on main pushes", () => { + const workflowPath = join(process.cwd(), ".github", "workflows", "ci.yml") + const workflow = readFileSync(workflowPath, "utf-8") + expect(workflow).toMatch(/on:\s*\n\s+push:\s*\n\s+branches:\s*\n\s+-\s+main\s*\n\s+pull_request:/) + for (const job of REQUIRED_PR_CI_JOB_NAMES) { + expect(workflow).toContain(job) + } + const securityAuditBlock = workflow + .split(/\n/) + .slice(workflow.split(/\n/).findIndex((line) => line.includes("security-audit:"))) + .join("\n") + expect(securityAuditBlock).toContain("name: Security Audit") + expect(securityAuditBlock).toContain("if: github.event_name == 'push'") + expect(securityAuditBlock).toContain("npm audit --audit-level=high") + }) + + it("keeps dependency review and secret scanning on pull requests", () => { + const dependencyReviewWorkflow = readFileSync( + join(process.cwd(), ".github", "workflows", "dependency-review.yml"), + "utf-8" + ) + const secretScanWorkflow = readFileSync(join(process.cwd(), ".github", "workflows", "secret-scan.yml"), "utf-8") + expect(dependencyReviewWorkflow).toMatch(/on:\s*\n\s+pull_request:/) + expect(dependencyReviewWorkflow).toContain("name: Dependency Review") + expect(secretScanWorkflow).toMatch(/on:\s*\n\s+push:\s*\n\s+branches:\s*\n\s+-\s+main\s*\n\s+pull_request:/) + expect(secretScanWorkflow).toContain("name: Secret Scan") + expect(secretScanWorkflow).toContain("name: Gitleaks") + }) + it("release workflow validates tag/package version parity and idempotent publish", () => { const workflowPath = join(process.cwd(), ".github", "workflows", "release.yml") const workflow = readFileSync(workflowPath, "utf-8") @@ -199,6 +257,7 @@ 
describe("package publish surface", () => { const workflowPath = join(process.cwd(), ".github", "workflows", "ci.yml") const workflow = readFileSync(workflowPath, "utf-8") expect(workflow).toContain("Audit dependencies (including dev toolchain)") + expect(workflow).toContain("if: github.event_name == 'push'") expect(workflow).toContain("npm audit --audit-level=high") expect(workflow).not.toContain("npm audit --omit=dev") }) diff --git a/test/request-transform-model.test.ts b/test/request-transform-model.test.ts new file mode 100644 index 0000000..d2c794d --- /dev/null +++ b/test/request-transform-model.test.ts @@ -0,0 +1,124 @@ +import { describe, expect, it } from "vitest" + +import type { CustomModelConfig } from "../lib/config.js" +import type { CodexModelInfo } from "../lib/model-catalog.js" +import { + applyResolvedCodexRuntimeDefaults, + findCatalogModelForCandidates, + getConfiguredCustomModelReasoningSummaryOverride, + getCustomModelIncludeOverride, + getCustomModelParallelToolCallsOverride, + getCustomModelReasoningEffortOverride, + getCustomModelTextVerbosityOverride, + getModelLookupCandidates, + getSelectedModelLookupCandidates, + getVariantLookupCandidates +} from "../lib/codex-native/request-transform-model.js" + +describe("request transform model helpers", () => { + it("builds model and variant lookup candidates from ids and slash tails", () => { + expect( + getModelLookupCandidates({ + id: "openai/my-fast-codex-high", + api: { id: "gpt-5.3-codex-high" } + }) + ).toEqual(["openai/my-fast-codex-high", "gpt-5.3-codex-high", "my-fast-codex-high"]) + + expect(getSelectedModelLookupCandidates({ id: "openai/my-fast-codex-high" })).toEqual([ + "openai/my-fast-codex-high", + "my-fast-codex-high" + ]) + + expect( + getVariantLookupCandidates({ + message: { variant: "high" }, + modelCandidates: ["openai/my-fast-codex/high", "gpt-5.3-codex-high"] + }) + ).toEqual(["high"]) + }) + + it("matches catalog and configured custom models with case-insensitive effort 
fallback", () => {
+    const customModels: Record<string, CustomModelConfig> = {
+      "OpenAI/My-Fast-Codex": {
+        targetModel: "gpt-5.3-codex",
+        reasoningSummaries: false,
+        variants: {
+          HIGH: {
+            reasoningSummary: "detailed"
+          }
+        }
+      }
+    }
+
+    expect(
+      getConfiguredCustomModelReasoningSummaryOverride(customModels, ["openai/my-fast-codex-high"], ["high"])
+    ).toBe("detailed")
+    expect(getConfiguredCustomModelReasoningSummaryOverride(customModels, ["openai/my-fast-codex-high"], [])).toBe(
+      "none"
+    )
+
+    const catalogModels: CodexModelInfo[] = [
+      {
+        slug: "gpt-5.3-codex",
+        context_window: 272000,
+        supported_reasoning_levels: [{ effort: "high" }],
+        input_modalities: ["text"]
+      }
+    ]
+    expect(findCatalogModelForCandidates(catalogModels, ["gpt-5.3-codex-high"])?.slug).toBe("gpt-5.3-codex")
+  })
+
+  it("reads custom model overrides from codexCustomModelConfig model options", () => {
+    const modelOptions = {
+      codexCustomModelConfig: {
+        targetModel: "gpt-5.3-codex",
+        reasoningEffort: "high",
+        textVerbosity: "HIGH",
+        include: ["file_search_call.results"],
+        parallelToolCalls: false
+      }
+    }
+
+    expect(getCustomModelReasoningEffortOverride(modelOptions, [])).toBe("high")
+    expect(getCustomModelTextVerbosityOverride(modelOptions, [])).toBe("high")
+    expect(getCustomModelIncludeOverride(modelOptions, [])).toEqual(["file_search_call.results"])
+    expect(getCustomModelParallelToolCallsOverride(modelOptions, [])).toBe(false)
+  })
+
+  it("applies resolved defaults, dedupes includes, and strips unsupported explicit verbosity", () => {
+    const options: Record<string, unknown> = {
+      textVerbosity: "LOUD",
+      include: ["file_search_call.results"],
+      reasoningEffort: "high"
+    }
+
+    applyResolvedCodexRuntimeDefaults({
+      options,
+      codexInstructions: "Catalog instructions",
+      defaults: {
+        applyPatchToolType: "apply_patch",
+        supportsReasoningSummaries: true,
+        reasoningSummaryFormat: "auto",
+        supportsParallelToolCalls: true,
+        defaultVerbosity: "low",
+        supportsVerbosity: true
+      },
+      modelToolCallCapable: true,
resolvedBehavior: { + reasoningSummary: "concise", + textVerbosity: "default", + include: ["reasoning.encrypted_content"], + parallelToolCalls: false + }, + modelId: "gpt-5.3-codex", + preferCodexInstructions: false + }) + + expect(options.instructions).toBe("Catalog instructions") + expect(options.reasoningSummary).toBe("concise") + expect(options.textVerbosity).toBe("low") + expect(options.applyPatchToolType).toBe("apply_patch") + expect(options.parallelToolCalls).toBe(false) + expect(options.include).toEqual(["file_search_call.results", "reasoning.encrypted_content"]) + }) +})