diff --git a/config/opencode-legacy.json b/config/opencode-legacy.json index 5381226..14aaad2 100644 --- a/config/opencode-legacy.json +++ b/config/opencode-legacy.json @@ -15,6 +15,240 @@ "store": false }, "models": { + "gpt-5.4-none": { + "name": "GPT 5.4 None (OAuth)", + "limit": { + "context": 1050000, + "output": 128000 + }, + "modalities": { + "input": ["text", "image"], + "output": ["text"] + }, + "options": { + "reasoningEffort": "none", + "reasoningSummary": "auto", + "textVerbosity": "medium", + "include": ["reasoning.encrypted_content"], + "store": false + } + }, + "gpt-5.4-low": { + "name": "GPT 5.4 Low (OAuth)", + "limit": { + "context": 1050000, + "output": 128000 + }, + "modalities": { + "input": ["text", "image"], + "output": ["text"] + }, + "options": { + "reasoningEffort": "low", + "reasoningSummary": "auto", + "textVerbosity": "medium", + "include": ["reasoning.encrypted_content"], + "store": false + } + }, + "gpt-5.4-medium": { + "name": "GPT 5.4 Medium (OAuth)", + "limit": { + "context": 1050000, + "output": 128000 + }, + "modalities": { + "input": ["text", "image"], + "output": ["text"] + }, + "options": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium", + "include": ["reasoning.encrypted_content"], + "store": false + } + }, + "gpt-5.4-high": { + "name": "GPT 5.4 High (OAuth)", + "limit": { + "context": 1050000, + "output": 128000 + }, + "modalities": { + "input": ["text", "image"], + "output": ["text"] + }, + "options": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium", + "include": ["reasoning.encrypted_content"], + "store": false + } + }, + "gpt-5.4-xhigh": { + "name": "GPT 5.4 Extra High (OAuth)", + "limit": { + "context": 1050000, + "output": 128000 + }, + "modalities": { + "input": ["text", "image"], + "output": ["text"] + }, + "options": { + "reasoningEffort": "xhigh", + "reasoningSummary": "detailed", + "textVerbosity": "medium", + "include": 
["reasoning.encrypted_content"], + "store": false + } + }, + "gpt-5.4-pro-medium": { + "name": "GPT 5.4 Pro Medium (OAuth)", + "limit": { + "context": 1050000, + "output": 128000 + }, + "modalities": { + "input": ["text", "image"], + "output": ["text"] + }, + "options": { + "reasoningEffort": "medium", + "reasoningSummary": "detailed", + "textVerbosity": "medium", + "include": ["reasoning.encrypted_content"], + "store": false + } + }, + "gpt-5.4-pro-high": { + "name": "GPT 5.4 Pro High (OAuth)", + "limit": { + "context": 1050000, + "output": 128000 + }, + "modalities": { + "input": ["text", "image"], + "output": ["text"] + }, + "options": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium", + "include": ["reasoning.encrypted_content"], + "store": false + } + }, + "gpt-5.4-pro-xhigh": { + "name": "GPT 5.4 Pro Extra High (OAuth)", + "limit": { + "context": 1050000, + "output": 128000 + }, + "modalities": { + "input": ["text", "image"], + "output": ["text"] + }, + "options": { + "reasoningEffort": "xhigh", + "reasoningSummary": "detailed", + "textVerbosity": "medium", + "include": ["reasoning.encrypted_content"], + "store": false + } + }, + "gpt-5.3-codex-low": { + "name": "GPT 5.3 Codex Low (OAuth)", + "limit": { + "context": 400000, + "output": 128000 + }, + "modalities": { + "input": ["text", "image"], + "output": ["text"] + }, + "options": { + "reasoningEffort": "low", + "reasoningSummary": "auto", + "textVerbosity": "medium", + "include": ["reasoning.encrypted_content"], + "store": false + } + }, + "gpt-5.3-codex-medium": { + "name": "GPT 5.3 Codex Medium (OAuth)", + "limit": { + "context": 400000, + "output": 128000 + }, + "modalities": { + "input": ["text", "image"], + "output": ["text"] + }, + "options": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium", + "include": ["reasoning.encrypted_content"], + "store": false + } + }, + "gpt-5.3-codex-high": { + "name": "GPT 5.3 Codex High 
(OAuth)", + "limit": { + "context": 400000, + "output": 128000 + }, + "modalities": { + "input": ["text", "image"], + "output": ["text"] + }, + "options": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium", + "include": ["reasoning.encrypted_content"], + "store": false + } + }, + "gpt-5.3-codex-xhigh": { + "name": "GPT 5.3 Codex Extra High (OAuth)", + "limit": { + "context": 400000, + "output": 128000 + }, + "modalities": { + "input": ["text", "image"], + "output": ["text"] + }, + "options": { + "reasoningEffort": "xhigh", + "reasoningSummary": "detailed", + "textVerbosity": "medium", + "include": ["reasoning.encrypted_content"], + "store": false + } + }, + "gpt-5.3-codex-spark": { + "name": "GPT 5.3 Codex Spark (OAuth)", + "limit": { + "context": 128000, + "output": 128000 + }, + "modalities": { + "input": ["text"], + "output": ["text"] + }, + "options": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium", + "include": ["reasoning.encrypted_content"], + "store": false + } + }, "gpt-5.2-none": { "name": "GPT 5.2 None (OAuth)", "limit": { diff --git a/config/opencode-modern.json b/config/opencode-modern.json index 9161024..a029131 100644 --- a/config/opencode-modern.json +++ b/config/opencode-modern.json @@ -15,6 +15,135 @@ "store": false }, "models": { + "gpt-5.4": { + "name": "GPT 5.4 (OAuth)", + "limit": { + "context": 1050000, + "output": 128000 + }, + "modalities": { + "input": [ + "text", + "image" + ], + "output": [ + "text" + ] + }, + "variants": { + "none": { + "reasoningEffort": "none", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "low": { + "reasoningEffort": "low", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + }, + "xhigh": { + 
"reasoningEffort": "xhigh", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + } + } + }, + "gpt-5.4-pro": { + "name": "GPT 5.4 Pro (OAuth)", + "limit": { + "context": 1050000, + "output": 128000 + }, + "modalities": { + "input": [ + "text", + "image" + ], + "output": [ + "text" + ] + }, + "variants": { + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + }, + "xhigh": { + "reasoningEffort": "xhigh", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + } + } + }, + "gpt-5.3-codex": { + "name": "GPT 5.3 Codex (OAuth)", + "limit": { + "context": 400000, + "output": 128000 + }, + "modalities": { + "input": [ + "text", + "image" + ], + "output": [ + "text" + ] + }, + "variants": { + "low": { + "reasoningEffort": "low", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + }, + "xhigh": { + "reasoningEffort": "xhigh", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + } + } + }, + "gpt-5.3-codex-spark": { + "name": "GPT 5.3 Codex Spark (OAuth)", + "limit": { + "context": 128000, + "output": 128000 + }, + "modalities": { + "input": [ + "text" + ], + "output": [ + "text" + ] + } + }, "gpt-5.2": { "name": "GPT 5.2 (OAuth)", "limit": { diff --git a/lib/request/helpers/model-map.ts b/lib/request/helpers/model-map.ts index 20ca329..9807145 100644 --- a/lib/request/helpers/model-map.ts +++ b/lib/request/helpers/model-map.ts @@ -39,6 +39,43 @@ export const MODEL_MAP: Record = { "gpt-5.2-high": "gpt-5.2", "gpt-5.2-xhigh": "gpt-5.2", + // ============================================================================ + // GPT-5.4 Models (supports 
none/low/medium/high/xhigh) + // ============================================================================ + "gpt-5.4": "gpt-5.4", + "gpt-5.4-none": "gpt-5.4", + "gpt-5.4-low": "gpt-5.4", + "gpt-5.4-medium": "gpt-5.4", + "gpt-5.4-high": "gpt-5.4", + "gpt-5.4-xhigh": "gpt-5.4", + + // ============================================================================ + // GPT-5.4 Pro Models (supports medium/high/xhigh) + // ============================================================================ + "gpt-5.4-pro": "gpt-5.4-pro", + "gpt-5.4-pro-medium": "gpt-5.4-pro", + "gpt-5.4-pro-high": "gpt-5.4-pro", + "gpt-5.4-pro-xhigh": "gpt-5.4-pro", + + // ============================================================================ + // GPT-5.3 Codex Models (supports low/medium/high/xhigh) + // ============================================================================ + "gpt-5.3-codex": "gpt-5.3-codex", + "gpt-5.3-codex-low": "gpt-5.3-codex", + "gpt-5.3-codex-medium": "gpt-5.3-codex", + "gpt-5.3-codex-high": "gpt-5.3-codex", + "gpt-5.3-codex-xhigh": "gpt-5.3-codex", + + // ============================================================================ + // GPT-5.3 Codex Spark Models (text-only, fast) + // ============================================================================ + "gpt-5.3-codex-spark": "gpt-5.3-codex-spark", + + // ============================================================================ + // GPT-5.3 General Purpose Models + // ============================================================================ + "gpt-5.3-chat-latest": "gpt-5.3-chat-latest", + // ============================================================================ // GPT-5.2 Codex Models (low/medium/high/xhigh) // ============================================================================ diff --git a/lib/request/request-transformer.ts b/lib/request/request-transformer.ts index 07f9961..d7fd2f9 100644 --- a/lib/request/request-transformer.ts +++ b/lib/request/request-transformer.ts @@ 
-47,7 +47,41 @@ export function normalizeModel(model: string | undefined): string { const normalized = modelId.toLowerCase(); // Priority order for pattern matching (most specific first): - // 1. GPT-5.2 Codex (newest codex model) + // 1. GPT-5.4 Pro (extended reasoning) + if ( + normalized.includes("gpt-5.4-pro") || + normalized.includes("gpt 5.4 pro") + ) { + return "gpt-5.4-pro"; + } + + // 2. GPT-5.4 (newest general purpose model) + if (normalized.includes("gpt-5.4") || normalized.includes("gpt 5.4")) { + return "gpt-5.4"; + } + + // 3. GPT-5.3 Codex Spark (text-only, fast) + if ( + normalized.includes("gpt-5.3-codex-spark") || + normalized.includes("gpt 5.3 codex spark") + ) { + return "gpt-5.3-codex-spark"; + } + + // 4. GPT-5.3 Codex + if ( + normalized.includes("gpt-5.3-codex") || + normalized.includes("gpt 5.3 codex") + ) { + return "gpt-5.3-codex"; + } + + // 5. GPT-5.3 (general purpose) + if (normalized.includes("gpt-5.3") || normalized.includes("gpt 5.3")) { + return "gpt-5.3-chat-latest"; + } + + // 6. GPT-5.2 Codex (newest codex model) if ( normalized.includes("gpt-5.2-codex") || normalized.includes("gpt 5.2 codex") @@ -55,12 +89,12 @@ export function normalizeModel(model: string | undefined): string { return "gpt-5.2-codex"; } - // 2. GPT-5.2 (general purpose) + // 7. GPT-5.2 (general purpose) if (normalized.includes("gpt-5.2") || normalized.includes("gpt 5.2")) { return "gpt-5.2"; } - // 3. GPT-5.1 Codex Max + // 8. GPT-5.1 Codex Max if ( normalized.includes("gpt-5.1-codex-max") || normalized.includes("gpt 5.1 codex max") @@ -195,6 +229,33 @@ export function getReasoningConfig( ): ReasoningConfig { const normalizedName = modelName?.toLowerCase() ?? 
""; + // GPT-5.4 Pro (extended reasoning, supports medium/high/xhigh only) + const isGpt54Pro = + normalizedName.includes("gpt-5.4-pro") || + normalizedName.includes("gpt 5.4 pro"); + + // GPT-5.4 general purpose (newest model, supports none/low/medium/high/xhigh) + const isGpt54General = + (normalizedName.includes("gpt-5.4") || normalizedName.includes("gpt 5.4")) && + !isGpt54Pro; + + // GPT-5.3 Codex Spark (text-only, fast - limited reasoning) + const isCodexSpark = + normalizedName.includes("codex-spark") || + normalizedName.includes("codex spark"); + + // GPT-5.3 Codex (supports low/medium/high/xhigh) + const isGpt53Codex = + (normalizedName.includes("gpt-5.3-codex") || + normalizedName.includes("gpt 5.3 codex")) && + !isCodexSpark; + + // GPT-5.3 general purpose + const isGpt53General = + (normalizedName.includes("gpt-5.3") || normalizedName.includes("gpt 5.3")) && + !isGpt53Codex && + !isCodexSpark; + // GPT-5.2 Codex is the newest codex model (supports xhigh, but not "none") const isGpt52Codex = normalizedName.includes("gpt-5.2-codex") || @@ -212,9 +273,10 @@ export function getReasoningConfig( normalizedName.includes("codex mini") || normalizedName.includes("codex_mini") || normalizedName.includes("codex-mini-latest"); - const isCodex = normalizedName.includes("codex") && !isCodexMini; + const isCodex = normalizedName.includes("codex") && !isCodexMini && !isCodexSpark; const isLightweight = !isCodexMini && + !isCodexSpark && (normalizedName.includes("nano") || normalizedName.includes("mini")); @@ -225,8 +287,8 @@ export function getReasoningConfig( !isCodexMax && !isCodexMini; - // GPT 5.2, GPT 5.2 Codex, and Codex Max support xhigh reasoning - const supportsXhigh = isGpt52General || isGpt52Codex || isCodexMax; + // GPT 5.4, GPT 5.4 Pro, GPT 5.3 Codex, GPT 5.2, GPT 5.2 Codex, and Codex Max support xhigh reasoning + const supportsXhigh = isGpt54General || isGpt54Pro || isGpt53Codex || isGpt52General || isGpt52Codex || isCodexMax; // GPT 5.1 general and GPT 
5.2 general support "none" reasoning per: // - OpenAI API docs: "gpt-5.1 defaults to none, supports: none, low, medium, high" @@ -234,7 +296,7 @@ // - Codex CLI: docs/config.md lists "none" as valid for model_reasoning_effort // - gpt-5.2 (being newer) also supports: none, low, medium, high, xhigh // - Codex models (including GPT-5.2 Codex) do NOT support "none" - const supportsNone = isGpt52General || isGpt51General; + const supportsNone = isGpt54General || isGpt52General || isGpt51General; // Default based on model type (Codex CLI defaults) // Note: OpenAI docs say gpt-5.1 defaults to "none", but we default to "medium" @@ -262,6 +324,20 @@ } } + // GPT-5.4 Pro: minimum reasoning is medium (no none/low) + if (isGpt54Pro) { + if (effort === "none" || effort === "low" || effort === "minimal") { + effort = "medium"; + } + } + + // Codex Spark: does not support none/minimal; floor effort at low + if (isCodexSpark) { + if (effort === "none" || effort === "minimal") { + effort = "low"; + } + } + // For models that don't support xhigh, downgrade to high if (!supportsXhigh && effort === "xhigh") { effort = "high";