diff --git a/packages/config/src/cli-registry.generated.ts b/packages/config/src/cli-registry.generated.ts index 8003982f0..a783d0c11 100644 --- a/packages/config/src/cli-registry.generated.ts +++ b/packages/config/src/cli-registry.generated.ts @@ -318,10 +318,22 @@ export const OpencodeModels = { export type OpencodeModel = (typeof OpencodeModels)[keyof typeof OpencodeModels]; -/** Model option type for UI dropdowns */ +/** Reasoning effort levels supported by model providers. */ +export const ReasoningEfforts = { + LOW: 'low', + MEDIUM: 'medium', + HIGH: 'high', + XHIGH: 'xhigh', +} as const; + +export type ReasoningEffort = (typeof ReasoningEfforts)[keyof typeof ReasoningEfforts]; + +/** Model option type for UI dropdowns and model capability metadata. */ export interface ModelOption { value: string; label: string; + reasoningEfforts?: ReasoningEffort[]; + defaultReasoningEffort?: ReasoningEffort; } /** @@ -337,13 +349,13 @@ export const CLAUDE_MODEL_OPTIONS: ModelOption[] = [ * Codex CLI model options for UI dropdowns. 
*/ export const CODEX_MODEL_OPTIONS: ModelOption[] = [ - { value: 'gpt-5.4', label: 'GPT-5.4 — Latest frontier agentic coding model' }, - { value: 'gpt-5.3-codex', label: 'GPT-5.3 Codex — Frontier agentic coding model' }, - { value: 'gpt-5.3-codex-spark', label: 'GPT-5.3 Codex Spark — Ultra-fast coding model' }, - { value: 'gpt-5.2-codex', label: 'GPT-5.2 Codex — Frontier agentic coding model' }, - { value: 'gpt-5.2', label: 'GPT-5.2 — Frontier model, knowledge & reasoning' }, - { value: 'gpt-5.1-codex-max', label: 'GPT-5.1 Codex Max — Deep and fast reasoning' }, - { value: 'gpt-5.1-codex-mini', label: 'GPT-5.1 Codex Mini — Cheaper, faster' }, + { value: 'gpt-5.4', label: 'GPT-5.4 — Latest frontier agentic coding model', reasoningEfforts: ["low","medium","high","xhigh"], defaultReasoningEffort: 'xhigh' }, + { value: 'gpt-5.3-codex', label: 'GPT-5.3 Codex — Frontier agentic coding model', reasoningEfforts: ["low","medium","high","xhigh"], defaultReasoningEffort: 'xhigh' }, + { value: 'gpt-5.3-codex-spark', label: 'GPT-5.3 Codex Spark — Ultra-fast coding model', reasoningEfforts: ["low","medium","high","xhigh"], defaultReasoningEffort: 'xhigh' }, + { value: 'gpt-5.2-codex', label: 'GPT-5.2 Codex — Frontier agentic coding model', reasoningEfforts: ["low","medium","high","xhigh"], defaultReasoningEffort: 'xhigh' }, + { value: 'gpt-5.2', label: 'GPT-5.2 — Frontier model, knowledge & reasoning', reasoningEfforts: ["low","medium","high","xhigh"], defaultReasoningEffort: 'xhigh' }, + { value: 'gpt-5.1-codex-max', label: 'GPT-5.1 Codex Max — Deep and fast reasoning', reasoningEfforts: ["low","medium","high","xhigh"], defaultReasoningEffort: 'xhigh' }, + { value: 'gpt-5.1-codex-mini', label: 'GPT-5.1 Codex Mini — Cheaper, faster', reasoningEfforts: ["medium","high"], defaultReasoningEffort: 'high' }, ]; /** @@ -472,6 +484,154 @@ export const OPENCODE_MODEL_OPTIONS: ModelOption[] = [ { value: 'openai/o4-mini-deep-research', label: 'O4 Mini Deep Research' }, ]; +/** + * Claude 
Code model metadata keyed by model id. + */ +export const CLAUDE_MODEL_METADATA: Record<ClaudeModel, ModelOption> = { + 'sonnet': { value: 'sonnet', label: 'Sonnet' }, + 'opus': { value: 'opus', label: 'Opus' }, + 'haiku': { value: 'haiku', label: 'Haiku' }, +}; + +/** + * Codex CLI model metadata keyed by model id. + */ +export const CODEX_MODEL_METADATA: Record<CodexModel, ModelOption> = { + 'gpt-5.4': { value: 'gpt-5.4', label: 'GPT-5.4 — Latest frontier agentic coding model', reasoningEfforts: ["low","medium","high","xhigh"], defaultReasoningEffort: 'xhigh' }, + 'gpt-5.3-codex': { value: 'gpt-5.3-codex', label: 'GPT-5.3 Codex — Frontier agentic coding model', reasoningEfforts: ["low","medium","high","xhigh"], defaultReasoningEffort: 'xhigh' }, + 'gpt-5.3-codex-spark': { value: 'gpt-5.3-codex-spark', label: 'GPT-5.3 Codex Spark — Ultra-fast coding model', reasoningEfforts: ["low","medium","high","xhigh"], defaultReasoningEffort: 'xhigh' }, + 'gpt-5.2-codex': { value: 'gpt-5.2-codex', label: 'GPT-5.2 Codex — Frontier agentic coding model', reasoningEfforts: ["low","medium","high","xhigh"], defaultReasoningEffort: 'xhigh' }, + 'gpt-5.2': { value: 'gpt-5.2', label: 'GPT-5.2 — Frontier model, knowledge & reasoning', reasoningEfforts: ["low","medium","high","xhigh"], defaultReasoningEffort: 'xhigh' }, + 'gpt-5.1-codex-max': { value: 'gpt-5.1-codex-max', label: 'GPT-5.1 Codex Max — Deep and fast reasoning', reasoningEfforts: ["low","medium","high","xhigh"], defaultReasoningEffort: 'xhigh' }, + 'gpt-5.1-codex-mini': { value: 'gpt-5.1-codex-mini', label: 'GPT-5.1 Codex Mini — Cheaper, faster', reasoningEfforts: ["medium","high"], defaultReasoningEffort: 'high' }, +}; + +/** + * Gemini CLI model metadata keyed by model id. 
+ */ +export const GEMINI_MODEL_METADATA: Record<GeminiModel, ModelOption> = { + 'gemini-3.1-pro-preview': { value: 'gemini-3.1-pro-preview', label: 'Gemini 3.1 Pro Preview' }, + 'gemini-3-flash-preview': { value: 'gemini-3-flash-preview', label: 'Gemini 3 Flash Preview' }, + 'gemini-2.5-pro': { value: 'gemini-2.5-pro', label: 'Gemini 2.5 Pro' }, + 'gemini-2.5-flash': { value: 'gemini-2.5-flash', label: 'Gemini 2.5 Flash' }, + 'gemini-2.5-flash-lite': { value: 'gemini-2.5-flash-lite', label: 'Gemini 2.5 Flash Lite' }, +}; + +/** + * Cursor model metadata keyed by model id. + */ +export const CURSOR_MODEL_METADATA: Record<CursorModel, ModelOption> = { + 'opus-4.6-thinking': { value: 'opus-4.6-thinking', label: 'Claude 4.6 Opus (Thinking)' }, + 'opus-4.6': { value: 'opus-4.6', label: 'Claude 4.6 Opus' }, + 'opus-4.5': { value: 'opus-4.5', label: 'Claude 4.5 Opus' }, + 'opus-4.5-thinking': { value: 'opus-4.5-thinking', label: 'Claude 4.5 Opus (Thinking)' }, + 'sonnet-4.6': { value: 'sonnet-4.6', label: 'Claude 4.6 Sonnet' }, + 'sonnet-4.6-thinking': { value: 'sonnet-4.6-thinking', label: 'Claude 4.6 Sonnet (Thinking)' }, + 'sonnet-4.5': { value: 'sonnet-4.5', label: 'Claude 4.5 Sonnet' }, + 'sonnet-4.5-thinking': { value: 'sonnet-4.5-thinking', label: 'Claude 4.5 Sonnet (Thinking)' }, + 'composer-1.5': { value: 'composer-1.5', label: 'Composer 1.5' }, + 'composer-1': { value: 'composer-1', label: 'Composer 1' }, + 'gpt-5.4-xhigh': { value: 'gpt-5.4-xhigh', label: 'GPT-5.4 Extra High' }, + 'gpt-5.4-xhigh-fast': { value: 'gpt-5.4-xhigh-fast', label: 'GPT-5.4 Extra High Fast' }, + 'gpt-5.4-high': { value: 'gpt-5.4-high', label: 'GPT-5.4 High' }, + 'gpt-5.4-high-fast': { value: 'gpt-5.4-high-fast', label: 'GPT-5.4 High Fast' }, + 'gpt-5.4-medium': { value: 'gpt-5.4-medium', label: 'GPT-5.4' }, + 'gpt-5.4-medium-fast': { value: 'gpt-5.4-medium-fast', label: 'GPT-5.4 Fast' }, + 'gpt-5.4-low': { value: 'gpt-5.4-low', label: 'GPT-5.4 Low' }, + 'gpt-5.3-codex-xhigh': { value: 'gpt-5.3-codex-xhigh', label: 'GPT-5.3 Codex Extra 
High' }, + 'gpt-5.3-codex-xhigh-fast': { value: 'gpt-5.3-codex-xhigh-fast', label: 'GPT-5.3 Codex Extra High Fast' }, + 'gpt-5.3-codex-high': { value: 'gpt-5.3-codex-high', label: 'GPT-5.3 Codex High' }, + 'gpt-5.3-codex-high-fast': { value: 'gpt-5.3-codex-high-fast', label: 'GPT-5.3 Codex High Fast' }, + 'gpt-5.3-codex': { value: 'gpt-5.3-codex', label: 'GPT-5.3 Codex' }, + 'gpt-5.3-codex-fast': { value: 'gpt-5.3-codex-fast', label: 'GPT-5.3 Codex Fast' }, + 'gpt-5.3-codex-low': { value: 'gpt-5.3-codex-low', label: 'GPT-5.3 Codex Low' }, + 'gpt-5.3-codex-low-fast': { value: 'gpt-5.3-codex-low-fast', label: 'GPT-5.3 Codex Low Fast' }, + 'gpt-5.3-codex-spark-preview': { value: 'gpt-5.3-codex-spark-preview', label: 'GPT-5.3 Codex Spark' }, + 'gpt-5.2-codex-xhigh': { value: 'gpt-5.2-codex-xhigh', label: 'GPT-5.2 Codex Extra High' }, + 'gpt-5.2-codex-xhigh-fast': { value: 'gpt-5.2-codex-xhigh-fast', label: 'GPT-5.2 Codex Extra High Fast' }, + 'gpt-5.2-codex-high': { value: 'gpt-5.2-codex-high', label: 'GPT-5.2 Codex High' }, + 'gpt-5.2-codex-high-fast': { value: 'gpt-5.2-codex-high-fast', label: 'GPT-5.2 Codex High Fast' }, + 'gpt-5.2-codex': { value: 'gpt-5.2-codex', label: 'GPT-5.2 Codex' }, + 'gpt-5.2-codex-fast': { value: 'gpt-5.2-codex-fast', label: 'GPT-5.2 Codex Fast' }, + 'gpt-5.2-codex-low': { value: 'gpt-5.2-codex-low', label: 'GPT-5.2 Codex Low' }, + 'gpt-5.2-codex-low-fast': { value: 'gpt-5.2-codex-low-fast', label: 'GPT-5.2 Codex Low Fast' }, + 'gpt-5.2': { value: 'gpt-5.2', label: 'GPT-5.2' }, + 'gpt-5.2-high': { value: 'gpt-5.2-high', label: 'GPT-5.2 High' }, + 'gpt-5.1-codex-max': { value: 'gpt-5.1-codex-max', label: 'GPT-5.1 Codex Max' }, + 'gpt-5.1-codex-max-high': { value: 'gpt-5.1-codex-max-high', label: 'GPT-5.1 Codex Max High' }, + 'gpt-5.1-codex-mini': { value: 'gpt-5.1-codex-mini', label: 'GPT-5.1 Codex Mini' }, + 'gpt-5.1-high': { value: 'gpt-5.1-high', label: 'GPT-5.1 High' }, + 'gemini-3.1-pro': { value: 'gemini-3.1-pro', label: 'Gemini 3.1 
Pro' }, + 'gemini-3-pro': { value: 'gemini-3-pro', label: 'Gemini 3 Pro' }, + 'gemini-3-flash': { value: 'gemini-3-flash', label: 'Gemini 3 Flash' }, + 'grok': { value: 'grok', label: 'Grok' }, + 'kimi-k2.5': { value: 'kimi-k2.5', label: 'Kimi K2.5' }, +}; + +/** + * Droid model metadata keyed by model id. + */ +export const DROID_MODEL_METADATA: Record<DroidModel, ModelOption> = { + 'opus-4.6-fast': { value: 'opus-4.6-fast', label: 'Opus 4.6 Fast Mode (12x)' }, + 'opus-4.5': { value: 'opus-4.5', label: 'Opus 4.5 (2x)' }, + 'sonnet-4.5': { value: 'sonnet-4.5', label: 'Sonnet 4.5 (1.2x)' }, + 'haiku-4.5': { value: 'haiku-4.5', label: 'Haiku 4.5 (0.4x)' }, + 'gpt-5.2': { value: 'gpt-5.2', label: 'GPT-5.2 (0.7x)' }, + 'gpt-5.2-codex': { value: 'gpt-5.2-codex', label: 'GPT-5.2 Codex (0.7x)' }, + 'gemini-3-flash': { value: 'gemini-3-flash', label: 'Gemini 3 Flash (0.2x)' }, + 'droid-core-glm-4.7': { value: 'droid-core-glm-4.7', label: 'Droid Core (GLM-4.7) (0.25x)' }, +}; + +/** + * OpenCode model metadata keyed by model id. 
+ */ +export const OPENCODE_MODEL_METADATA: Record<OpencodeModel, ModelOption> = { + 'opencode/big-pickle': { value: 'opencode/big-pickle', label: 'Big Pickle' }, + 'opencode/gpt-5-nano': { value: 'opencode/gpt-5-nano', label: 'GPT-5 Nano (OpenCode)' }, + 'opencode/mimo-v2-flash-free': { value: 'opencode/mimo-v2-flash-free', label: 'Mimo V2 Flash Free' }, + 'opencode/minimax-m2.5-free': { value: 'opencode/minimax-m2.5-free', label: 'MiniMax M2.5 Free' }, + 'openai/codex-mini-latest': { value: 'openai/codex-mini-latest', label: 'Codex Mini Latest' }, + 'openai/gpt-3.5-turbo': { value: 'openai/gpt-3.5-turbo', label: 'GPT-3.5 Turbo' }, + 'openai/gpt-4': { value: 'openai/gpt-4', label: 'GPT-4' }, + 'openai/gpt-4-turbo': { value: 'openai/gpt-4-turbo', label: 'GPT-4 Turbo' }, + 'openai/gpt-4.1': { value: 'openai/gpt-4.1', label: 'GPT-4.1' }, + 'openai/gpt-4.1-mini': { value: 'openai/gpt-4.1-mini', label: 'GPT-4.1 Mini' }, + 'openai/gpt-4.1-nano': { value: 'openai/gpt-4.1-nano', label: 'GPT-4.1 Nano' }, + 'openai/gpt-4o': { value: 'openai/gpt-4o', label: 'GPT-4o' }, + 'openai/gpt-4o-2024-05-13': { value: 'openai/gpt-4o-2024-05-13', label: 'GPT-4o (2024-05-13)' }, + 'openai/gpt-4o-2024-08-06': { value: 'openai/gpt-4o-2024-08-06', label: 'GPT-4o (2024-08-06)' }, + 'openai/gpt-4o-2024-11-20': { value: 'openai/gpt-4o-2024-11-20', label: 'GPT-4o (2024-11-20)' }, + 'openai/gpt-4o-mini': { value: 'openai/gpt-4o-mini', label: 'GPT-4o Mini' }, + 'openai/gpt-5': { value: 'openai/gpt-5', label: 'GPT-5' }, + 'openai/gpt-5-codex': { value: 'openai/gpt-5-codex', label: 'GPT-5 Codex' }, + 'openai/gpt-5-mini': { value: 'openai/gpt-5-mini', label: 'GPT-5 Mini' }, + 'openai/gpt-5-nano': { value: 'openai/gpt-5-nano', label: 'GPT-5 Nano' }, + 'openai/gpt-5-pro': { value: 'openai/gpt-5-pro', label: 'GPT-5 Pro' }, + 'openai/gpt-5.1': { value: 'openai/gpt-5.1', label: 'GPT-5.1' }, + 'openai/gpt-5.1-chat-latest': { value: 'openai/gpt-5.1-chat-latest', label: 'GPT-5.1 Chat Latest' }, + 'openai/gpt-5.1-codex': { value: 
'openai/gpt-5.1-codex', label: 'GPT-5.1 Codex' }, + 'openai/gpt-5.1-codex-max': { value: 'openai/gpt-5.1-codex-max', label: 'GPT-5.1 Codex Max' }, + 'openai/gpt-5.1-codex-mini': { value: 'openai/gpt-5.1-codex-mini', label: 'GPT-5.1 Codex Mini' }, + 'openai/gpt-5.2': { value: 'openai/gpt-5.2', label: 'GPT-5.2' }, + 'openai/gpt-5.2-chat-latest': { value: 'openai/gpt-5.2-chat-latest', label: 'GPT-5.2 Chat Latest' }, + 'openai/gpt-5.2-codex': { value: 'openai/gpt-5.2-codex', label: 'GPT-5.2 Codex' }, + 'openai/gpt-5.2-pro': { value: 'openai/gpt-5.2-pro', label: 'GPT-5.2 Pro' }, + 'openai/gpt-5.3-codex': { value: 'openai/gpt-5.3-codex', label: 'GPT-5.3 Codex' }, + 'openai/gpt-5.3-codex-spark': { value: 'openai/gpt-5.3-codex-spark', label: 'GPT-5.3 Codex Spark' }, + 'openai/gpt-5.4': { value: 'openai/gpt-5.4', label: 'GPT-5.4' }, + 'openai/gpt-5.4-pro': { value: 'openai/gpt-5.4-pro', label: 'GPT-5.4 Pro' }, + 'openai/o1': { value: 'openai/o1', label: 'O1' }, + 'openai/o1-mini': { value: 'openai/o1-mini', label: 'O1 Mini' }, + 'openai/o1-preview': { value: 'openai/o1-preview', label: 'O1 Preview' }, + 'openai/o1-pro': { value: 'openai/o1-pro', label: 'O1 Pro' }, + 'openai/o3': { value: 'openai/o3', label: 'O3' }, + 'openai/o3-deep-research': { value: 'openai/o3-deep-research', label: 'O3 Deep Research' }, + 'openai/o3-mini': { value: 'openai/o3-mini', label: 'O3 Mini' }, + 'openai/o3-pro': { value: 'openai/o3-pro', label: 'O3 Pro' }, + 'openai/o4-mini': { value: 'openai/o4-mini', label: 'O4 Mini' }, + 'openai/o4-mini-deep-research': { value: 'openai/o4-mini-deep-research', label: 'O4 Mini Deep Research' }, +}; + /** * All models grouped by CLI tool. * @@ -513,6 +673,56 @@ export const ModelOptions = { Opencode: OPENCODE_MODEL_OPTIONS, } as const; +/** + * All model metadata grouped by CLI tool and keyed by model id. 
+ */ +export const ModelMetadata = { + Claude: CLAUDE_MODEL_METADATA, + Codex: CODEX_MODEL_METADATA, + Gemini: GEMINI_MODEL_METADATA, + Cursor: CURSOR_MODEL_METADATA, + Droid: DROID_MODEL_METADATA, + Opencode: OPENCODE_MODEL_METADATA, +} as const; + +const MODEL_METADATA_BY_CLI: Record<CLI, Record<string, ModelOption>> = { + claude: CLAUDE_MODEL_METADATA, + codex: CODEX_MODEL_METADATA, + gemini: GEMINI_MODEL_METADATA, + cursor: CURSOR_MODEL_METADATA, + droid: DROID_MODEL_METADATA, + opencode: OPENCODE_MODEL_METADATA, + aider: {}, + goose: {}, +}; + +/** + * Look up metadata for a specific CLI/model pair. + */ +export function getModelMetadata(cli: CLI, model: string): ModelOption | undefined { + return MODEL_METADATA_BY_CLI[cli]?.[model]; +} + +/** + * Supported reasoning effort values for a specific CLI/model pair. + */ +export function getSupportedReasoningEfforts( + cli: CLI, + model: string +): ReasoningEffort[] | undefined { + return getModelMetadata(cli, model)?.reasoningEfforts; +} + +/** + * Default reasoning effort for a specific CLI/model pair. + */ +export function getDefaultReasoningEffort( + cli: CLI, + model: string +): ReasoningEffort | undefined { + return getModelMetadata(cli, model)?.defaultReasoningEffort; +} + /** * Swarm patterns for multi-agent workflows. */ diff --git a/packages/sdk/src/__tests__/models.test.ts b/packages/sdk/src/__tests__/models.test.ts new file mode 100644 index 000000000..489220258 --- /dev/null +++ b/packages/sdk/src/__tests__/models.test.ts @@ -0,0 +1,61 @@ +/** + * Model metadata tests. 
+ * + * Run: + * npm run build && node --test dist/__tests__/models.test.js + */ +import assert from 'node:assert/strict'; +import test from 'node:test'; + +import { + ModelMetadata, + ModelOptions, + Models, + ReasoningEfforts, + getDefaultReasoningEffort, + getModelMetadata, + getSupportedReasoningEfforts, +} from '../models.js'; + +test('codex model options include reasoning effort metadata', () => { + const mini = ModelOptions.Codex.find((model) => model.value === Models.Codex.GPT_5_1_CODEX_MINI); + const frontier = ModelOptions.Codex.find((model) => model.value === Models.Codex.GPT_5_4); + + assert.deepEqual(mini?.reasoningEfforts, [ReasoningEfforts.MEDIUM, ReasoningEfforts.HIGH]); + assert.equal(mini?.defaultReasoningEffort, ReasoningEfforts.HIGH); + + assert.deepEqual(frontier?.reasoningEfforts, [ + ReasoningEfforts.LOW, + ReasoningEfforts.MEDIUM, + ReasoningEfforts.HIGH, + ReasoningEfforts.XHIGH, + ]); + assert.equal(frontier?.defaultReasoningEffort, ReasoningEfforts.XHIGH); +}); + +test('reasoning helper lookups return codex defaults and supported values', () => { + assert.equal( + getDefaultReasoningEffort('codex', Models.Codex.GPT_5_1_CODEX_MINI), + ReasoningEfforts.HIGH, + ); + assert.equal( + getDefaultReasoningEffort('codex', Models.Codex.GPT_5_4), + ReasoningEfforts.XHIGH, + ); + assert.deepEqual( + getSupportedReasoningEfforts('codex', Models.Codex.GPT_5_1_CODEX_MINI), + [ReasoningEfforts.MEDIUM, ReasoningEfforts.HIGH], + ); + assert.equal(getDefaultReasoningEffort('claude', Models.Claude.SONNET), undefined); +}); + +test('model metadata is keyed by model id for direct lookup', () => { + assert.deepEqual( + ModelMetadata.Codex[Models.Codex.GPT_5_1_CODEX_MINI], + getModelMetadata('codex', Models.Codex.GPT_5_1_CODEX_MINI), + ); + assert.equal( + ModelMetadata.Codex[Models.Codex.GPT_5_1_CODEX_MINI].defaultReasoningEffort, + ReasoningEfforts.HIGH, + ); +}); diff --git a/packages/sdk/src/models.ts b/packages/sdk/src/models.ts index 600053fbb..3dfc29b96 
100644 --- a/packages/sdk/src/models.ts +++ b/packages/sdk/src/models.ts @@ -11,6 +11,7 @@ export { CLIs, CLIVersions, CLIRegistry, + ReasoningEfforts, // Model constants ClaudeModels, CodexModels, @@ -23,7 +24,11 @@ export { CODEX_MODEL_OPTIONS, GEMINI_MODEL_OPTIONS, CURSOR_MODEL_OPTIONS, + ModelMetadata, ModelOptions, + getDefaultReasoningEffort, + getModelMetadata, + getSupportedReasoningEfforts, // Swarm patterns (type is in workflows/types.ts) SwarmPatterns, // Types @@ -33,4 +38,5 @@ export { type GeminiModel, type CursorModel, type ModelOption, + type ReasoningEffort, } from '@agent-relay/config'; diff --git a/packages/sdk/tsconfig.build.json b/packages/sdk/tsconfig.build.json index bf9f9c577..b9a0e835b 100644 --- a/packages/sdk/tsconfig.build.json +++ b/packages/sdk/tsconfig.build.json @@ -3,6 +3,11 @@ "target": "ES2022", "module": "ES2022", "moduleResolution": "Bundler", + "baseUrl": ".", + "paths": { + "@agent-relay/config": ["../config/dist/index.d.ts"], + "@agent-relay/config/*": ["../config/dist/*"] + }, "strict": true, "declaration": true, "declarationMap": true, diff --git a/packages/shared/cli-registry.yaml b/packages/shared/cli-registry.yaml index 357825850..11852a93b 100644 --- a/packages/shared/cli-registry.yaml +++ b/packages/shared/cli-registry.yaml @@ -37,24 +37,38 @@ clis: id: "gpt-5.4" label: "GPT-5.4 — Latest frontier agentic coding model" default: true + reasoning_efforts: ["low", "medium", "high", "xhigh"] + default_reasoning_effort: "xhigh" gpt_5_3_codex: id: "gpt-5.3-codex" label: "GPT-5.3 Codex — Frontier agentic coding model" + reasoning_efforts: ["low", "medium", "high", "xhigh"] + default_reasoning_effort: "xhigh" gpt_5_3_codex_spark: id: "gpt-5.3-codex-spark" label: "GPT-5.3 Codex Spark — Ultra-fast coding model" + reasoning_efforts: ["low", "medium", "high", "xhigh"] + default_reasoning_effort: "xhigh" gpt_5_2_codex: id: "gpt-5.2-codex" label: "GPT-5.2 Codex — Frontier agentic coding model" + reasoning_efforts: ["low", "medium", 
"high", "xhigh"] + default_reasoning_effort: "xhigh" gpt_5_2: id: "gpt-5.2" label: "GPT-5.2 — Frontier model, knowledge & reasoning" + reasoning_efforts: ["low", "medium", "high", "xhigh"] + default_reasoning_effort: "xhigh" gpt_5_1_codex_max: id: "gpt-5.1-codex-max" label: "GPT-5.1 Codex Max — Deep and fast reasoning" + reasoning_efforts: ["low", "medium", "high", "xhigh"] + default_reasoning_effort: "xhigh" gpt_5_1_codex_mini: id: "gpt-5.1-codex-mini" label: "GPT-5.1 Codex Mini — Cheaper, faster" + reasoning_efforts: ["medium", "high"] + default_reasoning_effort: "high" gemini: name: "Gemini CLI" diff --git a/packages/shared/codegen-ts.mjs b/packages/shared/codegen-ts.mjs index 5a3e8404f..f39462c98 100644 --- a/packages/shared/codegen-ts.mjs +++ b/packages/shared/codegen-ts.mjs @@ -25,6 +25,19 @@ function toConstantCase(str) { return str.toUpperCase().replace(/-/g, '_'); } +function formatModelOption(modelConfig) { + const parts = [`value: '${modelConfig.id}'`, `label: '${modelConfig.label || modelConfig.id}'`]; + + if (Array.isArray(modelConfig.reasoning_efforts) && modelConfig.reasoning_efforts.length > 0) { + parts.push(`reasoningEfforts: ${JSON.stringify(modelConfig.reasoning_efforts)}`); + } + if (modelConfig.default_reasoning_effort) { + parts.push(`defaultReasoningEffort: '${modelConfig.default_reasoning_effort}'`); + } + + return `{ ${parts.join(', ')} }`; +} + let output = `/** * CLI Registry - AUTO-GENERATED FILE - DO NOT EDIT * Generated from packages/shared/cli-registry.yaml @@ -91,10 +104,22 @@ export type ${pascalCli}Model = (typeof ${pascalCli}Models)[keyof typeof ${pasca } // Generate model options per CLI (for dashboard dropdowns) -output += `/** Model option type for UI dropdowns */ +output += `/** Reasoning effort levels supported by model providers. 
*/ +export const ReasoningEfforts = { + LOW: 'low', + MEDIUM: 'medium', + HIGH: 'high', + XHIGH: 'xhigh', +} as const; + +export type ReasoningEffort = (typeof ReasoningEfforts)[keyof typeof ReasoningEfforts]; + +/** Model option type for UI dropdowns and model capability metadata. */ export interface ModelOption { value: string; label: string; + reasoningEfforts?: ReasoningEffort[]; + defaultReasoningEffort?: ReasoningEffort; } `; @@ -110,8 +135,7 @@ for (const [cli, config] of Object.entries(registry.clis)) { export const ${constantCli}_MODEL_OPTIONS: ModelOption[] = [ `; for (const [, modelConfig] of Object.entries(models)) { - const label = modelConfig.label || modelConfig.id; - output += ` { value: '${modelConfig.id}', label: '${label}' },\n`; + output += ` ${formatModelOption(modelConfig)},\n`; } output += `]; @@ -119,6 +143,27 @@ export const ${constantCli}_MODEL_OPTIONS: ModelOption[] = [ } } +// Generate model metadata per CLI (keyed by model id) +for (const [cli, config] of Object.entries(registry.clis)) { + const pascalCli = toPascalCase(cli); + const constantCli = toConstantCase(cli); + const models = config.models || {}; + + if (Object.keys(models).length > 0) { + output += `/** + * ${config.name} model metadata keyed by model id. + */ +export const ${constantCli}_MODEL_METADATA: Record<${pascalCli}Model, ModelOption> = { +`; + for (const [, modelConfig] of Object.entries(models)) { + output += ` '${modelConfig.id}': ${formatModelOption(modelConfig)},\n`; + } + output += `}; + +`; + } +} + // Generate combined Models object output += `/** * All models grouped by CLI tool. @@ -171,6 +216,63 @@ output += `} as const; `; +output += `/** + * All model metadata grouped by CLI tool and keyed by model id. 
+ */ +export const ModelMetadata = { +`; +for (const [cli, config] of Object.entries(registry.clis)) { + const pascalCli = toPascalCase(cli); + const constantCli = toConstantCase(cli); + const models = config.models || {}; + if (Object.keys(models).length > 0) { + output += ` ${pascalCli}: ${constantCli}_MODEL_METADATA,\n`; + } +} +output += `} as const; + +const MODEL_METADATA_BY_CLI: Record<CLI, Record<string, ModelOption>> = { +`; +for (const [cli, config] of Object.entries(registry.clis)) { + const constantCli = toConstantCase(cli); + const models = config.models || {}; + if (Object.keys(models).length > 0) { + output += ` ${cli}: ${constantCli}_MODEL_METADATA,\n`; + } else { + output += ` ${cli}: {},\n`; + } +} +output += `}; + +/** + * Look up metadata for a specific CLI/model pair. + */ +export function getModelMetadata(cli: CLI, model: string): ModelOption | undefined { + return MODEL_METADATA_BY_CLI[cli]?.[model]; +} + +/** + * Supported reasoning effort values for a specific CLI/model pair. + */ +export function getSupportedReasoningEfforts( + cli: CLI, + model: string +): ReasoningEffort[] | undefined { + return getModelMetadata(cli, model)?.reasoningEfforts; +} + +/** + * Default reasoning effort for a specific CLI/model pair. + */ +export function getDefaultReasoningEffort( + cli: CLI, + model: string +): ReasoningEffort | undefined { + return getModelMetadata(cli, model)?.defaultReasoningEffort; +} + +`; + // Generate swarm patterns output += `/** * Swarm patterns for multi-agent workflows.