Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 15 additions & 4 deletions src/agents/core/session/SessionStore.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@

import { readFile, writeFile, mkdir } from 'fs/promises';
import { existsSync } from 'fs';
import { dirname } from 'path';
import { dirname, basename, join } from 'path';
import type { Session } from './types.js';
import { getSessionPath } from './session-config.js';
import { logger } from '../../../utils/logger.js';
Expand Down Expand Up @@ -41,13 +41,24 @@ export class SessionStore {

/**
* Load session from disk
*
* Falls back to the 'completed_' prefixed filename when the primary path is
* not found. This handles the race where handleSessionEnd renames
* {sessionId}.json → completed_{sessionId}.json before the final SSO sync runs.
*/
async loadSession(sessionId: string): Promise<Session | null> {
const sessionPath = getSessionPath(sessionId);
let sessionPath = getSessionPath(sessionId);

if (!existsSync(sessionPath)) {
logger.debug(`[SessionStore] Session file not found: ${sessionId}`);
return null;
// Fallback: session may have been renamed with 'completed_' prefix by handleSessionEnd
const completedPath = join(dirname(sessionPath), `completed_${basename(sessionPath)}`);
if (existsSync(completedPath)) {
sessionPath = completedPath;
logger.debug(`[SessionStore] Using completed session file: ${sessionId}`);
} else {
logger.debug(`[SessionStore] Session file not found: ${sessionId}`);
return null;
}
}

try {
Expand Down
10 changes: 8 additions & 2 deletions src/agents/plugins/__tests__/codemie-code-plugin.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ vi.mock('../opencode/opencode.session.js', () => ({
}),
}));

// Mock getModelConfig and getAllOpenCodeModelConfigs
// Mock getModelConfig and model config helpers
vi.mock('../opencode/opencode-model-configs.js', () => ({
getModelConfig: vi.fn(() => ({
id: 'gpt-5-2-2025-12-11',
Expand All @@ -102,7 +102,13 @@ vi.mock('../opencode/opencode-model-configs.js', () => ({
cost: { input: 2.5, output: 10 },
limit: { context: 1048576, output: 65536 },
})),
getAllOpenCodeModelConfigs: vi.fn(() => ({})),
getChatCompletionsModelConfigs: vi.fn(() => ({})),
getResponsesApiModelConfigs: vi.fn(() => ({})),
}));

// Mock dynamic model fetcher so tests don't make real API calls
vi.mock('../opencode/opencode-dynamic-models.js', () => ({
fetchDynamicModelConfigs: vi.fn(() => Promise.resolve({})),
}));

// Mock fs
Expand Down
10 changes: 8 additions & 2 deletions src/agents/plugins/__tests__/codemie-code-reasoning.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ vi.mock('../opencode/opencode.session.js', () => ({
}),
}));

// Mock getModelConfig and getAllOpenCodeModelConfigs
// Mock getModelConfig and model config helpers
vi.mock('../opencode/opencode-model-configs.js', () => ({
getModelConfig: vi.fn(() => ({
id: 'gpt-5-2-2025-12-11',
Expand All @@ -98,7 +98,13 @@ vi.mock('../opencode/opencode-model-configs.js', () => ({
tool_call: true,
reasoning: true,
})),
getAllOpenCodeModelConfigs: vi.fn(() => ({})),
getChatCompletionsModelConfigs: vi.fn(() => ({})),
getResponsesApiModelConfigs: vi.fn(() => ({})),
}));

// Mock dynamic model fetcher so tests don't make real API calls
vi.mock('../opencode/opencode-dynamic-models.js', () => ({
fetchDynamicModelConfigs: vi.fn(() => Promise.resolve({})),
}));

// Mock fs
Expand Down
62 changes: 54 additions & 8 deletions src/agents/plugins/codemie-code.plugin.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,8 @@ import type { AgentMetadata, AgentConfig } from '../core/types.js';
import { join } from 'path';
import { existsSync } from 'fs';
import { logger } from '../../utils/logger.js';
import { getModelConfig, getAllOpenCodeModelConfigs } from './opencode/opencode-model-configs.js';
import { getModelConfig, getChatCompletionsModelConfigs, getResponsesApiModelConfigs } from './opencode/opencode-model-configs.js';
import { fetchDynamicModelConfigs } from './opencode/opencode-dynamic-models.js';
import { BaseAgentAdapter } from '../core/BaseAgentAdapter.js';
import type { SessionAdapter } from '../core/session/BaseSessionAdapter.js';
import type { BaseExtensionInstaller } from '../core/extension/BaseExtensionInstaller.js';
Expand Down Expand Up @@ -73,6 +74,10 @@ function resolveOllamaBaseUrl(baseUrl: string, provider: string | undefined): st

/**
* Build the OpenCode config object that gets passed to the whitelabel binary.
*
* Models are split into two groups:
* - chatModels: routed via codemie-proxy/litellm (Chat Completions API)
* - responsesApiModels: routed via OpenCode's built-in openai CUSTOM_LOADER (Responses API)
*/
function buildOpenCodeConfig(params: {
proxyBaseUrl: string | undefined;
Expand All @@ -83,10 +88,13 @@ function buildOpenCodeConfig(params: {
modelId: string;
timeout: number;
providerOptions?: any;
allModels: Record<string, unknown>;
chatModels: Record<string, unknown>;
responsesApiModels: Record<string, unknown>;
responsesApiBaseUrl: string | undefined;
}): Record<string, unknown> {
const hasResponsesApiModels = Object.keys(params.responsesApiModels).length > 0;
return {
enabled_providers: ['codemie-proxy', 'ollama', 'amazon-bedrock', 'litellm'],
enabled_providers: ['codemie-proxy', 'openai', 'ollama', 'amazon-bedrock', 'litellm'],
share: 'disabled',
provider: {
...(params.proxyBaseUrl && {
Expand All @@ -99,7 +107,24 @@ function buildOpenCodeConfig(params: {
timeout: params.timeout,
...(params.providerOptions?.headers && { headers: params.providerOptions.headers })
},
models: params.allModels
models: params.chatModels
}
}),
// OpenCode's built-in openai CUSTOM_LOADER — uses @ai-sdk/openai sdk.responses()
// which calls POST /v1/responses instead of /v1/chat/completions
...(params.responsesApiBaseUrl && hasResponsesApiModels && {
openai: {
name: 'CodeMie SSO',
// whitelist: suppress the built-in openai model list (GPT-4, GPT-4o, etc.)
// OpenCode merges user models with models.dev — whitelist restricts to ours only
whitelist: Object.keys(params.responsesApiModels),
options: {
baseURL: `${params.responsesApiBaseUrl}/`,
apiKey: 'proxy-handled',
timeout: params.timeout,
...(params.providerOptions?.headers && { headers: params.providerOptions.headers })
},
models: params.responsesApiModels
}
}),
...(params.litellmBaseUrl && {
Expand All @@ -111,7 +136,7 @@ function buildOpenCodeConfig(params: {
apiKey: params.litellmApiKey || 'not-required',
timeout: params.timeout,
},
models: params.allModels
models: params.chatModels
}
}),
ollama: {
Expand Down Expand Up @@ -226,10 +251,21 @@ export const CodeMieCodePluginMetadata: AgentMetadata = {
return env;
}

// Fetch live model catalogue from the CodeMie API.
// Falls back to the static OPENCODE_MODEL_CONFIGS on any error.
const allModels = await fetchDynamicModelConfigs(
baseUrl,
env.CODEMIE_URL,
env.CODEMIE_JWT_TOKEN,
);

// Model selection priority: env var > config > default
// Use dynamic catalogue first, then fall back to static getModelConfig for unknown IDs.
const selectedModel = env.CODEMIE_MODEL || config?.model || 'gpt-5-2-2025-12-11';
const modelConfig = getModelConfig(selectedModel);
const modelConfig = allModels[selectedModel] ?? getModelConfig(selectedModel);
const { providerOptions } = modelConfig;
const allModels = getAllOpenCodeModelConfigs();
const chatModels = getChatCompletionsModelConfigs(allModels);
const responsesApiModels = getResponsesApiModelConfigs(allModels);

const isBedrock = provider === 'bedrock';
const isLiteLLM = provider === 'litellm';
Expand All @@ -241,11 +277,21 @@ export const CodeMieCodePluginMetadata: AgentMetadata = {
? toBedrockModelId(modelConfig.id, env.AWS_REGION || env.CODEMIE_AWS_REGION)
: modelConfig.id;

// Responses API base URL: use proxyBaseUrl for SSO/bearer-auth, or baseUrl for LiteLLM.
// Always set regardless of selected model — fixes model-switching bug where switching
// from a Claude model to a GPT model mid-session would miss the CUSTOM_LOADER.
const responsesApiBaseUrl = proxyBaseUrl || (isLiteLLM ? baseUrl : undefined);
if (responsesApiBaseUrl && Object.keys(responsesApiModels).length > 0) {
env.OPENAI_API_KEY = 'proxy-handled';
logger.debug('[codemie-code] Enabling openai CUSTOM_LOADER for Responses API models');
}

const openCodeConfig = buildOpenCodeConfig({
proxyBaseUrl,
litellmBaseUrl: isLiteLLM ? baseUrl : undefined,
litellmApiKey: isLiteLLM ? env.CODEMIE_API_KEY : undefined,
ollamaBaseUrl, activeProvider, modelId, timeout, providerOptions, allModels
ollamaBaseUrl, activeProvider, modelId, timeout, providerOptions,
chatModels, responsesApiModels, responsesApiBaseUrl
});

// --- Hooks injection ---
Expand Down
24 changes: 24 additions & 0 deletions src/agents/plugins/opencode/node-sqlite.d.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
/**
* Minimal type declarations for node:sqlite (Node.js 22.5+, experimental)
*
* node:sqlite lacks @types declarations in @types/node@20.x.
* These ambient declarations provide compile-time safety for the sqlite reader.
* At runtime, Node.js 22.5+ provides the module natively.
*/
declare module 'node:sqlite' {
  // Prepared-statement handle returned by DatabaseSync.prepare().
  // Only the subset of the real node:sqlite API used by this codebase is declared.
  interface StatementSync {
    // Executes the statement with the given bound parameters and returns
    // every result row as a plain column-name → value record.
    all(...params: unknown[]): Record<string, unknown>[];
  }

  // Options bag accepted by the DatabaseSync constructor.
  interface DatabaseSyncOptions {
    // Whether the database is opened on construction — assumed to mirror
    // node:sqlite's `open` option; confirm against the Node.js docs.
    open?: boolean;
  }

  // Synchronous SQLite database handle, provided natively by Node.js 22.5+.
  class DatabaseSync {
    constructor(path: string, options?: DatabaseSyncOptions);
    // Compiles the given SQL into a reusable prepared statement.
    prepare(sql: string): StatementSync;
    // Releases the underlying database handle.
    close(): void;
  }

  export { DatabaseSync };
}
Loading