diff --git a/bun.lock b/bun.lock
index 1d2e4462f3c4..347cdc8d330e 100644
--- a/bun.lock
+++ b/bun.lock
@@ -2149,7 +2149,7 @@
"@solidjs/router": ["@solidjs/router@0.15.4", "", { "peerDependencies": { "solid-js": "^1.8.6" } }, "sha512-WOpgg9a9T638cR+5FGbFi/IV4l2FpmBs1GpIMSPa0Ce9vyJN7Wts+X2PqMf9IYn0zUj2MlSJtm1gp7/HI/n5TQ=="],
- "@solidjs/start": ["@solidjs/start@https://pkg.pr.new/@solidjs/start@dfb2020", { "dependencies": { "@babel/core": "^7.28.3", "@babel/traverse": "^7.28.3", "@babel/types": "^7.28.5", "@solidjs/meta": "^0.29.4", "@tanstack/server-functions-plugin": "1.134.5", "@types/babel__traverse": "^7.28.0", "@types/micromatch": "^4.0.9", "cookie-es": "^2.0.0", "defu": "^6.1.4", "error-stack-parser": "^2.1.4", "es-module-lexer": "^1.7.0", "esbuild": "^0.25.3", "fast-glob": "^3.3.3", "h3": "npm:h3@2.0.1-rc.4", "html-to-image": "^1.11.13", "micromatch": "^4.0.8", "path-to-regexp": "^8.2.0", "pathe": "^2.0.3", "radix3": "^1.1.2", "seroval": "^1.3.2", "seroval-plugins": "^1.2.1", "shiki": "^1.26.1", "solid-js": "^1.9.9", "source-map-js": "^1.2.1", "srvx": "^0.9.1", "terracotta": "^1.0.6", "vite": "7.1.10", "vite-plugin-solid": "^2.11.9", "vitest": "^4.0.10" } }, "sha512-7JjjA49VGNOsMRI8QRUhVudZmv0CnJ18SliSgK1ojszs/c3ijftgVkzvXdkSLN4miDTzbkXewf65D6ZBo6W+GQ=="],
+ "@solidjs/start": ["@solidjs/start@https://pkg.pr.new/@solidjs/start@dfb2020", { "dependencies": { "@babel/core": "^7.28.3", "@babel/traverse": "^7.28.3", "@babel/types": "^7.28.5", "@solidjs/meta": "^0.29.4", "@tanstack/server-functions-plugin": "1.134.5", "@types/babel__traverse": "^7.28.0", "@types/micromatch": "^4.0.9", "cookie-es": "^2.0.0", "defu": "^6.1.4", "error-stack-parser": "^2.1.4", "es-module-lexer": "^1.7.0", "esbuild": "^0.25.3", "fast-glob": "^3.3.3", "h3": "npm:h3@2.0.1-rc.4", "html-to-image": "^1.11.13", "micromatch": "^4.0.8", "path-to-regexp": "^8.2.0", "pathe": "^2.0.3", "radix3": "^1.1.2", "seroval": "^1.3.2", "seroval-plugins": "^1.2.1", "shiki": "^1.26.1", "solid-js": "^1.9.9", "source-map-js": "^1.2.1", "srvx": "^0.9.1", "terracotta": "^1.0.6", "vite": "7.1.10", "vite-plugin-solid": "^2.11.9", "vitest": "^4.0.10" } }],
"@speed-highlight/core": ["@speed-highlight/core@1.2.15", "", {}, "sha512-BMq1K3DsElxDWawkX6eLg9+CKJrTVGCBAWVuHXVUV2u0s2711qiChLSId6ikYPfxhdYocLNt3wWwSvDiTvFabw=="],
diff --git a/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx b/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx
index 5288a819b3c9..1f8f99796615 100644
--- a/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx
+++ b/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx
@@ -1302,7 +1302,9 @@ export function Prompt(props: PromptProps) {
flexDirection="row"
gap={1}
flexGrow={1}
- justifyContent={status().type === "retry" ? "space-between" : "flex-start"}
+ justifyContent={
+ status().type === "retry" || status().type === "reconnecting" ? "space-between" : "flex-start"
+ }
>
@@ -1367,6 +1369,41 @@ export function Prompt(props: PromptProps) {
)
})()}
+ {(() => {
+ const reconnecting = createMemo(() => {
+ const s = status()
+ if (s.type !== "reconnecting") return
+ return s
+ })
+ const [visible, setVisible] = createSignal(false)
+ let timer: ReturnType | undefined
+ createEffect(() => {
+ const r = reconnecting()
+ if (r) {
+ timer = setTimeout(() => setVisible(true), 1000)
+ } else {
+ clearTimeout(timer)
+ setVisible(false)
+ }
+ })
+ onCleanup(() => clearTimeout(timer))
+ const msg = createMemo(() => {
+ const r = reconnecting()
+ if (!r) return
+ if (r.message.length > 80) return r.message.slice(0, 80) + "..."
+ return r.message
+ })
+
+ return (
+
+
+
+ {msg()} [reconnecting attempt #{reconnecting()?.attempt}]
+
+
+
+ )
+ })()}
0 ? theme.primary : theme.text}>
@@ -1377,7 +1414,7 @@ export function Prompt(props: PromptProps) {
-
+
{(file) => {file()}}
diff --git a/packages/opencode/src/cli/cmd/tui/context/status-colors.ts b/packages/opencode/src/cli/cmd/tui/context/status-colors.ts
new file mode 100644
index 000000000000..6e1630a3d4ce
--- /dev/null
+++ b/packages/opencode/src/cli/cmd/tui/context/status-colors.ts
@@ -0,0 +1,100 @@
+/**
+ * Status Color Convention
+ *
+ * Based on ISO 3864 safety colors and WCAG accessibility standards.
+ * Each state includes: color + icon + text for accessibility.
+ *
+ * @see https://www.iso.org/standard/51000.html (ISO 3864)
+ * @see https://www.w3.org/WAI/WCAG21/Understanding/use-of-color.html (WCAG 1.4.1)
+ */
+
+import { RGBA } from "@opentui/core"
+
+export const STATUS_COLORS = {
+ running: {
+ color: "#3B82F6", // Blue
+ bg: "rgba(59, 130, 246, 0.15)",
+ icon: "◐",
+ text: "Ejecutando...",
+ description: "Task is currently executing",
+ },
+ waiting: {
+ color: "#F59E0B", // Yellow
+ bg: "rgba(245, 158, 11, 0.15)",
+ icon: "⏳",
+ text: "Esperando respuesta",
+ description: "Waiting for subagent response",
+ },
+ attention: {
+ color: "#D4652F", // Orange
+ bg: "rgba(212, 101, 47, 0.15)",
+ icon: "⚠",
+ text: "Requiere atención",
+ description: "Requires user attention",
+ },
+ error: {
+ color: "#EF4444", // Red
+ bg: "rgba(239, 68, 68, 0.15)",
+ icon: "✗",
+ text: "Error",
+ description: "An error occurred",
+ },
+ done: {
+ color: "#22C55E", // Green
+ bg: "rgba(34, 197, 94, 0.15)",
+ icon: "✓",
+ text: "Completado",
+ description: "Task completed successfully",
+ },
+ idle: {
+ color: "#6B7280", // Gray
+ bg: "rgba(107, 114, 128, 0.1)",
+ icon: "○",
+ text: "Inactivo",
+ description: "No activity",
+ },
+} as const
+
+export type StatusType = keyof typeof STATUS_COLORS
+
+/**
+ * Get RGBA from hex color for theme integration
+ */
+export function statusColorToRgba(hex: string, alpha: number = 1): RGBA {
+ return RGBA.fromHex(hex).withAlpha(alpha)
+}
+
+/**
+ * Get RGBA background color for a status
+ */
+export function statusBackground(status: StatusType): RGBA {
+ const config = STATUS_COLORS[status]
+ const rgba = RGBA.fromHex(config.color)
+ return rgba.withAlpha(0.15)
+}
+
+/**
+ * Check if a status is "active" (not idle or done)
+ */
+export function isActiveStatus(status: StatusType): boolean {
+ return status !== "idle" && status !== "done"
+}
+
+/**
+ * Get toast variant mapping for existing toast system
+ */
+export function statusToToastVariant(status: StatusType): "error" | "warning" | "info" | "success" {
+ switch (status) {
+ case "error":
+ return "error"
+ case "attention":
+ case "waiting":
+ return "warning"
+ case "done":
+ return "success"
+ case "running":
+ case "idle":
+ default:
+ return "info"
+ }
+}
diff --git a/packages/opencode/src/cli/cmd/tui/event.ts b/packages/opencode/src/cli/cmd/tui/event.ts
index ab85b1e64590..e11003bde205 100644
--- a/packages/opencode/src/cli/cmd/tui/event.ts
+++ b/packages/opencode/src/cli/cmd/tui/event.ts
@@ -35,6 +35,7 @@ export const TuiEvent = {
Schema.Struct({
title: Schema.optional(Schema.String),
message: Schema.String,
+ projectName: Schema.optional(Schema.String).annotate({ description: "Project name for multi-project context" }),
variant: Schema.Literals(["info", "success", "warning", "error"]),
duration: Schema.optional(Schema.Number).annotate({ description: "Duration in milliseconds" }),
}),
diff --git a/packages/opencode/src/cli/cmd/tui/ui/status-indicator.tsx b/packages/opencode/src/cli/cmd/tui/ui/status-indicator.tsx
new file mode 100644
index 000000000000..a71fec976e85
--- /dev/null
+++ b/packages/opencode/src/cli/cmd/tui/ui/status-indicator.tsx
@@ -0,0 +1,181 @@
+import { type ParentProps, Show } from "solid-js"
+import { useTheme } from "@tui/context/theme"
+import { TextAttributes } from "@opentui/core"
+import { STATUS_COLORS, type StatusType, statusColorToRgba } from "../context/status-colors"
+
+/**
+ * Status Indicator Component
+ *
+ * Displays a colored indicator with icon and text for task states.
+ * Follows WCAG 1.4.1 accessibility guidelines - color + icon + text.
+ *
+ * Usage:
+ * ```tsx
+ *
+ *
+ * ```
+ */
+export function StatusIndicator(props: ParentProps<{
+ status: StatusType
+ showLabel?: boolean
+ showIcon?: boolean
+ size?: "small" | "medium" | "large"
+}>) {
+ const { theme } = useTheme()
+
+ const config = () => STATUS_COLORS[props.status]
+ const size = () => props.size ?? "medium"
+
+ const padding = () => {
+ switch (size()) {
+ case "small":
+ return 0
+ case "large":
+ return 2
+ default:
+ return 1
+ }
+ }
+
+ const iconSize = () => {
+ switch (size()) {
+ case "small":
+ return 12
+ case "large":
+ return 16
+ default:
+ return 14
+ }
+ }
+
+ const textSize = () => {
+ switch (size()) {
+ case "small":
+ return 10
+ case "large":
+ return 14
+ default:
+ return 12
+ }
+ }
+
+ return (
+
+
+
+ {config().icon}
+
+
+
+
+
+ {config().text}
+
+
+
+ {props.children}
+
+ )
+}
+
+/**
+ * Project Status Badge
+ *
+ * Shows project name with status indicator.
+ * Useful for multi-project views.
+ */
+export function ProjectStatusBadge(props: {
+ projectName: string
+ status: StatusType
+ onClick?: () => void
+}) {
+ const { theme } = useTheme()
+ const config = () => STATUS_COLORS[props.status]
+
+ return (
+
+
+ {config().icon}
+
+
+
+ [{props.projectName}]
+
+
+
+ {config().text}
+
+
+ )
+}
+
+/**
+ * Session State Banner
+ *
+ * Full-width banner for session state changes.
+ * Appears at top of session view to indicate current state.
+ */
+export function SessionStateBanner(props: {
+ status: StatusType
+ projectName?: string
+}) {
+ const { theme } = useTheme()
+ const config = () => STATUS_COLORS[props.status]
+
+ return (
+
+
+ {config().icon} {props.projectName ? `[${props.projectName}] ` : ""}{config().text}
+
+
+ )
+}
diff --git a/packages/opencode/src/cli/cmd/tui/ui/toast.tsx b/packages/opencode/src/cli/cmd/tui/ui/toast.tsx
index 69674ba7ce61..f304dd60f76f 100644
--- a/packages/opencode/src/cli/cmd/tui/ui/toast.tsx
+++ b/packages/opencode/src/cli/cmd/tui/ui/toast.tsx
@@ -3,9 +3,10 @@ import { createStore } from "solid-js/store"
import { useTheme } from "@tui/context/theme"
import { useTerminalDimensions } from "@opentui/solid"
import { SplitBorder } from "../component/border"
-import { TextAttributes } from "@opentui/core"
+import { TextAttributes, RGBA } from "@opentui/core"
import { Schema } from "effect"
import { type TuiEvent } from "../event"
+import { STATUS_COLORS, statusToToastVariant, type StatusType } from "../context/status-colors"
export type ToastOptions = Schema.Schema.Type
@@ -16,33 +17,50 @@ export function Toast() {
return (
- {(current) => (
-
-
-
- {current().title}
+ {(current) => {
+ const variant = () => current().variant
+ const statusColor = () => {
+ const status: StatusType = statusToToastVariant(variant())
+ return STATUS_COLORS[status].color
+ }
+
+ return (
+
+
+
+ [{current().projectName}]
+
+
+
+
+ {current().title}
+
+
+
+ {current().message}
-
-
- {current().message}
-
-
- )}
+
+ )
+ }}
)
}
diff --git a/packages/opencode/src/mcp/index.ts b/packages/opencode/src/mcp/index.ts
index 533466925138..22adf73904a7 100644
--- a/packages/opencode/src/mcp/index.ts
+++ b/packages/opencode/src/mcp/index.ts
@@ -9,15 +9,14 @@ import {
type Tool as MCPToolDef,
ToolListChangedNotificationSchema,
} from "@modelcontextprotocol/sdk/types.js"
-import { Config } from "../config"
-import { ConfigMCP } from "../config/mcp"
-import { Log } from "../util"
-import { NamedError } from "@opencode-ai/core/util/error"
+import { Config } from "../config/config"
+import { Log } from "../util/log"
+import { Process } from "../util/process"
+import { NamedError } from "@opencode-ai/util/error"
import z from "zod/v4"
+import { Instance } from "../project/instance"
import { Installation } from "../installation"
-import { InstallationVersion } from "@opencode-ai/core/installation/version"
import { withTimeout } from "@/util/timeout"
-import { AppFileSystem } from "@opencode-ai/core/filesystem"
import { McpOAuthProvider } from "./oauth-provider"
import { McpOAuthCallback } from "./oauth-callback"
import { McpAuth } from "./auth"
@@ -25,246 +24,350 @@ import { BusEvent } from "../bus/bus-event"
import { Bus } from "@/bus"
import { TuiEvent } from "@/cli/cmd/tui/event"
import open from "open"
-import { Effect, Exit, Layer, Option, Context, Schema, Stream } from "effect"
-import { EffectBridge } from "@/effect"
-import { InstanceState } from "@/effect"
-import { ChildProcess, ChildProcessSpawner } from "effect/unstable/process"
-import { CrossSpawnSpawner } from "@opencode-ai/core/cross-spawn-spawner"
-import { zod as effectZod } from "@/util/effect-zod"
-import { withStatics } from "@/util/schema"
-
-const log = Log.create({ service: "mcp" })
-const DEFAULT_TIMEOUT = 30_000
-
-export const Resource = Schema.Struct({
- name: Schema.String,
- uri: Schema.String,
- description: Schema.optional(Schema.String),
- mimeType: Schema.optional(Schema.String),
- client: Schema.String,
-})
- .annotate({ identifier: "McpResource" })
- .pipe(withStatics((s) => ({ zod: effectZod(s) })))
-export type Resource = Schema.Schema.Type
-
-export const ToolsChanged = BusEvent.define(
- "mcp.tools.changed",
- Schema.Struct({
- server: Schema.String,
- }),
-)
-
-export const BrowserOpenFailed = BusEvent.define(
- "mcp.browser.open.failed",
- Schema.Struct({
- mcpName: Schema.String,
- url: Schema.String,
- }),
-)
-
-export const Failed = NamedError.create(
- "MCPFailed",
- z.object({
- name: z.string(),
- }),
-)
-
-type MCPClient = Client
-
-const StatusConnected = Schema.Struct({ status: Schema.Literal("connected") }).annotate({
- identifier: "MCPStatusConnected",
-})
-const StatusDisabled = Schema.Struct({ status: Schema.Literal("disabled") }).annotate({
- identifier: "MCPStatusDisabled",
-})
-const StatusFailed = Schema.Struct({ status: Schema.Literal("failed"), error: Schema.String }).annotate({
- identifier: "MCPStatusFailed",
-})
-const StatusNeedsAuth = Schema.Struct({ status: Schema.Literal("needs_auth") }).annotate({
- identifier: "MCPStatusNeedsAuth",
-})
-const StatusNeedsClientRegistration = Schema.Struct({
- status: Schema.Literal("needs_client_registration"),
- error: Schema.String,
-}).annotate({ identifier: "MCPStatusNeedsClientRegistration" })
-
-export const Status = Schema.Union([
- StatusConnected,
- StatusDisabled,
- StatusFailed,
- StatusNeedsAuth,
- StatusNeedsClientRegistration,
-])
- .annotate({ identifier: "MCPStatus", discriminator: "status" })
- .pipe(withStatics((s) => ({ zod: effectZod(s) })))
-export type Status = Schema.Schema.Type
-
-// Store transports for OAuth servers to allow finishing auth
-type TransportWithAuth = StreamableHTTPClientTransport | SSEClientTransport
-const pendingOAuthTransports = new Map()
-
-// Prompt cache types
-type PromptInfo = Awaited>["prompts"][number]
-type ResourceInfo = Awaited>["resources"][number]
-type McpEntry = NonNullable[string]
-
-function isMcpConfigured(entry: McpEntry): entry is ConfigMCP.Info {
- return typeof entry === "object" && entry !== null && "type" in entry
-}
-
-const sanitize = (s: string) => s.replace(/[^a-zA-Z0-9_-]/g, "_")
-
-// Convert MCP tool definition to AI SDK Tool type
-function convertMcpTool(mcpTool: MCPToolDef, client: MCPClient, timeout?: number): Tool {
- const inputSchema = mcpTool.inputSchema
- // Spread first, then override type to ensure it's always "object"
- const schema: JSONSchema7 = {
- ...(inputSchema as JSONSchema7),
- type: "object",
- properties: (inputSchema.properties ?? {}) as JSONSchema7["properties"],
- additionalProperties: false,
- }
+export namespace MCP {
+ const log = Log.create({ service: "mcp" })
+ const DEFAULT_TIMEOUT = 30_000
+
+ export const Resource = z
+ .object({
+ name: z.string(),
+ uri: z.string(),
+ description: z.string().optional(),
+ mimeType: z.string().optional(),
+ client: z.string(),
+ })
+ .meta({ ref: "McpResource" })
+ export type Resource = z.infer
- return dynamicTool({
- description: mcpTool.description ?? "",
- inputSchema: jsonSchema(schema),
- execute: async (args: unknown) => {
- return client.callTool(
- {
- name: mcpTool.name,
- arguments: (args || {}) as Record,
- },
- CallToolResultSchema,
- {
- resetTimeoutOnProgress: true,
- timeout,
- },
- )
- },
- })
-}
+ export const ToolsChanged = BusEvent.define(
+ "mcp.tools.changed",
+ z.object({
+ server: z.string(),
+ }),
+ )
-function defs(key: string, client: MCPClient, timeout?: number) {
- return Effect.tryPromise({
- try: () => withTimeout(client.listTools(), timeout ?? DEFAULT_TIMEOUT),
- catch: (err) => (err instanceof Error ? err : new Error(String(err))),
- }).pipe(
- Effect.map((result) => result.tools),
- Effect.catch((err) => {
- log.error("failed to get tools from client", { key, error: err })
- return Effect.succeed(undefined)
+ export const BrowserOpenFailed = BusEvent.define(
+ "mcp.browser.open.failed",
+ z.object({
+ mcpName: z.string(),
+ url: z.string(),
}),
)
-}
-function fetchFromClient(
- clientName: string,
- client: Client,
- listFn: (c: Client) => Promise,
- label: string,
-) {
- return Effect.tryPromise({
- try: () => listFn(client),
- catch: (e: any) => {
- log.error(`failed to get ${label}`, { clientName, error: e.message })
- return e
- },
- }).pipe(
- Effect.map((items) => {
- const out: Record = {}
- const sanitizedClient = sanitize(clientName)
- for (const item of items) {
- out[sanitizedClient + ":" + sanitize(item.name)] = { ...item, client: clientName }
- }
- return out
+ export const Failed = NamedError.create(
+ "MCPFailed",
+ z.object({
+ name: z.string(),
}),
- Effect.orElseSucceed(() => undefined),
)
-}
-interface CreateResult {
- mcpClient?: MCPClient
- status: Status
- defs?: MCPToolDef[]
-}
+ type MCPClient = Client
-interface AuthResult {
- authorizationUrl: string
- oauthState: string
- client?: MCPClient
-}
+ export const Status = z
+ .discriminatedUnion("status", [
+ z
+ .object({
+ status: z.literal("connected"),
+ })
+ .meta({
+ ref: "MCPStatusConnected",
+ }),
+ z
+ .object({
+ status: z.literal("disabled"),
+ })
+ .meta({
+ ref: "MCPStatusDisabled",
+ }),
+ z
+ .object({
+ status: z.literal("failed"),
+ error: z.string(),
+ })
+ .meta({
+ ref: "MCPStatusFailed",
+ }),
+ z
+ .object({
+ status: z.literal("needs_auth"),
+ })
+ .meta({
+ ref: "MCPStatusNeedsAuth",
+ }),
+ z
+ .object({
+ status: z.literal("needs_client_registration"),
+ error: z.string(),
+ })
+ .meta({
+ ref: "MCPStatusNeedsClientRegistration",
+ }),
+ ])
+ .meta({
+ ref: "MCPStatus",
+ })
+ export type Status = z.infer
-// --- Effect Service ---
+ // Register notification handlers for MCP client
+ function registerNotificationHandlers(client: MCPClient, serverName: string) {
+ client.setNotificationHandler(ToolListChangedNotificationSchema, async () => {
+ log.info("tools list changed notification received", { server: serverName })
+ Bus.publish(ToolsChanged, { server: serverName })
+ })
+ }
-interface State {
- status: Record
- clients: Record
- defs: Record
-}
+ function isNetworkError(err: unknown): boolean {
+ if (err instanceof UnauthorizedError) return false
+ if (!(err instanceof Error)) return false
+ if ("code" in err && typeof (err as { code: unknown }).code === "number") return false
+ const msg = err.message.toLowerCase()
+ return (
+ msg.includes("econnreset") ||
+ msg.includes("econnrefused") ||
+ msg.includes("etimedout") ||
+ msg.includes("fetch failed") ||
+ msg.includes("socket") ||
+ msg.includes("network") ||
+ msg.includes("connection")
+ )
+ }
-export interface Interface {
- readonly status: () => Effect.Effect>
- readonly clients: () => Effect.Effect>
- readonly tools: () => Effect.Effect>
- readonly prompts: () => Effect.Effect>
- readonly resources: () => Effect.Effect>
- readonly add: (name: string, mcp: ConfigMCP.Info) => Effect.Effect<{ status: Record | Status }>
- readonly connect: (name: string) => Effect.Effect
- readonly disconnect: (name: string) => Effect.Effect
- readonly getPrompt: (
- clientName: string,
- name: string,
- args?: Record,
- ) => Effect.Effect> | undefined>
- readonly readResource: (
- clientName: string,
- resourceUri: string,
- ) => Effect.Effect> | undefined>
- readonly startAuth: (mcpName: string) => Effect.Effect<{ authorizationUrl: string; oauthState: string }>
- readonly authenticate: (mcpName: string) => Effect.Effect
- readonly finishAuth: (mcpName: string, authorizationCode: string) => Effect.Effect
- readonly removeAuth: (mcpName: string) => Effect.Effect
- readonly supportsOAuth: (mcpName: string) => Effect.Effect
- readonly hasStoredTokens: (mcpName: string) => Effect.Effect
- readonly getAuthStatus: (mcpName: string) => Effect.Effect
-}
+ // Convert MCP tool definition to AI SDK Tool type
+ async function convertMcpTool(
+ mcpTool: MCPToolDef,
+ client: MCPClient,
+ timeout?: number,
+ reconnect?: () => Promise,
+ ): Promise {
+ const inputSchema = mcpTool.inputSchema
+
+ // Spread first, then override type to ensure it's always "object"
+ const schema: JSONSchema7 = {
+ ...(inputSchema as JSONSchema7),
+ type: "object",
+ properties: (inputSchema.properties ?? {}) as JSONSchema7["properties"],
+ additionalProperties: false,
+ }
-export class Service extends Context.Service()("@opencode/MCP") {}
-
-export const layer = Layer.effect(
- Service,
- Effect.gen(function* () {
- const spawner = yield* ChildProcessSpawner.ChildProcessSpawner
- const auth = yield* McpAuth.Service
- const bus = yield* Bus.Service
-
- type Transport = StdioClientTransport | StreamableHTTPClientTransport | SSEClientTransport
-
- /**
- * Connect a client via the given transport with resource safety:
- * on failure the transport is closed; on success the caller owns it.
- */
- const connectTransport = (transport: Transport, timeout: number) =>
- Effect.acquireUseRelease(
- Effect.succeed(transport),
- (t) =>
- Effect.tryPromise({
- try: () => {
- const client = new Client({ name: "opencode", version: InstallationVersion })
- return withTimeout(client.connect(t), timeout).then(() => client)
+ return dynamicTool({
+ description: mcpTool.description ?? "",
+ inputSchema: jsonSchema(schema),
+ execute: async (args: unknown) => {
+ const call = (c: MCPClient) =>
+ c.callTool(
+ {
+ name: mcpTool.name,
+ arguments: (args || {}) as Record,
+ },
+ CallToolResultSchema,
+ {
+ resetTimeoutOnProgress: true,
+ timeout,
},
- catch: (e) => (e instanceof Error ? e : new Error(String(e))),
+ )
+ if (!reconnect) return call(client)
+ try {
+ return await call(client)
+ } catch (err) {
+ if (!isNetworkError(err)) throw err
+ const fresh = await reconnect().catch(() => undefined)
+ if (!fresh) throw err
+ return call(fresh)
+ }
+ },
+ })
+ }
+
+ // Store transports for OAuth servers to allow finishing auth
+ type TransportWithAuth = StreamableHTTPClientTransport | SSEClientTransport
+ const pendingOAuthTransports = new Map()
+
+ // Prompt cache types
+ type PromptInfo = Awaited>["prompts"][number]
+
+ type ResourceInfo = Awaited>["resources"][number]
+ type McpEntry = NonNullable[string]
+ function isMcpConfigured(entry: McpEntry): entry is Config.Mcp {
+ return typeof entry === "object" && entry !== null && "type" in entry
+ }
+
+ async function descendants(pid: number): Promise {
+ if (process.platform === "win32") return []
+ const pids: number[] = []
+ const queue = [pid]
+ while (queue.length > 0) {
+ const current = queue.shift()!
+ const lines = await Process.lines(["pgrep", "-P", String(current)], { nothrow: true })
+ for (const tok of lines) {
+ const cpid = parseInt(tok, 10)
+ if (!isNaN(cpid) && !pids.includes(cpid)) {
+ pids.push(cpid)
+ queue.push(cpid)
+ }
+ }
+ }
+ return pids
+ }
+
+ const state = Instance.state(
+ async () => {
+ const cfg = await Config.get()
+ const config = cfg.mcp ?? {}
+ const clients: Record = {}
+ const status: Record = {}
+
+ await Promise.all(
+ Object.entries(config).map(async ([key, mcp]) => {
+ if (!isMcpConfigured(mcp)) {
+ log.error("Ignoring MCP config entry without type", { key })
+ return
+ }
+
+ // If disabled by config, mark as disabled without trying to connect
+ if (mcp.enabled === false) {
+ status[key] = { status: "disabled" }
+ return
+ }
+
+ const result = await create(key, mcp).catch(() => undefined)
+ if (!result) return
+
+ status[key] = result.status
+
+ if (result.mcpClient) {
+ clients[key] = result.mcpClient
+ }
+ }),
+ )
+ return {
+ status,
+ clients,
+ }
+ },
+ async (state) => {
+ // The MCP SDK only signals the direct child process on close.
+ // Servers like chrome-devtools-mcp spawn grandchild processes
+ // (e.g. Chrome) that the SDK never reaches, leaving them orphaned.
+ // Kill the full descendant tree first so the server exits promptly
+ // and no processes are left behind.
+ for (const client of Object.values(state.clients)) {
+ const pid = (client.transport as any)?.pid
+ if (typeof pid !== "number") continue
+ for (const dpid of await descendants(pid)) {
+ try {
+ process.kill(dpid, "SIGTERM")
+ } catch {}
+ }
+ }
+
+ await Promise.all(
+ Object.values(state.clients).map((client) =>
+ client.close().catch((error) => {
+ log.error("Failed to close MCP client", {
+ error,
+ })
}),
- (t, exit) => (Exit.isFailure(exit) ? Effect.tryPromise(() => t.close()).pipe(Effect.ignore) : Effect.void),
+ ),
)
+ pendingOAuthTransports.clear()
+ },
+ )
+
+ // Helper function to fetch prompts for a specific client
+ async function fetchPromptsForClient(clientName: string, client: Client) {
+ const prompts = await client.listPrompts().catch((e) => {
+ log.error("failed to get prompts", { clientName, error: e.message })
+ return undefined
+ })
+
+ if (!prompts) {
+ return
+ }
+
+ const commands: Record = {}
- const DISABLED_RESULT: CreateResult = { status: { status: "disabled" } }
+ for (const prompt of prompts.prompts) {
+ const sanitizedClientName = clientName.replace(/[^a-zA-Z0-9_-]/g, "_")
+ const sanitizedPromptName = prompt.name.replace(/[^a-zA-Z0-9_-]/g, "_")
+ const key = sanitizedClientName + ":" + sanitizedPromptName
- const connectRemote = Effect.fn("MCP.connectRemote")(function* (
- key: string,
- mcp: ConfigMCP.Info & { type: "remote" },
- ) {
+ commands[key] = { ...prompt, client: clientName }
+ }
+ return commands
+ }
+
+ async function fetchResourcesForClient(clientName: string, client: Client) {
+ const resources = await client.listResources().catch((e) => {
+ log.error("failed to get prompts", { clientName, error: e.message })
+ return undefined
+ })
+
+ if (!resources) {
+ return
+ }
+
+ const commands: Record = {}
+
+ for (const resource of resources.resources) {
+ const sanitizedClientName = clientName.replace(/[^a-zA-Z0-9_-]/g, "_")
+ const sanitizedResourceName = resource.name.replace(/[^a-zA-Z0-9_-]/g, "_")
+ const key = sanitizedClientName + ":" + sanitizedResourceName
+
+ commands[key] = { ...resource, client: clientName }
+ }
+ return commands
+ }
+
+ export async function add(name: string, mcp: Config.Mcp) {
+ const s = await state()
+ const result = await create(name, mcp)
+ if (!result) {
+ const status = {
+ status: "failed" as const,
+ error: "unknown error",
+ }
+ s.status[name] = status
+ return {
+ status,
+ }
+ }
+ if (!result.mcpClient) {
+ s.status[name] = result.status
+ return {
+ status: s.status,
+ }
+ }
+ // Close existing client if present to prevent memory leaks
+ const existingClient = s.clients[name]
+ if (existingClient) {
+ await existingClient.close().catch((error) => {
+ log.error("Failed to close existing MCP client", { name, error })
+ })
+ }
+ s.clients[name] = result.mcpClient
+ s.status[name] = result.status
+
+ return {
+ status: s.status,
+ }
+ }
+
+ async function create(key: string, mcp: Config.Mcp) {
+ if (mcp.enabled === false) {
+ log.info("mcp server disabled", { key })
+ return {
+ mcpClient: undefined,
+ status: { status: "disabled" as const },
+ }
+ }
+
+ log.info("found", { key, type: mcp.type })
+ let mcpClient: MCPClient | undefined
+ let status: Status | undefined = undefined
+
+ if (mcp.type === "remote") {
+ // OAuth is enabled by default for remote servers unless explicitly disabled with oauth: false
const oauthDisabled = mcp.oauth === false
const oauthConfig = typeof mcp.oauth === "object" ? mcp.oauth : undefined
let authProvider: McpOAuthProvider | undefined
@@ -277,14 +380,13 @@ export const layer = Layer.effect(
clientId: oauthConfig?.clientId,
clientSecret: oauthConfig?.clientSecret,
scope: oauthConfig?.scope,
- redirectUri: oauthConfig?.redirectUri,
},
{
onRedirect: async (url) => {
log.info("oauth redirect requested", { key, url: url.toString() })
+ // Store the URL - actual browser opening is handled by startAuth
},
},
- auth,
)
}
@@ -305,77 +407,78 @@ export const layer = Layer.effect(
},
]
+ let lastError: Error | undefined
const connectTimeout = mcp.timeout ?? DEFAULT_TIMEOUT
- let lastStatus: Status | undefined
-
for (const { name, transport } of transports) {
- const result = yield* connectTransport(transport, connectTimeout).pipe(
- Effect.map((client) => ({ client, transportName: name })),
- Effect.catch((error) => {
- const lastError = error instanceof Error ? error : new Error(String(error))
- const isAuthError =
- error instanceof UnauthorizedError || (authProvider && lastError.message.includes("OAuth"))
-
- if (isAuthError) {
- log.info("mcp server requires authentication", { key, transport: name })
-
- if (lastError.message.includes("registration") || lastError.message.includes("client_id")) {
- lastStatus = {
- status: "needs_client_registration" as const,
- error: "Server does not support dynamic client registration. Please provide clientId in config.",
- }
- return bus
- .publish(TuiEvent.ToastShow, {
- title: "MCP Authentication Required",
- message: `Server "${key}" requires a pre-registered client ID. Add clientId to your config.`,
- variant: "warning",
- duration: 8000,
- })
- .pipe(Effect.ignore, Effect.as(undefined))
- } else {
- pendingOAuthTransports.set(key, transport)
- lastStatus = { status: "needs_auth" as const }
- return bus
- .publish(TuiEvent.ToastShow, {
- title: "MCP Authentication Required",
- message: `Server "${key}" requires authentication. Run: opencode mcp auth ${key}`,
- variant: "warning",
- duration: 8000,
- })
- .pipe(Effect.ignore, Effect.as(undefined))
+ try {
+ const client = new Client({
+ name: "opencode",
+ version: Installation.VERSION,
+ })
+ await withTimeout(client.connect(transport), connectTimeout)
+ registerNotificationHandlers(client, key)
+ mcpClient = client
+ log.info("connected", { key, transport: name })
+ status = { status: "connected" }
+ break
+ } catch (error) {
+ lastError = error instanceof Error ? error : new Error(String(error))
+
+ // Handle OAuth-specific errors.
+ // The SDK throws UnauthorizedError when auth() returns 'REDIRECT',
+ // but may also throw plain Errors when auth() fails internally
+ // (e.g. during discovery, registration, or state generation).
+ // When an authProvider is attached, treat both cases as auth-related.
+ const isAuthError =
+ error instanceof UnauthorizedError || (authProvider && lastError.message.includes("OAuth"))
+ if (isAuthError) {
+ log.info("mcp server requires authentication", { key, transport: name })
+
+ // Check if this is a "needs registration" error
+ if (lastError.message.includes("registration") || lastError.message.includes("client_id")) {
+ status = {
+ status: "needs_client_registration" as const,
+ error: "Server does not support dynamic client registration. Please provide clientId in config.",
}
+ // Show toast for needs_client_registration
+ Bus.publish(TuiEvent.ToastShow, {
+ title: "MCP Authentication Required",
+ message: `Server "${key}" requires a pre-registered client ID. Add clientId to your config.`,
+ variant: "warning",
+ duration: 8000,
+ }).catch((e) => log.debug("failed to show toast", { error: e }))
+ } else {
+ // Store transport for later finishAuth call
+ pendingOAuthTransports.set(key, transport)
+ status = { status: "needs_auth" as const }
+ // Show toast for needs_auth
+ Bus.publish(TuiEvent.ToastShow, {
+ title: "MCP Authentication Required",
+ message: `Server "${key}" requires authentication. Run: opencode mcp auth ${key}`,
+ variant: "warning",
+ duration: 8000,
+ }).catch((e) => log.debug("failed to show toast", { error: e }))
}
+ break
+ }
- log.debug("transport connection failed", {
- key,
- transport: name,
- url: mcp.url,
- error: lastError.message,
- })
- lastStatus = { status: "failed" as const, error: lastError.message }
- return Effect.succeed(undefined)
- }),
- )
- if (result) {
- log.info("connected", { key, transport: result.transportName })
- return { client: result.client as MCPClient | undefined, status: { status: "connected" } as Status }
+ log.debug("transport connection failed", {
+ key,
+ transport: name,
+ url: mcp.url,
+ error: lastError.message,
+ })
+ status = {
+ status: "failed" as const,
+ error: lastError.message,
+ }
}
- // If this was an auth error, stop trying other transports
- if (lastStatus?.status === "needs_auth" || lastStatus?.status === "needs_client_registration") break
}
+ }
- return {
- client: undefined as MCPClient | undefined,
- status: (lastStatus ?? { status: "failed", error: "Unknown error" }) as Status,
- }
- })
-
- const connectLocal = Effect.fn("MCP.connectLocal")(function* (
- key: string,
- mcp: ConfigMCP.Info & { type: "local" },
- ) {
+ if (mcp.type === "local") {
const [cmd, ...args] = mcp.command
- const cwd = yield* InstanceState.directory
+ const cwd = Instance.directory
const transport = new StdioClientTransport({
stderr: "pipe",
command: cmd,
@@ -392,526 +495,536 @@ export const layer = Layer.effect(
})
const connectTimeout = mcp.timeout ?? DEFAULT_TIMEOUT
- return yield* connectTransport(transport, connectTimeout).pipe(
- Effect.map((client): { client: MCPClient | undefined; status: Status } => ({
- client,
- status: { status: "connected" },
- })),
- Effect.catch((error): Effect.Effect<{ client: MCPClient | undefined; status: Status }> => {
- const msg = error instanceof Error ? error.message : String(error)
- log.error("local mcp startup failed", { key, command: mcp.command, cwd, error: msg })
- return Effect.succeed({ client: undefined, status: { status: "failed", error: msg } })
- }),
- )
- })
-
- const create = Effect.fn("MCP.create")(function* (key: string, mcp: ConfigMCP.Info) {
- if (mcp.enabled === false) {
- log.info("mcp server disabled", { key })
- return DISABLED_RESULT
+ try {
+ const client = new Client({
+ name: "opencode",
+ version: Installation.VERSION,
+ })
+ await withTimeout(client.connect(transport), connectTimeout)
+ registerNotificationHandlers(client, key)
+ mcpClient = client
+ status = {
+ status: "connected",
+ }
+ } catch (error) {
+ log.error("local mcp startup failed", {
+ key,
+ command: mcp.command,
+ cwd,
+ error: error instanceof Error ? error.message : String(error),
+ })
+ status = {
+ status: "failed" as const,
+ error: error instanceof Error ? error.message : String(error),
+ }
}
+ }
- log.info("found", { key, type: mcp.type })
-
- const { client: mcpClient, status } =
- mcp.type === "remote"
- ? yield* connectRemote(key, mcp as ConfigMCP.Info & { type: "remote" })
- : yield* connectLocal(key, mcp as ConfigMCP.Info & { type: "local" })
-
- if (!mcpClient) {
- return { status } satisfies CreateResult
+ if (!status) {
+ status = {
+ status: "failed" as const,
+ error: "Unknown error",
}
+ }
- const listed = yield* defs(key, mcpClient, mcp.timeout)
- if (!listed) {
- yield* Effect.tryPromise(() => mcpClient.close()).pipe(Effect.ignore)
- return { status: { status: "failed", error: "Failed to get tools" } } satisfies CreateResult
+ if (!mcpClient) {
+ return {
+ mcpClient: undefined,
+ status,
}
+ }
- log.info("create() successfully created client", { key, toolCount: listed.length })
- return { mcpClient, status, defs: listed } satisfies CreateResult
+ const result = await withTimeout(mcpClient.listTools(), mcp.timeout ?? DEFAULT_TIMEOUT).catch((err) => {
+ log.error("failed to get tools from client", { key, error: err })
+ return undefined
})
- const cfgSvc = yield* Config.Service
-
- const descendants = Effect.fnUntraced(
- function* (pid: number) {
- if (process.platform === "win32") return [] as number[]
- const pids: number[] = []
- const queue = [pid]
- while (queue.length > 0) {
- const current = queue.shift()!
- const handle = yield* spawner.spawn(ChildProcess.make("pgrep", ["-P", String(current)], { stdin: "ignore" }))
- const text = yield* Stream.mkString(Stream.decodeText(handle.stdout))
- yield* handle.exitCode
- for (const tok of text.split("\n")) {
- const cpid = parseInt(tok, 10)
- if (!isNaN(cpid) && !pids.includes(cpid)) {
- pids.push(cpid)
- queue.push(cpid)
- }
- }
- }
- return pids
- },
- Effect.scoped,
- Effect.catch(() => Effect.succeed([] as number[])),
- )
-
- function watch(s: State, name: string, client: MCPClient, bridge: EffectBridge.Shape, timeout?: number) {
- client.setNotificationHandler(ToolListChangedNotificationSchema, async () => {
- log.info("tools list changed notification received", { server: name })
- if (s.clients[name] !== client || s.status[name]?.status !== "connected") return
-
- const listed = await bridge.promise(defs(name, client, timeout))
- if (!listed) return
- if (s.clients[name] !== client || s.status[name]?.status !== "connected") return
-
- s.defs[name] = listed
- await bridge.promise(bus.publish(ToolsChanged, { server: name }).pipe(Effect.ignore))
+ if (!result) {
+ await mcpClient.close().catch((error) => {
+ log.error("Failed to close MCP client", {
+ error,
+ })
})
+ status = {
+ status: "failed",
+ error: "Failed to get tools",
+ }
+ return {
+ mcpClient: undefined,
+ status: {
+ status: "failed" as const,
+ error: "Failed to get tools",
+ },
+ }
}
- const state = yield* InstanceState.make(
- Effect.fn("MCP.state")(function* () {
- const cfg = yield* cfgSvc.get()
- const bridge = yield* EffectBridge.make()
- const config = cfg.mcp ?? {}
- const s: State = {
- status: {},
- clients: {},
- defs: {},
- }
-
- yield* Effect.forEach(
- Object.entries(config),
- ([key, mcp]) =>
- Effect.gen(function* () {
- if (!isMcpConfigured(mcp)) {
- log.error("Ignoring MCP config entry without type", { key })
- return
- }
-
- if (mcp.enabled === false) {
- s.status[key] = { status: "disabled" }
- return
- }
+ log.info("create() successfully created client", { key, toolCount: result.tools.length })
+ return {
+ mcpClient,
+ status,
+ }
+ }
- const result = yield* create(key, mcp).pipe(Effect.catch(() => Effect.void))
- if (!result) return
+ export async function status() {
+ const s = await state()
+ const cfg = await Config.get()
+ const config = cfg.mcp ?? {}
+ const result: Record<string, Status> = {}
- s.status[key] = result.status
- if (result.mcpClient) {
- s.clients[key] = result.mcpClient
- s.defs[key] = result.defs!
- watch(s, key, result.mcpClient, bridge, mcp.timeout)
- }
- }),
- { concurrency: "unbounded" },
- )
+ // Include all configured MCPs from config, not just connected ones
+ for (const [key, mcp] of Object.entries(config)) {
+ if (!isMcpConfigured(mcp)) continue
+ result[key] = s.status[key] ?? { status: "disabled" }
+ }
- yield* Effect.addFinalizer(() =>
- Effect.gen(function* () {
- yield* Effect.forEach(
- Object.values(s.clients),
- (client) =>
- Effect.gen(function* () {
- const pid = client.transport instanceof StdioClientTransport ? client.transport.pid : null
- if (typeof pid === "number") {
- const pids = yield* descendants(pid)
- for (const dpid of pids) {
- try {
- process.kill(dpid, "SIGTERM")
- } catch {}
- }
- }
- yield* Effect.tryPromise(() => client.close()).pipe(Effect.ignore)
- }),
- { concurrency: "unbounded" },
- )
- pendingOAuthTransports.clear()
- }),
- )
+ return result
+ }
- return s
- }),
- )
+ export async function clients() {
+ return state().then((state) => state.clients)
+ }
- function closeClient(s: State, name: string) {
- const client = s.clients[name]
- delete s.defs[name]
- if (!client) return Effect.void
- return Effect.tryPromise(() => client.close()).pipe(Effect.ignore)
- }
-
- const storeClient = Effect.fnUntraced(function* (
- s: State,
- name: string,
- client: MCPClient,
- listed: MCPToolDef[],
- timeout?: number,
- ) {
- const bridge = yield* EffectBridge.make()
- yield* closeClient(s, name)
- s.status[name] = { status: "connected" }
- s.clients[name] = client
- s.defs[name] = listed
- watch(s, name, client, bridge, timeout)
- return s.status[name]
- })
+ export async function connect(name: string) {
+ const cfg = await Config.get()
+ const config = cfg.mcp ?? {}
+ const mcp = config[name]
+ if (!mcp) {
+ log.error("MCP config not found", { name })
+ return
+ }
- const status = Effect.fn("MCP.status")(function* () {
- const s = yield* InstanceState.get(state)
+ if (!isMcpConfigured(mcp)) {
+ log.error("Ignoring MCP connect request for config without type", { name })
+ return
+ }
- const cfg = yield* cfgSvc.get()
- const config = cfg.mcp ?? {}
- const result: Record = {}
+ const result = await create(name, { ...mcp, enabled: true })
- for (const [key, mcp] of Object.entries(config)) {
- if (!isMcpConfigured(mcp)) continue
- result[key] = s.status[key] ?? { status: "disabled" }
+ if (!result) {
+ const s = await state()
+ s.status[name] = {
+ status: "failed",
+ error: "Unknown error during connection",
}
+ return
+ }
- return result
- })
-
- const clients = Effect.fn("MCP.clients")(function* () {
- const s = yield* InstanceState.get(state)
- return s.clients
- })
+ const s = await state()
+ s.status[name] = result.status
+ if (result.mcpClient) {
+ // Close existing client if present to prevent memory leaks
+ const existingClient = s.clients[name]
+ if (existingClient) {
+ await existingClient.close().catch((error) => {
+ log.error("Failed to close existing MCP client", { name, error })
+ })
+ }
+ s.clients[name] = result.mcpClient
+ }
+ }
- const createAndStore = Effect.fn("MCP.createAndStore")(function* (name: string, mcp: ConfigMCP.Info) {
- const s = yield* InstanceState.get(state)
- const result = yield* create(name, mcp)
+ export async function disconnect(name: string) {
+ const s = await state()
+ const client = s.clients[name]
+ if (client) {
+ await client.close().catch((error) => {
+ log.error("Failed to close MCP client", { name, error })
+ })
+ delete s.clients[name]
+ }
+ s.status[name] = { status: "disabled" }
+ }
- s.status[name] = result.status
- if (!result.mcpClient) {
- yield* closeClient(s, name)
- delete s.clients[name]
- return result.status
- }
+ export async function tools() {
+ const result: Record = {}
+ const s = await state()
+ const cfg = await Config.get()
+ const config = cfg.mcp ?? {}
+ const clientsSnapshot = await clients()
+ const defaultTimeout = cfg.experimental?.mcp_timeout
- return yield* storeClient(s, name, result.mcpClient, result.defs!, mcp.timeout)
- })
+ const connectedClients = Object.entries(clientsSnapshot).filter(
+ ([clientName]) => s.status[clientName]?.status === "connected",
+ )
- const add = Effect.fn("MCP.add")(function* (name: string, mcp: ConfigMCP.Info) {
- yield* createAndStore(name, mcp)
- const s = yield* InstanceState.get(state)
- return { status: s.status }
- })
+ const toolsResults = await Promise.all(
+ connectedClients.map(async ([clientName, client]) => {
+ const toolsResult = await client.listTools().catch((e) => {
+ log.error("failed to get tools", { clientName, error: e.message })
+ const failedStatus = {
+ status: "failed" as const,
+ error: e instanceof Error ? e.message : String(e),
+ }
+ s.status[clientName] = failedStatus
+ delete s.clients[clientName]
+ return undefined
+ })
+ return { clientName, client, toolsResult }
+ }),
+ )
- const connect = Effect.fn("MCP.connect")(function* (name: string) {
- const mcp = yield* getMcpConfig(name)
- if (!mcp) {
- log.error("MCP config not found or invalid", { name })
- return
+ for (const { clientName, client, toolsResult } of toolsResults) {
+ if (!toolsResult) continue
+ const mcpConfig = config[clientName]
+ const entry = isMcpConfigured(mcpConfig) ? mcpConfig : undefined
+ const timeout = entry?.timeout ?? defaultTimeout
+ const reconnect: (() => Promise<MCPClient | undefined>) | undefined =
+ entry && entry.type === "remote"
+ ? async () => {
+ const cur = await state()
+ const old = cur.clients[clientName]
+ if (old) {
+ await old.close().catch(() => {})
+ delete cur.clients[clientName]
+ }
+ log.info("reconnecting remote mcp server after tool call failure", { clientName })
+ const r = await create(clientName, entry).catch(() => undefined)
+ if (!r?.mcpClient) return undefined
+ cur.clients[clientName] = r.mcpClient
+ cur.status[clientName] = r.status
+ return r.mcpClient
+ }
+ : undefined
+ for (const mcpTool of toolsResult.tools) {
+ const sanitizedClientName = clientName.replace(/[^a-zA-Z0-9_-]/g, "_")
+ const sanitizedToolName = mcpTool.name.replace(/[^a-zA-Z0-9_-]/g, "_")
+ result[sanitizedClientName + "_" + sanitizedToolName] = await convertMcpTool(
+ mcpTool,
+ client,
+ timeout,
+ reconnect,
+ )
}
- yield* createAndStore(name, { ...mcp, enabled: true })
- })
+ }
+ return result
+ }
- const disconnect = Effect.fn("MCP.disconnect")(function* (name: string) {
- const s = yield* InstanceState.get(state)
- yield* closeClient(s, name)
- delete s.clients[name]
- s.status[name] = { status: "disabled" }
- })
+ export async function prompts() {
+ const s = await state()
+ const clientsSnapshot = await clients()
- const tools = Effect.fn("MCP.tools")(function* () {
- const result: Record = {}
- const s = yield* InstanceState.get(state)
+ const prompts = Object.fromEntries(
+ (
+ await Promise.all(
+ Object.entries(clientsSnapshot).map(async ([clientName, client]) => {
+ if (s.status[clientName]?.status !== "connected") {
+ return []
+ }
- const cfg = yield* cfgSvc.get()
- const config = cfg.mcp ?? {}
- const defaultTimeout = cfg.experimental?.mcp_timeout
+ return Object.entries((await fetchPromptsForClient(clientName, client)) ?? {})
+ }),
+ )
+ ).flat(),
+ )
- const connectedClients = Object.entries(s.clients).filter(
- ([clientName]) => s.status[clientName]?.status === "connected",
- )
+ return prompts
+ }
- yield* Effect.forEach(
- connectedClients,
- ([clientName, client]) =>
- Effect.gen(function* () {
- const mcpConfig = config[clientName]
- const entry = mcpConfig && isMcpConfigured(mcpConfig) ? mcpConfig : undefined
-
- const listed = s.defs[clientName]
- if (!listed) {
- log.warn("missing cached tools for connected server", { clientName })
- return
- }
+ export async function resources() {
+ const s = await state()
+ const clientsSnapshot = await clients()
- const timeout = entry?.timeout ?? defaultTimeout
- for (const mcpTool of listed) {
- result[sanitize(clientName) + "_" + sanitize(mcpTool.name)] = convertMcpTool(mcpTool, client, timeout)
+ const result = Object.fromEntries(
+ (
+ await Promise.all(
+ Object.entries(clientsSnapshot).map(async ([clientName, client]) => {
+ if (s.status[clientName]?.status !== "connected") {
+ return []
}
+
+ return Object.entries((await fetchResourcesForClient(clientName, client)) ?? {})
}),
- { concurrency: "unbounded" },
- )
- return result
- })
+ )
+ ).flat(),
+ )
- function collectFromConnected(
- s: State,
- listFn: (c: Client) => Promise,
- label: string,
- ) {
- return Effect.forEach(
- Object.entries(s.clients).filter(([name]) => s.status[name]?.status === "connected"),
- ([clientName, client]) =>
- fetchFromClient(clientName, client, listFn, label).pipe(Effect.map((items) => Object.entries(items ?? {}))),
- { concurrency: "unbounded" },
- ).pipe(Effect.map((results) => Object.fromEntries(results.flat())))
- }
-
- const prompts = Effect.fn("MCP.prompts")(function* () {
- const s = yield* InstanceState.get(state)
- return yield* collectFromConnected(s, (c) => c.listPrompts().then((r) => r.prompts), "prompts")
- })
+ return result
+ }
- const resources = Effect.fn("MCP.resources")(function* () {
- const s = yield* InstanceState.get(state)
- return yield* collectFromConnected(s, (c) => c.listResources().then((r) => r.resources), "resources")
- })
+ export async function getPrompt(clientName: string, name: string, args?: Record<string, string>) {
+ const clientsSnapshot = await clients()
+ const client = clientsSnapshot[clientName]
- const withClient = Effect.fnUntraced(function* (
- clientName: string,
- fn: (client: MCPClient) => Promise,
- label: string,
- meta?: Record,
- ) {
- const s = yield* InstanceState.get(state)
- const client = s.clients[clientName]
- if (!client) {
- log.warn(`client not found for ${label}`, { clientName })
+ if (!client) {
+ log.warn("client not found for prompt", {
+ clientName,
+ })
+ return undefined
+ }
+
+ const result = await client
+ .getPrompt({
+ name: name,
+ arguments: args,
+ })
+ .catch((e) => {
+ log.error("failed to get prompt from MCP server", {
+ clientName,
+ promptName: name,
+ error: e.message,
+ })
return undefined
- }
- return yield* Effect.tryPromise({
- try: () => fn(client),
- catch: (e: any) => {
- log.error(`failed to ${label}`, { clientName, ...meta, error: e?.message })
- return e
- },
- }).pipe(Effect.orElseSucceed(() => undefined))
- })
+ })
+
+ return result
+ }
- const getPrompt = Effect.fn("MCP.getPrompt")(function* (
- clientName: string,
- name: string,
- args?: Record,
- ) {
- return yield* withClient(clientName, (client) => client.getPrompt({ name, arguments: args }), "getPrompt", {
- promptName: name,
+ export async function readResource(clientName: string, resourceUri: string) {
+ const clientsSnapshot = await clients()
+ const client = clientsSnapshot[clientName]
+
+ if (!client) {
+ log.warn("client not found for prompt", {
+ clientName: clientName,
})
- })
+ return undefined
+ }
- const readResource = Effect.fn("MCP.readResource")(function* (clientName: string, resourceUri: string) {
- return yield* withClient(clientName, (client) => client.readResource({ uri: resourceUri }), "readResource", {
- resourceUri,
+ const result = await client
+ .readResource({
+ uri: resourceUri,
+ })
+ .catch((e) => {
+ log.error("failed to get prompt from MCP server", {
+ clientName: clientName,
+ resourceUri: resourceUri,
+ error: e.message,
+ })
+ return undefined
})
- })
- const getMcpConfig = Effect.fnUntraced(function* (mcpName: string) {
- const cfg = yield* cfgSvc.get()
- const mcpConfig = cfg.mcp?.[mcpName]
- if (!mcpConfig || !isMcpConfigured(mcpConfig)) return undefined
- return mcpConfig
- })
+ return result
+ }
- const startAuth = Effect.fn("MCP.startAuth")(function* (mcpName: string) {
- const mcpConfig = yield* getMcpConfig(mcpName)
- if (!mcpConfig) throw new Error(`MCP server ${mcpName} not found or disabled`)
- if (mcpConfig.type !== "remote") throw new Error(`MCP server ${mcpName} is not a remote server`)
- if (mcpConfig.oauth === false) throw new Error(`MCP server ${mcpName} has OAuth explicitly disabled`)
-
- // OAuth config is optional - if not provided, we'll use auto-discovery
- const oauthConfig = typeof mcpConfig.oauth === "object" ? mcpConfig.oauth : undefined
-
- // Start the callback server with custom redirectUri if configured
- yield* Effect.promise(() => McpOAuthCallback.ensureRunning(oauthConfig?.redirectUri))
-
- const oauthState = Array.from(crypto.getRandomValues(new Uint8Array(32)))
- .map((b) => b.toString(16).padStart(2, "0"))
- .join("")
- yield* auth.updateOAuthState(mcpName, oauthState)
- let capturedUrl: URL | undefined
- const authProvider = new McpOAuthProvider(
- mcpName,
- mcpConfig.url,
- {
- clientId: oauthConfig?.clientId,
- clientSecret: oauthConfig?.clientSecret,
- scope: oauthConfig?.scope,
- redirectUri: oauthConfig?.redirectUri,
- },
- {
- onRedirect: async (url) => {
- capturedUrl = url
- },
- },
- auth,
- )
+ /**
+ * Start OAuth authentication flow for an MCP server.
+ * Returns the authorization URL that should be opened in a browser.
+ */
+ export async function startAuth(mcpName: string): Promise<{ authorizationUrl: string }> {
+ const cfg = await Config.get()
+ const mcpConfig = cfg.mcp?.[mcpName]
- const transport = new StreamableHTTPClientTransport(new URL(mcpConfig.url), { authProvider })
+ if (!mcpConfig) {
+ throw new Error(`MCP server not found: ${mcpName}`)
+ }
- return yield* Effect.tryPromise({
- try: () => {
- const client = new Client({ name: "opencode", version: InstallationVersion })
- return client
- .connect(transport)
- .then(() => ({ authorizationUrl: "", oauthState, client }) satisfies AuthResult)
- },
- catch: (error) => error,
- }).pipe(
- Effect.catch((error) => {
- if (error instanceof UnauthorizedError && capturedUrl) {
- pendingOAuthTransports.set(mcpName, transport)
- return Effect.succeed({ authorizationUrl: capturedUrl.toString(), oauthState } satisfies AuthResult)
- }
- return Effect.die(error)
- }),
- )
- })
+ if (!isMcpConfigured(mcpConfig)) {
+ throw new Error(`MCP server ${mcpName} is disabled or missing configuration`)
+ }
- const authenticate = Effect.fn("MCP.authenticate")(function* (mcpName: string) {
- const result = yield* startAuth(mcpName)
- if (!result.authorizationUrl) {
- const client = "client" in result ? result.client : undefined
- const mcpConfig = yield* getMcpConfig(mcpName)
- if (!mcpConfig) {
- yield* Effect.tryPromise(() => client?.close() ?? Promise.resolve()).pipe(Effect.ignore)
- return { status: "failed", error: "MCP config not found after auth" } as Status
- }
+ if (mcpConfig.type !== "remote") {
+ throw new Error(`MCP server ${mcpName} is not a remote server`)
+ }
- const listed = client ? yield* defs(mcpName, client, mcpConfig.timeout) : undefined
- if (!client || !listed) {
- yield* Effect.tryPromise(() => client?.close() ?? Promise.resolve()).pipe(Effect.ignore)
- return { status: "failed", error: "Failed to get tools" } as Status
- }
+ if (mcpConfig.oauth === false) {
+ throw new Error(`MCP server ${mcpName} has OAuth explicitly disabled`)
+ }
+
+ // Start the callback server
+ await McpOAuthCallback.ensureRunning()
+
+ // Generate and store a cryptographically secure state parameter BEFORE creating the provider
+ // The SDK will call provider.state() to read this value
+ const oauthState = Array.from(crypto.getRandomValues(new Uint8Array(32)))
+ .map((b) => b.toString(16).padStart(2, "0"))
+ .join("")
+ await McpAuth.updateOAuthState(mcpName, oauthState)
+
+ // Create a new auth provider for this flow
+ // OAuth config is optional - if not provided, we'll use auto-discovery
+ const oauthConfig = typeof mcpConfig.oauth === "object" ? mcpConfig.oauth : undefined
+ let capturedUrl: URL | undefined
+ const authProvider = new McpOAuthProvider(
+ mcpName,
+ mcpConfig.url,
+ {
+ clientId: oauthConfig?.clientId,
+ clientSecret: oauthConfig?.clientSecret,
+ scope: oauthConfig?.scope,
+ },
+ {
+ onRedirect: async (url) => {
+ capturedUrl = url
+ },
+ },
+ )
+
+ // Create transport with auth provider
+ const transport = new StreamableHTTPClientTransport(new URL(mcpConfig.url), {
+ authProvider,
+ })
- const s = yield* InstanceState.get(state)
- yield* auth.clearOAuthState(mcpName)
- return yield* storeClient(s, mcpName, client, listed, mcpConfig.timeout)
+ // Try to connect - this will trigger the OAuth flow
+ try {
+ const client = new Client({
+ name: "opencode",
+ version: Installation.VERSION,
+ })
+ await client.connect(transport)
+ // If we get here, we're already authenticated
+ return { authorizationUrl: "" }
+ } catch (error) {
+ if (error instanceof UnauthorizedError && capturedUrl) {
+ // Store transport for finishAuth
+ pendingOAuthTransports.set(mcpName, transport)
+ return { authorizationUrl: capturedUrl.toString() }
}
+ throw error
+ }
+ }
- log.info("opening browser for oauth", { mcpName, url: result.authorizationUrl, state: result.oauthState })
+ /**
+ * Complete OAuth authentication after user authorizes in browser.
+ * Opens the browser and waits for callback.
+ */
+ export async function authenticate(mcpName: string): Promise<Status> {
+ const { authorizationUrl } = await startAuth(mcpName)
+
+ if (!authorizationUrl) {
+ // Already authenticated
+ const s = await state()
+ return s.status[mcpName] ?? { status: "connected" }
+ }
- const callbackPromise = McpOAuthCallback.waitForCallback(result.oauthState, mcpName)
+ // Get the state that was already generated and stored in startAuth()
+ const oauthState = await McpAuth.getOAuthState(mcpName)
+ if (!oauthState) {
+ throw new Error("OAuth state not found - this should not happen")
+ }
- yield* Effect.tryPromise(() => open(result.authorizationUrl)).pipe(
- Effect.flatMap((subprocess) =>
- Effect.callback((resume) => {
- const timer = setTimeout(() => resume(Effect.void), 500)
- subprocess.on("error", (err) => {
- clearTimeout(timer)
- resume(Effect.fail(err))
- })
- subprocess.on("exit", (code) => {
- if (code !== null && code !== 0) {
- clearTimeout(timer)
- resume(Effect.fail(new Error(`Browser open failed with exit code ${code}`)))
- }
- })
- }),
- ),
- Effect.catch(() => {
- log.warn("failed to open browser, user must open URL manually", { mcpName })
- return bus.publish(BrowserOpenFailed, { mcpName, url: result.authorizationUrl }).pipe(Effect.ignore)
- }),
- )
+ // The SDK has already added the state parameter to the authorization URL
+ // We just need to open the browser
+ log.info("opening browser for oauth", { mcpName, url: authorizationUrl, state: oauthState })
+
+ // Register the callback BEFORE opening the browser to avoid race condition
+ // when the IdP has an active SSO session and redirects immediately
+ const callbackPromise = McpOAuthCallback.waitForCallback(oauthState)
+
+ try {
+ const subprocess = await open(authorizationUrl)
+ // The open package spawns a detached process and returns immediately.
+ // We need to listen for errors which fire asynchronously:
+ // - "error" event: command not found (ENOENT)
+ // - "exit" with non-zero code: command exists but failed (e.g., no display)
+ await new Promise<void>((resolve, reject) => {
+ // Give the process a moment to fail if it's going to
+ const timeout = setTimeout(() => resolve(), 500)
+ subprocess.on("error", (error) => {
+ clearTimeout(timeout)
+ reject(error)
+ })
+ subprocess.on("exit", (code) => {
+ if (code !== null && code !== 0) {
+ clearTimeout(timeout)
+ reject(new Error(`Browser open failed with exit code ${code}`))
+ }
+ })
+ })
+ } catch (error) {
+ // Browser opening failed (e.g., in remote/headless sessions like SSH, devcontainers)
+ // Emit event so CLI can display the URL for manual opening
+ log.warn("failed to open browser, user must open URL manually", { mcpName, error })
+ Bus.publish(BrowserOpenFailed, { mcpName, url: authorizationUrl })
+ }
- const code = yield* Effect.promise(() => callbackPromise)
+ // Wait for callback using the already-registered promise
+ const code = await callbackPromise
- const storedState = yield* auth.getOAuthState(mcpName)
- if (storedState !== result.oauthState) {
- yield* auth.clearOAuthState(mcpName)
- throw new Error("OAuth state mismatch - potential CSRF attack")
- }
- yield* auth.clearOAuthState(mcpName)
- return yield* finishAuth(mcpName, code)
- })
+ // Validate and clear the state
+ const storedState = await McpAuth.getOAuthState(mcpName)
+ if (storedState !== oauthState) {
+ await McpAuth.clearOAuthState(mcpName)
+ throw new Error("OAuth state mismatch - potential CSRF attack")
+ }
- const finishAuth = Effect.fn("MCP.finishAuth")(function* (mcpName: string, authorizationCode: string) {
- const transport = pendingOAuthTransports.get(mcpName)
- if (!transport) throw new Error(`No pending OAuth flow for MCP server: ${mcpName}`)
+ await McpAuth.clearOAuthState(mcpName)
- const result = yield* Effect.tryPromise({
- try: () => transport.finishAuth(authorizationCode).then(() => true as const),
- catch: (error) => {
- log.error("failed to finish oauth", { mcpName, error })
- return error
- },
- }).pipe(Effect.option)
+ // Finish auth
+ return finishAuth(mcpName, code)
+ }
- if (Option.isNone(result)) {
- return { status: "failed", error: "OAuth completion failed" } as Status
- }
+ /**
+ * Complete OAuth authentication with the authorization code.
+ */
+ export async function finishAuth(mcpName: string, authorizationCode: string): Promise<Status> {
+ const transport = pendingOAuthTransports.get(mcpName)
- yield* auth.clearCodeVerifier(mcpName)
- pendingOAuthTransports.delete(mcpName)
+ if (!transport) {
+ throw new Error(`No pending OAuth flow for MCP server: ${mcpName}`)
+ }
- const mcpConfig = yield* getMcpConfig(mcpName)
- if (!mcpConfig) return { status: "failed", error: "MCP config not found after auth" } as Status
+ try {
+ // Call finishAuth on the transport
+ await transport.finishAuth(authorizationCode)
- return yield* createAndStore(mcpName, mcpConfig)
- })
+ // Clear the code verifier after successful auth
+ await McpAuth.clearCodeVerifier(mcpName)
- const removeAuth = Effect.fn("MCP.removeAuth")(function* (mcpName: string) {
- yield* auth.remove(mcpName)
- McpOAuthCallback.cancelPending(mcpName)
- pendingOAuthTransports.delete(mcpName)
- log.info("removed oauth credentials", { mcpName })
- })
+ // Now try to reconnect
+ const cfg = await Config.get()
+ const mcpConfig = cfg.mcp?.[mcpName]
- const supportsOAuth = Effect.fn("MCP.supportsOAuth")(function* (mcpName: string) {
- const mcpConfig = yield* getMcpConfig(mcpName)
- if (!mcpConfig) return false
- return mcpConfig.type === "remote" && mcpConfig.oauth !== false
- })
+ if (!mcpConfig) {
+ throw new Error(`MCP server not found: ${mcpName}`)
+ }
- const hasStoredTokens = Effect.fn("MCP.hasStoredTokens")(function* (mcpName: string) {
- const entry = yield* auth.get(mcpName)
- return !!entry?.tokens
- })
+ if (!isMcpConfigured(mcpConfig)) {
+ throw new Error(`MCP server ${mcpName} is disabled or missing configuration`)
+ }
- const getAuthStatus = Effect.fn("MCP.getAuthStatus")(function* (mcpName: string) {
- const entry = yield* auth.get(mcpName)
- if (!entry?.tokens) return "not_authenticated" as AuthStatus
- const expired = yield* auth.isTokenExpired(mcpName)
- return (expired ? "expired" : "authenticated") as AuthStatus
- })
+ // Re-add the MCP server to establish connection
+ pendingOAuthTransports.delete(mcpName)
+ const result = await add(mcpName, mcpConfig)
- return Service.of({
- status,
- clients,
- tools,
- prompts,
- resources,
- add,
- connect,
- disconnect,
- getPrompt,
- readResource,
- startAuth,
- authenticate,
- finishAuth,
- removeAuth,
- supportsOAuth,
- hasStoredTokens,
- getAuthStatus,
- })
- }),
-)
+ const statusRecord = result.status as Record<string, Status>
+ return statusRecord[mcpName] ?? { status: "failed", error: "Unknown error after auth" }
+ } catch (error) {
+ log.error("failed to finish oauth", { mcpName, error })
+ return {
+ status: "failed",
+ error: error instanceof Error ? error.message : String(error),
+ }
+ }
+ }
-export type AuthStatus = "authenticated" | "expired" | "not_authenticated"
+ /**
+ * Remove OAuth credentials for an MCP server.
+ */
+ export async function removeAuth(mcpName: string): Promise<void> {
+ await McpAuth.remove(mcpName)
+ McpOAuthCallback.cancelPending(mcpName)
+ pendingOAuthTransports.delete(mcpName)
+ await McpAuth.clearOAuthState(mcpName)
+ log.info("removed oauth credentials", { mcpName })
+ }
-// --- Per-service runtime ---
+ /**
+ * Check if an MCP server supports OAuth (remote servers support OAuth by default unless explicitly disabled).
+ */
+ export async function supportsOAuth(mcpName: string): Promise<boolean> {
+ const cfg = await Config.get()
+ const mcpConfig = cfg.mcp?.[mcpName]
+ if (!mcpConfig) return false
+ if (!isMcpConfigured(mcpConfig)) return false
+ return mcpConfig.type === "remote" && mcpConfig.oauth !== false
+ }
-export const defaultLayer = layer.pipe(
- Layer.provide(McpAuth.layer),
- Layer.provide(Bus.layer),
- Layer.provide(Config.defaultLayer),
- Layer.provide(CrossSpawnSpawner.defaultLayer),
- Layer.provide(AppFileSystem.defaultLayer),
-)
+ /**
+ * Check if an MCP server has stored OAuth tokens.
+ */
+ export async function hasStoredTokens(mcpName: string): Promise<boolean> {
+ const entry = await McpAuth.get(mcpName)
+ return !!entry?.tokens
+ }
+
+ export type AuthStatus = "authenticated" | "expired" | "not_authenticated"
-export * as MCP from "."
+ /**
+ * Get the authentication status for an MCP server.
+ */
+ export async function getAuthStatus(mcpName: string): Promise<AuthStatus> {
+ const hasTokens = await hasStoredTokens(mcpName)
+ if (!hasTokens) return "not_authenticated"
+ const expired = await McpAuth.isTokenExpired(mcpName)
+ return expired ? "expired" : "authenticated"
+ }
+}
diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts
index 9aa1b6304c12..1fe16c606999 100644
--- a/packages/opencode/src/provider/provider.ts
+++ b/packages/opencode/src/provider/provider.ts
@@ -1,166 +1,176 @@
+import z from "zod"
import os from "os"
import fuzzysort from "fuzzysort"
-import { Config } from "../config"
+import { Config } from "../config/config"
import { mapValues, mergeDeep, omit, pickBy, sortBy } from "remeda"
import { NoSuchModelError, type Provider as SDK } from "ai"
-import { Log } from "../util"
-import { Npm } from "@opencode-ai/core/npm"
-import { Hash } from "@opencode-ai/core/util/hash"
+import { Log } from "../util/log"
+import { BunProc } from "../bun"
+import { Hash } from "../util/hash"
import { Plugin } from "../plugin"
-import { type LanguageModelV3 } from "@ai-sdk/provider"
-import * as ModelsDev from "./models"
+import { NamedError } from "@opencode-ai/util/error"
+import { ModelsDev } from "./models"
import { Auth } from "../auth"
import { Env } from "../env"
-import { InstallationVersion } from "@opencode-ai/core/installation/version"
-import { Flag } from "@opencode-ai/core/flag/flag"
-import { zod } from "@/util/effect-zod"
-import { namedSchemaError } from "@/util/named-schema-error"
+import { Instance } from "../project/instance"
+import { Flag } from "../flag/flag"
import { iife } from "@/util/iife"
-import { Global } from "@opencode-ai/core/global"
+import { Global } from "../global"
import path from "path"
-import { pathToFileURL } from "url"
-import { Effect, Layer, Context, Schema, Types } from "effect"
-import { EffectBridge } from "@/effect"
-import { InstanceState } from "@/effect"
-import { AppFileSystem } from "@opencode-ai/core/filesystem"
-import { isRecord } from "@/util/record"
-import { withStatics } from "@/util/schema"
-
-import * as ProviderTransform from "./transform"
+import { Filesystem } from "../util/filesystem"
+
+// Direct imports for bundled providers
+import { createAmazonBedrock, type AmazonBedrockProviderSettings } from "@ai-sdk/amazon-bedrock"
+import { createAnthropic } from "@ai-sdk/anthropic"
+import { createAzure } from "@ai-sdk/azure"
+import { createGoogleGenerativeAI } from "@ai-sdk/google"
+import { createVertex } from "@ai-sdk/google-vertex"
+import { createVertexAnthropic } from "@ai-sdk/google-vertex/anthropic"
+import { createOpenAI } from "@ai-sdk/openai"
+import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
+import { createOpenRouter, type LanguageModelV2 } from "@openrouter/ai-sdk-provider"
+import { createOpenaiCompatible as createGitHubCopilotOpenAICompatible } from "./sdk/copilot"
+import { createXai } from "@ai-sdk/xai"
+import { createMistral } from "@ai-sdk/mistral"
+import { createGroq } from "@ai-sdk/groq"
+import { createDeepInfra } from "@ai-sdk/deepinfra"
+import { createCerebras } from "@ai-sdk/cerebras"
+import { createCohere } from "@ai-sdk/cohere"
+import { createGateway } from "@ai-sdk/gateway"
+import { createTogetherAI } from "@ai-sdk/togetherai"
+import { createPerplexity } from "@ai-sdk/perplexity"
+import { createVercel } from "@ai-sdk/vercel"
+import {
+ createGitLab,
+ VERSION as GITLAB_PROVIDER_VERSION,
+ isWorkflowModel,
+ discoverWorkflowModels,
+} from "gitlab-ai-provider"
+import { fromNodeProviderChain } from "@aws-sdk/credential-providers"
+import { GoogleAuth } from "google-auth-library"
+import { ProviderTransform } from "./transform"
+import { Installation } from "../installation"
import { ModelID, ProviderID } from "./schema"
-const log = Log.create({ service: "provider" })
+export namespace Provider {
+ const log = Log.create({ service: "provider" })
-function shouldUseCopilotResponsesApi(modelID: string): boolean {
- const match = /^gpt-(\d+)/.exec(modelID)
- if (!match) return false
- return Number(match[1]) >= 5 && !modelID.startsWith("gpt-5-mini")
-}
+ function shouldUseCopilotResponsesApi(modelID: string): boolean {
+ const match = /^gpt-(\d+)/.exec(modelID)
+ if (!match) return false
+ return Number(match[1]) >= 5 && !modelID.startsWith("gpt-5-mini")
+ }
-function wrapSSE(res: Response, ms: number, ctl: AbortController) {
- if (typeof ms !== "number" || ms <= 0) return res
- if (!res.body) return res
- if (!res.headers.get("content-type")?.includes("text/event-stream")) return res
-
- const reader = res.body.getReader()
- const body = new ReadableStream({
- async pull(ctrl) {
- const part = await new Promise>>((resolve, reject) => {
- const id = setTimeout(() => {
- const err = new Error("SSE read timed out")
- ctl.abort(err)
- void reader.cancel(err)
- reject(err)
- }, ms)
-
- reader.read().then(
- (part) => {
- clearTimeout(id)
- resolve(part)
- },
- (err) => {
- clearTimeout(id)
+ function wrapSSE(res: Response, ms: number, ctl: AbortController) {
+ if (typeof ms !== "number" || ms <= 0) return res
+ if (!res.body) return res
+ if (!res.headers.get("content-type")?.includes("text/event-stream")) return res
+
+ const reader = res.body.getReader()
+ const body = new ReadableStream({
+ async pull(ctrl) {
+ const part = await new Promise>>((resolve, reject) => {
+ const id = setTimeout(() => {
+ const err = new Error("SSE read timed out")
+ ctl.abort(err)
+ void reader.cancel(err)
reject(err)
- },
- )
- })
-
- if (part.done) {
- ctrl.close()
- return
- }
-
- ctrl.enqueue(part.value)
- },
- async cancel(reason) {
- ctl.abort(reason)
- await reader.cancel(reason)
- },
- })
+ }, ms)
+
+ reader.read().then(
+ (part) => {
+ clearTimeout(id)
+ resolve(part)
+ },
+ (err) => {
+ clearTimeout(id)
+ reject(err)
+ },
+ )
+ })
- return new Response(body, {
- headers: new Headers(res.headers),
- status: res.status,
- statusText: res.statusText,
- })
-}
+ if (part.done) {
+ ctrl.close()
+ return
+ }
-type BundledSDK = {
- languageModel(modelId: string): LanguageModelV3
-}
+ ctrl.enqueue(part.value)
+ },
+ async cancel(reason) {
+ ctl.abort(reason)
+ await reader.cancel(reason)
+ },
+ })
-const BUNDLED_PROVIDERS: Record Promise<(opts: any) => BundledSDK>> = {
- "@ai-sdk/amazon-bedrock": () => import("@ai-sdk/amazon-bedrock").then((m) => m.createAmazonBedrock),
- "@ai-sdk/anthropic": () => import("@ai-sdk/anthropic").then((m) => m.createAnthropic),
- "@ai-sdk/azure": () => import("@ai-sdk/azure").then((m) => m.createAzure),
- "@ai-sdk/google": () => import("@ai-sdk/google").then((m) => m.createGoogleGenerativeAI),
- "@ai-sdk/google-vertex": () => import("@ai-sdk/google-vertex").then((m) => m.createVertex),
- "@ai-sdk/google-vertex/anthropic": () =>
- import("@ai-sdk/google-vertex/anthropic").then((m) => m.createVertexAnthropic),
- "@ai-sdk/openai": () => import("@ai-sdk/openai").then((m) => m.createOpenAI),
- "@ai-sdk/openai-compatible": () => import("@ai-sdk/openai-compatible").then((m) => m.createOpenAICompatible),
- "@openrouter/ai-sdk-provider": () => import("@openrouter/ai-sdk-provider").then((m) => m.createOpenRouter),
- "@ai-sdk/xai": () => import("@ai-sdk/xai").then((m) => m.createXai),
- "@ai-sdk/mistral": () => import("@ai-sdk/mistral").then((m) => m.createMistral),
- "@ai-sdk/groq": () => import("@ai-sdk/groq").then((m) => m.createGroq),
- "@ai-sdk/deepinfra": () => import("@ai-sdk/deepinfra").then((m) => m.createDeepInfra),
- "@ai-sdk/cerebras": () => import("@ai-sdk/cerebras").then((m) => m.createCerebras),
- "@ai-sdk/cohere": () => import("@ai-sdk/cohere").then((m) => m.createCohere),
- "@ai-sdk/gateway": () => import("@ai-sdk/gateway").then((m) => m.createGateway),
- "@ai-sdk/togetherai": () => import("@ai-sdk/togetherai").then((m) => m.createTogetherAI),
- "@ai-sdk/perplexity": () => import("@ai-sdk/perplexity").then((m) => m.createPerplexity),
- "@ai-sdk/vercel": () => import("@ai-sdk/vercel").then((m) => m.createVercel),
- "@ai-sdk/alibaba": () => import("@ai-sdk/alibaba").then((m) => m.createAlibaba),
- "gitlab-ai-provider": () => import("gitlab-ai-provider").then((m) => m.createGitLab),
- "@ai-sdk/github-copilot": () => import("./sdk/copilot").then((m) => m.createOpenaiCompatible),
- "venice-ai-sdk-provider": () => import("venice-ai-sdk-provider").then((m) => m.createVenice),
-}
+ return new Response(body, {
+ headers: new Headers(res.headers),
+ status: res.status,
+ statusText: res.statusText,
+ })
+ }
-type CustomModelLoader = (sdk: any, modelID: string, options?: Record) => Promise
-type CustomVarsLoader = (options: Record) => Record
-type CustomDiscoverModels = () => Promise>
-type CustomLoader = (provider: Info) => Effect.Effect<{
- autoload: boolean
- getModel?: CustomModelLoader
- vars?: CustomVarsLoader
- options?: Record
- discoverModels?: CustomDiscoverModels
-}>
-
-type CustomDep = {
- auth: (id: string) => Effect.Effect
- config: () => Effect.Effect
- env: () => Effect.Effect>
- get: (key: string) => Effect.Effect
-}
+ const BUNDLED_PROVIDERS: Record SDK> = {
+ "@ai-sdk/amazon-bedrock": createAmazonBedrock,
+ "@ai-sdk/anthropic": createAnthropic,
+ "@ai-sdk/azure": createAzure,
+ "@ai-sdk/google": createGoogleGenerativeAI,
+ "@ai-sdk/google-vertex": createVertex,
+ "@ai-sdk/google-vertex/anthropic": createVertexAnthropic,
+ "@ai-sdk/openai": createOpenAI,
+ "@ai-sdk/openai-compatible": createOpenAICompatible,
+ "@openrouter/ai-sdk-provider": createOpenRouter,
+ "@ai-sdk/xai": createXai,
+ "@ai-sdk/mistral": createMistral,
+ "@ai-sdk/groq": createGroq,
+ "@ai-sdk/deepinfra": createDeepInfra,
+ "@ai-sdk/cerebras": createCerebras,
+ "@ai-sdk/cohere": createCohere,
+ "@ai-sdk/gateway": createGateway,
+ "@ai-sdk/togetherai": createTogetherAI,
+ "@ai-sdk/perplexity": createPerplexity,
+ "@ai-sdk/vercel": createVercel,
+ "gitlab-ai-provider": createGitLab,
+ // @ts-ignore (TODO: kill this code so we dont have to maintain it)
+ "@ai-sdk/github-copilot": createGitHubCopilotOpenAICompatible,
+ }
-function useLanguageModel(sdk: any) {
- return sdk.responses === undefined && sdk.chat === undefined
-}
+ type CustomModelLoader = (sdk: any, modelID: string, options?: Record) => Promise
+ type CustomVarsLoader = (options: Record) => Record
+ type CustomDiscoverModels = () => Promise>
+ type CustomLoader = (provider: Info) => Promise<{
+ autoload: boolean
+ getModel?: CustomModelLoader
+ vars?: CustomVarsLoader
+ options?: Record
+ discoverModels?: CustomDiscoverModels
+ }>
+
+ function useLanguageModel(sdk: any) {
+ return sdk.responses === undefined && sdk.chat === undefined
+ }
-function custom(dep: CustomDep): Record {
- return {
- anthropic: () =>
- Effect.succeed({
+ const CUSTOM_LOADERS: Record = {
+ async anthropic() {
+ return {
autoload: false,
options: {
headers: {
"anthropic-beta": "interleaved-thinking-2025-05-14,fine-grained-tool-streaming-2025-05-14",
},
},
- }),
- opencode: Effect.fnUntraced(function* (input: Info) {
- const env = yield* dep.env()
- const hasKey = iife(() => {
+ }
+ },
+ async opencode(input) {
+ const hasKey = await (async () => {
+ const env = Env.all()
if (input.env.some((item) => env[item])) return true
+ if (await Auth.get(input.id)) return true
+ const config = await Config.get()
+ if (config.provider?.["opencode"]?.options?.apiKey) return true
return false
- })
- const ok =
- hasKey ||
- Boolean(yield* dep.auth(input.id)) ||
- Boolean((yield* dep.config()).provider?.["opencode"]?.options?.apiKey)
+ })()
- if (!ok) {
+ if (!hasKey) {
for (const [key, value] of Object.entries(input.models)) {
if (value.cost.input === 0) continue
delete input.models[key]
@@ -169,40 +179,42 @@ function custom(dep: CustomDep): Record {
return {
autoload: Object.keys(input.models).length > 0,
- options: ok ? {} : { apiKey: "public" },
+ options: hasKey ? {} : { apiKey: "public" },
}
- }),
- openai: () =>
- Effect.succeed({
+ },
+ openai: async () => {
+ return {
autoload: false,
async getModel(sdk: any, modelID: string, _options?: Record) {
return sdk.responses(modelID)
},
options: {},
- }),
- xai: () =>
- Effect.succeed({
+ }
+ },
+ xai: async () => {
+ return {
autoload: false,
async getModel(sdk: any, modelID: string, _options?: Record) {
return sdk.responses(modelID)
},
options: {},
- }),
- "github-copilot": () =>
- Effect.succeed({
+ }
+ },
+ "github-copilot": async () => {
+ return {
autoload: false,
async getModel(sdk: any, modelID: string, _options?: Record) {
if (useLanguageModel(sdk)) return sdk.languageModel(modelID)
return shouldUseCopilotResponsesApi(modelID) ? sdk.responses(modelID) : sdk.chat(modelID)
},
options: {},
- }),
- azure: Effect.fnUntraced(function* (provider: Info) {
- const env = yield* dep.env()
+ }
+ },
+ azure: async (provider) => {
const resource = iife(() => {
const name = provider.options?.resourceName
if (typeof name === "string" && name.trim() !== "") return name
- return env["AZURE_RESOURCE_NAME"]
+ return Env.get("AZURE_RESOURCE_NAME")
})
return {
@@ -222,9 +234,9 @@ function custom(dep: CustomDep): Record {
}
},
}
- }),
- "azure-cognitive-services": Effect.fnUntraced(function* () {
- const resourceName = yield* dep.get("AZURE_COGNITIVE_SERVICES_RESOURCE_NAME")
+ },
+ "azure-cognitive-services": async () => {
+ const resourceName = Env.get("AZURE_COGNITIVE_SERVICES_RESOURCE_NAME")
return {
autoload: false,
async getModel(sdk: any, modelID: string, options?: Record) {
@@ -239,23 +251,24 @@ function custom(dep: CustomDep): Record {
baseURL: resourceName ? `https://${resourceName}.cognitiveservices.azure.com/openai` : undefined,
},
}
- }),
- "amazon-bedrock": Effect.fnUntraced(function* () {
- const providerConfig = (yield* dep.config()).provider?.["amazon-bedrock"]
- const auth = yield* dep.auth("amazon-bedrock")
- const env = yield* dep.env()
+ },
+ "amazon-bedrock": async () => {
+ const config = await Config.get()
+ const providerConfig = config.provider?.["amazon-bedrock"]
+
+ const auth = await Auth.get("amazon-bedrock")
// Region precedence: 1) config file, 2) env var, 3) default
const configRegion = providerConfig?.options?.region
- const envRegion = env["AWS_REGION"]
+ const envRegion = Env.get("AWS_REGION")
const defaultRegion = configRegion ?? envRegion ?? "us-east-1"
// Profile: config file takes precedence over env var
const configProfile = providerConfig?.options?.profile
- const envProfile = env["AWS_PROFILE"]
+ const envProfile = Env.get("AWS_PROFILE")
const profile = configProfile ?? envProfile
- const awsAccessKeyId = env["AWS_ACCESS_KEY_ID"]
+ const awsAccessKeyId = Env.get("AWS_ACCESS_KEY_ID")
// TODO: Using process.env directly because Env.set only updates a process.env shallow copy,
// until the scope of the Env API is clarified (test only or runtime?)
@@ -269,7 +282,7 @@ function custom(dep: CustomDep): Record {
return undefined
})
- const awsWebIdentityTokenFile = env["AWS_WEB_IDENTITY_TOKEN_FILE"]
+ const awsWebIdentityTokenFile = Env.get("AWS_WEB_IDENTITY_TOKEN_FILE")
const containerCreds = Boolean(
process.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI || process.env.AWS_CONTAINER_CREDENTIALS_FULL_URI,
@@ -278,9 +291,7 @@ function custom(dep: CustomDep): Record {
if (!profile && !awsAccessKeyId && !awsBearerToken && !awsWebIdentityTokenFile && !containerCreds)
return { autoload: false }
- const { fromNodeProviderChain } = yield* Effect.promise(() => import("@aws-sdk/credential-providers"))
-
- const providerOptions: Record = {
+ const providerOptions: AmazonBedrockProviderSettings = {
region: defaultRegion,
}
@@ -388,30 +399,9 @@ function custom(dep: CustomDep): Record {
return sdk.languageModel(modelID)
},
}
- }),
- llmgateway: () =>
- Effect.succeed({
- autoload: false,
- options: {
- headers: {
- "HTTP-Referer": "https://opencode.ai/",
- "X-Title": "opencode",
- "X-Source": "opencode",
- },
- },
- }),
- openrouter: () =>
- Effect.succeed({
- autoload: false,
- options: {
- headers: {
- "HTTP-Referer": "https://opencode.ai/",
- "X-Title": "opencode",
- },
- },
- }),
- nvidia: () =>
- Effect.succeed({
+ },
+ openrouter: async () => {
+ return {
autoload: false,
options: {
headers: {
@@ -419,9 +409,10 @@ function custom(dep: CustomDep): Record {
"X-Title": "opencode",
},
},
- }),
- vercel: () =>
- Effect.succeed({
+ }
+ },
+ vercel: async () => {
+ return {
autoload: false,
options: {
headers: {
@@ -429,17 +420,20 @@ function custom(dep: CustomDep): Record {
"x-title": "opencode",
},
},
- }),
- "google-vertex": Effect.fnUntraced(function* (provider: Info) {
- const env = yield* dep.env()
+ }
+ },
+ "google-vertex": async (provider) => {
const project =
- provider.options?.project ?? env["GOOGLE_CLOUD_PROJECT"] ?? env["GCP_PROJECT"] ?? env["GCLOUD_PROJECT"]
+ provider.options?.project ??
+ Env.get("GOOGLE_CLOUD_PROJECT") ??
+ Env.get("GCP_PROJECT") ??
+ Env.get("GCLOUD_PROJECT")
const location = String(
provider.options?.location ??
- env["GOOGLE_VERTEX_LOCATION"] ??
- env["GOOGLE_CLOUD_LOCATION"] ??
- env["VERTEX_LOCATION"] ??
+ Env.get("GOOGLE_VERTEX_LOCATION") ??
+ Env.get("GOOGLE_CLOUD_LOCATION") ??
+ Env.get("VERTEX_LOCATION") ??
"us-central1",
)
@@ -459,7 +453,6 @@ function custom(dep: CustomDep): Record {
project,
location,
fetch: async (input: RequestInfo | URL, init?: RequestInit) => {
- const { GoogleAuth } = await import("google-auth-library")
const auth = new GoogleAuth()
const client = await auth.getApplicationDefault()
const token = await client.credential.getAccessToken()
@@ -475,11 +468,10 @@ function custom(dep: CustomDep): Record {
return sdk.languageModel(id)
},
}
- }),
- "google-vertex-anthropic": Effect.fnUntraced(function* () {
- const env = yield* dep.env()
- const project = env["GOOGLE_CLOUD_PROJECT"] ?? env["GCP_PROJECT"] ?? env["GCLOUD_PROJECT"]
- const location = env["GOOGLE_CLOUD_LOCATION"] ?? env["VERTEX_LOCATION"] ?? "global"
+ },
+ "google-vertex-anthropic": async () => {
+ const project = Env.get("GOOGLE_CLOUD_PROJECT") ?? Env.get("GCP_PROJECT") ?? Env.get("GCLOUD_PROJECT")
+ const location = Env.get("GOOGLE_CLOUD_LOCATION") ?? Env.get("VERTEX_LOCATION") ?? "global"
const autoload = Boolean(project)
if (!autoload) return { autoload: false }
return {
@@ -493,9 +485,9 @@ function custom(dep: CustomDep): Record {
return sdk.languageModel(id)
},
}
- }),
- "sap-ai-core": Effect.fnUntraced(function* () {
- const auth = yield* dep.auth("sap-ai-core")
+ },
+ "sap-ai-core": async () => {
+ const auth = await Auth.get("sap-ai-core")
// TODO: Using process.env directly because Env.set only updates a shallow copy (not process.env),
// until the scope of the Env API is clarified (test only or runtime?)
const envServiceKey = iife(() => {
@@ -517,9 +509,9 @@ function custom(dep: CustomDep): Record {
return sdk(modelID)
},
}
- }),
- zenmux: () =>
- Effect.succeed({
+ },
+ zenmux: async () => {
+ return {
autoload: false,
options: {
headers: {
@@ -527,57 +519,48 @@ function custom(dep: CustomDep): Record {
"X-Title": "opencode",
},
},
- }),
- gitlab: Effect.fnUntraced(function* (input: Info) {
- const {
- VERSION: GITLAB_PROVIDER_VERSION,
- isWorkflowModel,
- discoverWorkflowModels,
- } = yield* Effect.promise(() => import("gitlab-ai-provider"))
-
- const instanceUrl = (yield* dep.get("GITLAB_INSTANCE_URL")) || "https://gitlab.com"
+ }
+ },
+ gitlab: async (input) => {
+ const instanceUrl = Env.get("GITLAB_INSTANCE_URL") || "https://gitlab.com"
- const auth = yield* dep.auth(input.id)
- const apiKey = yield* Effect.sync(() => {
+ const auth = await Auth.get(input.id)
+ const apiKey = await (async () => {
if (auth?.type === "oauth") return auth.access
if (auth?.type === "api") return auth.key
- return undefined
- })
- const token = apiKey ?? (yield* dep.get("GITLAB_TOKEN"))
+ return Env.get("GITLAB_TOKEN")
+ })()
- const providerConfig = (yield* dep.config()).provider?.["gitlab"]
- const directory = yield* InstanceState.directory
+ const config = await Config.get()
+ const providerConfig = config.provider?.["gitlab"]
const aiGatewayHeaders = {
- "User-Agent": `opencode/${InstallationVersion} gitlab-ai-provider/${GITLAB_PROVIDER_VERSION} (${os.platform()} ${os.release()}; ${os.arch()})`,
+ "User-Agent": `opencode/${Installation.VERSION} gitlab-ai-provider/${GITLAB_PROVIDER_VERSION} (${os.platform()} ${os.release()}; ${os.arch()})`,
"anthropic-beta": "context-1m-2025-08-07",
- ...providerConfig?.options?.aiGatewayHeaders,
+ ...(providerConfig?.options?.aiGatewayHeaders || {}),
}
const featureFlags = {
duo_agent_platform_agentic_chat: true,
duo_agent_platform: true,
- ...providerConfig?.options?.featureFlags,
+ ...(providerConfig?.options?.featureFlags || {}),
}
return {
- autoload: !!token,
+ autoload: !!apiKey,
options: {
instanceUrl,
- apiKey: token,
+ apiKey,
aiGatewayHeaders,
featureFlags,
},
- async getModel(sdk: any, modelID: string, options?: Record) {
+ async getModel(sdk: ReturnType, modelID: string, options?: Record) {
if (modelID.startsWith("duo-workflow-")) {
- const workflowRef = typeof options?.workflowRef === "string" ? options.workflowRef : undefined
+ const workflowRef = options?.workflowRef as string | undefined
// Use the static mapping if it exists, otherwise use duo-workflow with selectedModelRef
const sdkModelID = isWorkflowModel(modelID) ? modelID : "duo-workflow"
- const workflowDefinition =
- typeof options?.workflowDefinition === "string" ? options.workflowDefinition : undefined
const model = sdk.workflowChat(sdkModelID, {
featureFlags,
- workflowDefinition,
})
if (workflowRef) {
model.selectedModelRef = workflowRef
@@ -601,16 +584,14 @@ function custom(dep: CustomDep): Record {
auth?.type === "api" ? { "PRIVATE-TOKEN": token } : { Authorization: `Bearer ${token}` }
log.info("gitlab model discovery starting", { instanceUrl })
- const result = await discoverWorkflowModels({ instanceUrl, getHeaders }, { workingDirectory: directory })
+ const result = await discoverWorkflowModels(
+ { instanceUrl, getHeaders },
+ { workingDirectory: Instance.directory },
+ )
if (!result.models.length) {
log.info("gitlab model discovery skipped: no models found", {
- project: result.project
- ? {
- id: result.project.id,
- path: result.project.pathWithNamespace,
- }
- : null,
+ project: result.project ? { id: result.project.id, path: result.project.pathWithNamespace } : null,
})
return {}
}
@@ -638,20 +619,8 @@ function custom(dep: CustomDep): Record {
reasoning: true,
attachment: true,
toolcall: true,
- input: {
- text: true,
- audio: false,
- image: true,
- video: false,
- pdf: true,
- },
- output: {
- text: true,
- audio: false,
- image: false,
- video: false,
- pdf: false,
- },
+ input: { text: true, audio: false, image: true, video: false, pdf: true },
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
interleaved: false,
},
release_date: "",
@@ -671,28 +640,15 @@ function custom(dep: CustomDep): Record {
}
},
}
- }),
- "cloudflare-workers-ai": Effect.fnUntraced(function* (input: Info) {
- // When baseURL is already configured (e.g. corporate config routing through a proxy/gateway),
- // skip the account ID check because the URL is already fully specified.
- if (input.options?.baseURL) return { autoload: false }
-
- const auth = yield* dep.auth(input.id)
- const env = yield* dep.env()
- const accountId = env["CLOUDFLARE_ACCOUNT_ID"] || (auth?.type === "api" ? auth.metadata?.accountId : undefined)
- if (!accountId)
- return {
- autoload: false,
- async getModel() {
- throw new Error(
- "CLOUDFLARE_ACCOUNT_ID is missing. Set it with: export CLOUDFLARE_ACCOUNT_ID=",
- )
- },
- }
+ },
+ "cloudflare-workers-ai": async (input) => {
+ const accountId = Env.get("CLOUDFLARE_ACCOUNT_ID")
+ if (!accountId) return { autoload: false }
- const apiKey = yield* Effect.gen(function* () {
- const envToken = env["CLOUDFLARE_API_KEY"]
+ const apiKey = await iife(async () => {
+ const envToken = Env.get("CLOUDFLARE_API_KEY")
if (envToken) return envToken
+ const auth = await Auth.get(input.id)
if (auth?.type === "api") return auth.key
return undefined
})
@@ -701,9 +657,6 @@ function custom(dep: CustomDep): Record {
autoload: !!apiKey,
options: {
apiKey,
- headers: {
- "User-Agent": `opencode/${InstallationVersion} cloudflare-workers-ai (${os.platform()} ${os.release()}; ${os.arch()})`,
- },
},
async getModel(sdk: any, modelID: string) {
return sdk.languageModel(modelID)
@@ -714,38 +667,21 @@ function custom(dep: CustomDep): Record {
}
},
}
- }),
- "cloudflare-ai-gateway": Effect.fnUntraced(function* (input: Info) {
- // When baseURL is already configured (e.g. corporate config), skip the ID checks.
- if (input.options?.baseURL) return { autoload: false }
-
- const auth = yield* dep.auth(input.id)
- const env = yield* dep.env()
- const accountId = env["CLOUDFLARE_ACCOUNT_ID"] || (auth?.type === "api" ? auth.metadata?.accountId : undefined)
- const gateway = env["CLOUDFLARE_GATEWAY_ID"] || (auth?.type === "api" ? auth.metadata?.gatewayId : undefined)
-
- if (!accountId || !gateway) {
- const missing = [
- !accountId ? "CLOUDFLARE_ACCOUNT_ID" : undefined,
- !gateway ? "CLOUDFLARE_GATEWAY_ID" : undefined,
- ].filter((x): x is string => Boolean(x))
- return {
- autoload: false,
- async getModel() {
- throw new Error(
- `${missing.join(" and ")} missing. Set with: ${missing.map((x) => `export ${x}=`).join(" && ")}`,
- )
- },
- }
- }
+ },
+ "cloudflare-ai-gateway": async (input) => {
+ const accountId = Env.get("CLOUDFLARE_ACCOUNT_ID")
+ const gateway = Env.get("CLOUDFLARE_GATEWAY_ID")
+
+ if (!accountId || !gateway) return { autoload: false }
// Get API token from env or auth - required for authenticated gateways
- const apiToken = yield* Effect.gen(function* () {
- const envToken = env["CLOUDFLARE_API_TOKEN"] || env["CF_AIG_TOKEN"]
+ const apiToken = await (async () => {
+ const envToken = Env.get("CLOUDFLARE_API_TOKEN") || Env.get("CF_AIG_TOKEN")
if (envToken) return envToken
+ const auth = await Auth.get(input.id)
if (auth?.type === "api") return auth.key
return undefined
- })
+ })()
if (!apiToken) {
throw new Error(
@@ -755,8 +691,8 @@ function custom(dep: CustomDep): Record {
}
// Use official ai-gateway-provider package (v2.x for AI SDK v5 compatibility)
- const { createAiGateway } = yield* Effect.promise(() => import("ai-gateway-provider"))
- const { createUnified } = yield* Effect.promise(() => import("ai-gateway-provider/providers/unified"))
+ const { createAiGateway } = await import("ai-gateway-provider")
+ const { createUnified } = await import("ai-gateway-provider/providers/unified")
const metadata = iife(() => {
if (input.options?.metadata) return input.options.metadata
@@ -772,9 +708,6 @@ function custom(dep: CustomDep): Record {
cacheKey: input.options?.cacheKey,
skipCache: input.options?.skipCache,
collectLog: input.options?.collectLog,
- headers: {
- "User-Agent": `opencode/${InstallationVersion} cloudflare-ai-gateway (${os.platform()} ${os.release()}; ${os.arch()})`,
- },
}
const aigateway = createAiGateway({
@@ -793,18 +726,19 @@ function custom(dep: CustomDep): Record {
},
options: {},
}
- }),
- cerebras: () =>
- Effect.succeed({
+ },
+ cerebras: async () => {
+ return {
autoload: false,
options: {
headers: {
"X-Cerebras-3rd-Party-Integration": "opencode",
},
},
- }),
- kilo: () =>
- Effect.succeed({
+ }
+ },
+ kilo: async () => {
+ return {
autoload: false,
options: {
headers: {
@@ -812,796 +746,652 @@ function custom(dep: CustomDep): Record {
"X-Title": "opencode",
},
},
- }),
- }
-}
-
-const ProviderApiInfo = Schema.Struct({
- id: Schema.String,
- url: Schema.String,
- npm: Schema.String,
-})
-
-const ProviderModalities = Schema.Struct({
- text: Schema.Boolean,
- audio: Schema.Boolean,
- image: Schema.Boolean,
- video: Schema.Boolean,
- pdf: Schema.Boolean,
-})
-
-const ProviderInterleaved = Schema.Union([
- Schema.Boolean,
- Schema.Struct({
- field: Schema.Literals(["reasoning_content", "reasoning_details"]),
- }),
-])
-
-const ProviderCapabilities = Schema.Struct({
- temperature: Schema.Boolean,
- reasoning: Schema.Boolean,
- attachment: Schema.Boolean,
- toolcall: Schema.Boolean,
- input: ProviderModalities,
- output: ProviderModalities,
- interleaved: ProviderInterleaved,
-})
-
-const ProviderCacheCost = Schema.Struct({
- read: Schema.Number,
- write: Schema.Number,
-})
-
-const ProviderCost = Schema.Struct({
- input: Schema.Number,
- output: Schema.Number,
- cache: ProviderCacheCost,
- experimentalOver200K: Schema.optional(
- Schema.Struct({
- input: Schema.Number,
- output: Schema.Number,
- cache: ProviderCacheCost,
- }),
- ),
-})
-
-const ProviderLimit = Schema.Struct({
- context: Schema.Number,
- input: Schema.optional(Schema.Number),
- output: Schema.Number,
-})
-
-export const Model = Schema.Struct({
- id: ModelID,
- providerID: ProviderID,
- api: ProviderApiInfo,
- name: Schema.String,
- family: Schema.optional(Schema.String),
- capabilities: ProviderCapabilities,
- cost: ProviderCost,
- limit: ProviderLimit,
- status: Schema.Literals(["alpha", "beta", "deprecated", "active"]),
- options: Schema.Record(Schema.String, Schema.Any),
- headers: Schema.Record(Schema.String, Schema.String),
- release_date: Schema.String,
- variants: Schema.optional(Schema.Record(Schema.String, Schema.Record(Schema.String, Schema.Any))),
-})
- .annotate({ identifier: "Model" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type Model = Types.DeepMutable>
-
-export const Info = Schema.Struct({
- id: ProviderID,
- name: Schema.String,
- source: Schema.Literals(["env", "config", "custom", "api"]),
- env: Schema.Array(Schema.String),
- key: Schema.optional(Schema.String),
- options: Schema.Record(Schema.String, Schema.Any),
- models: Schema.Record(Schema.String, Model),
-})
- .annotate({ identifier: "Provider" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type Info = Types.DeepMutable>
-
-const DefaultModelIDs = Schema.Record(Schema.String, Schema.String)
-
-export const ListResult = Schema.Struct({
- all: Schema.Array(Info),
- default: DefaultModelIDs,
- connected: Schema.Array(Schema.String),
-}).pipe(withStatics((s) => ({ zod: zod(s) })))
-export type ListResult = Types.DeepMutable>
-
-export const ConfigProvidersResult = Schema.Struct({
- providers: Schema.Array(Info),
- default: DefaultModelIDs,
-}).pipe(withStatics((s) => ({ zod: zod(s) })))
-export type ConfigProvidersResult = Types.DeepMutable>
-
-export function defaultModelIDs }>(providers: Record) {
- return mapValues(providers, (item) => sort(Object.values(item.models))[0].id)
-}
-
-export interface Interface {
- readonly list: () => Effect.Effect>
- readonly getProvider: (providerID: ProviderID) => Effect.Effect
- readonly getModel: (providerID: ProviderID, modelID: ModelID) => Effect.Effect
- readonly getLanguage: (model: Model) => Effect.Effect
- readonly closest: (
- providerID: ProviderID,
- query: string[],
- ) => Effect.Effect<{ providerID: ProviderID; modelID: string } | undefined>
- readonly getSmallModel: (providerID: ProviderID) => Effect.Effect
- readonly defaultModel: () => Effect.Effect<{ providerID: ProviderID; modelID: ModelID }>
-}
-
-interface State {
- models: Map
- providers: Record
- sdk: Map
- modelLoaders: Record
- varsLoaders: Record
-}
-
-export class Service extends Context.Service()("@opencode/Provider") {}
-
-function cost(c: ModelsDev.Model["cost"]): Model["cost"] {
- const result: Model["cost"] = {
- input: c?.input ?? 0,
- output: c?.output ?? 0,
- cache: {
- read: c?.cache_read ?? 0,
- write: c?.cache_write ?? 0,
+ }
},
}
- if (c?.context_over_200k) {
- result.experimentalOver200K = {
- cache: {
- read: c.context_over_200k.cache_read ?? 0,
- write: c.context_over_200k.cache_write ?? 0,
- },
- input: c.context_over_200k.input,
- output: c.context_over_200k.output,
- }
- }
- return result
-}
-function fromModelsDevModel(provider: ModelsDev.Provider, model: ModelsDev.Model): Model {
- const base: Model = {
- id: ModelID.make(model.id),
- providerID: ProviderID.make(provider.id),
- name: model.name,
- family: model.family,
- api: {
- id: model.id,
- url: model.provider?.api ?? provider.api ?? "",
- npm: model.provider?.npm ?? provider.npm ?? "@ai-sdk/openai-compatible",
- },
- status: model.status ?? "active",
- headers: {},
- options: {},
- cost: cost(model.cost),
- limit: {
- context: model.limit.context,
- input: model.limit.input,
- output: model.limit.output,
- },
- capabilities: {
- temperature: model.temperature ?? false,
- reasoning: model.reasoning ?? false,
- attachment: model.attachment ?? false,
- toolcall: model.tool_call ?? true,
- input: {
- text: model.modalities?.input?.includes("text") ?? false,
- audio: model.modalities?.input?.includes("audio") ?? false,
- image: model.modalities?.input?.includes("image") ?? false,
- video: model.modalities?.input?.includes("video") ?? false,
- pdf: model.modalities?.input?.includes("pdf") ?? false,
+ export const Model = z
+ .object({
+ id: ModelID.zod,
+ providerID: ProviderID.zod,
+ api: z.object({
+ id: z.string(),
+ url: z.string(),
+ npm: z.string(),
+ }),
+ name: z.string(),
+ family: z.string().optional(),
+ capabilities: z.object({
+ temperature: z.boolean(),
+ reasoning: z.boolean(),
+ attachment: z.boolean(),
+ toolcall: z.boolean(),
+ input: z.object({
+ text: z.boolean(),
+ audio: z.boolean(),
+ image: z.boolean(),
+ video: z.boolean(),
+ pdf: z.boolean(),
+ }),
+ output: z.object({
+ text: z.boolean(),
+ audio: z.boolean(),
+ image: z.boolean(),
+ video: z.boolean(),
+ pdf: z.boolean(),
+ }),
+ interleaved: z.union([
+ z.boolean(),
+ z.object({
+ field: z.enum(["reasoning_content", "reasoning_details"]),
+ }),
+ ]),
+ }),
+ cost: z.object({
+ input: z.number(),
+ output: z.number(),
+ cache: z.object({
+ read: z.number(),
+ write: z.number(),
+ }),
+ experimentalOver200K: z
+ .object({
+ input: z.number(),
+ output: z.number(),
+ cache: z.object({
+ read: z.number(),
+ write: z.number(),
+ }),
+ })
+ .optional(),
+ }),
+ limit: z.object({
+ context: z.number(),
+ input: z.number().optional(),
+ output: z.number(),
+ }),
+ status: z.enum(["alpha", "beta", "deprecated", "active"]),
+ options: z.record(z.string(), z.any()),
+ headers: z.record(z.string(), z.string()),
+ release_date: z.string(),
+ variants: z.record(z.string(), z.record(z.string(), z.any())).optional(),
+ })
+ .meta({
+ ref: "Model",
+ })
+ export type Model = z.infer
+
+ export const Info = z
+ .object({
+ id: ProviderID.zod,
+ name: z.string(),
+ source: z.enum(["env", "config", "custom", "api"]),
+ env: z.string().array(),
+ key: z.string().optional(),
+ options: z.record(z.string(), z.any()),
+ models: z.record(z.string(), Model),
+ })
+ .meta({
+ ref: "Provider",
+ })
+ export type Info = z.infer
+
+ function fromModelsDevModel(provider: ModelsDev.Provider, model: ModelsDev.Model): Model {
+ const m: Model = {
+ id: ModelID.make(model.id),
+ providerID: ProviderID.make(provider.id),
+ name: model.name,
+ family: model.family,
+ api: {
+ id: model.id,
+ url: model.provider?.api ?? provider.api!,
+ npm: model.provider?.npm ?? provider.npm ?? "@ai-sdk/openai-compatible",
},
- output: {
- text: model.modalities?.output?.includes("text") ?? false,
- audio: model.modalities?.output?.includes("audio") ?? false,
- image: model.modalities?.output?.includes("image") ?? false,
- video: model.modalities?.output?.includes("video") ?? false,
- pdf: model.modalities?.output?.includes("pdf") ?? false,
+ status: model.status ?? "active",
+ headers: model.headers ?? {},
+ options: model.options ?? {},
+ cost: {
+ input: model.cost?.input ?? 0,
+ output: model.cost?.output ?? 0,
+ cache: {
+ read: model.cost?.cache_read ?? 0,
+ write: model.cost?.cache_write ?? 0,
+ },
+ experimentalOver200K: model.cost?.context_over_200k
+ ? {
+ cache: {
+ read: model.cost.context_over_200k.cache_read ?? 0,
+ write: model.cost.context_over_200k.cache_write ?? 0,
+ },
+ input: model.cost.context_over_200k.input,
+ output: model.cost.context_over_200k.output,
+ }
+ : undefined,
},
- interleaved: model.interleaved ?? false,
- },
- release_date: model.release_date ?? "",
- variants: {},
- }
+ limit: {
+ context: model.limit.context,
+ input: model.limit.input,
+ output: model.limit.output,
+ },
+ capabilities: {
+ temperature: model.temperature,
+ reasoning: model.reasoning,
+ attachment: model.attachment,
+ toolcall: model.tool_call,
+ input: {
+ text: model.modalities?.input?.includes("text") ?? false,
+ audio: model.modalities?.input?.includes("audio") ?? false,
+ image: model.modalities?.input?.includes("image") ?? false,
+ video: model.modalities?.input?.includes("video") ?? false,
+ pdf: model.modalities?.input?.includes("pdf") ?? false,
+ },
+ output: {
+ text: model.modalities?.output?.includes("text") ?? false,
+ audio: model.modalities?.output?.includes("audio") ?? false,
+ image: model.modalities?.output?.includes("image") ?? false,
+ video: model.modalities?.output?.includes("video") ?? false,
+ pdf: model.modalities?.output?.includes("pdf") ?? false,
+ },
+ interleaved: model.interleaved ?? false,
+ },
+ release_date: model.release_date,
+ variants: {},
+ }
- return {
- ...base,
- variants: mapValues(ProviderTransform.variants(base), (v) => v),
+ m.variants = mapValues(ProviderTransform.variants(m), (v) => v)
+
+ return m
}
-}
-export function fromModelsDevProvider(provider: ModelsDev.Provider): Info {
- const models: Record = {}
- for (const [key, model] of Object.entries(provider.models)) {
- models[key] = fromModelsDevModel(provider, model)
- for (const [mode, opts] of Object.entries(model.experimental?.modes ?? {})) {
- const id = `${model.id}-${mode}`
- const base = fromModelsDevModel(provider, model)
- models[id] = {
- ...base,
- id: ModelID.make(id),
- name: `${model.name} ${mode[0].toUpperCase()}${mode.slice(1)}`,
- cost: opts.cost ? mergeDeep(base.cost, cost(opts.cost)) : base.cost,
- options: opts.provider?.body
- ? Object.fromEntries(
- Object.entries(opts.provider.body).map(([k, v]) => [
- k.replace(/_([a-z])/g, (_, c) => c.toUpperCase()),
- v,
- ]),
- )
- : base.options,
- headers: opts.provider?.headers ?? base.headers,
- }
+ export function fromModelsDevProvider(provider: ModelsDev.Provider): Info {
+ return {
+ id: ProviderID.make(provider.id),
+ source: "custom",
+ name: provider.name,
+ env: provider.env ?? [],
+ options: {},
+ models: mapValues(provider.models, (model) => fromModelsDevModel(provider, model)),
}
}
- return {
- id: ProviderID.make(provider.id),
- source: "custom",
- name: provider.name,
- env: [...(provider.env ?? [])],
- options: {},
- models,
- }
-}
-const layer: Layer.Layer<
- Service,
- never,
- Config.Service | Auth.Service | Plugin.Service | AppFileSystem.Service | Env.Service
-> = Layer.effect(
- Service,
- Effect.gen(function* () {
- const fs = yield* AppFileSystem.Service
- const config = yield* Config.Service
- const auth = yield* Auth.Service
- const env = yield* Env.Service
- const plugin = yield* Plugin.Service
-
- const state = yield* InstanceState.make(() =>
- Effect.gen(function* () {
- using _ = log.time("state")
- const bridge = yield* EffectBridge.make()
- const cfg = yield* config.get()
- const modelsDev = yield* Effect.promise(() => ModelsDev.get())
- const database = mapValues(modelsDev, fromModelsDevProvider)
-
- const providers: Record = {} as Record
- const languages = new Map()
- const modelLoaders: {
- [providerID: string]: CustomModelLoader
- } = {}
- const varsLoaders: {
- [providerID: string]: CustomVarsLoader
- } = {}
- const sdk = new Map()
- const discoveryLoaders: {
- [providerID: string]: CustomDiscoverModels
- } = {}
- const dep = {
- auth: (id: string) => auth.get(id).pipe(Effect.orDie),
- config: () => config.get(),
- env: () => env.all(),
- get: (key: string) => env.get(key),
- }
+ const state = Instance.state(async () => {
+ using _ = log.time("state")
+ const config = await Config.get()
+ const modelsDev = await ModelsDev.get()
+ const database = mapValues(modelsDev, fromModelsDevProvider)
- log.info("init")
+ const disabled = new Set(config.disabled_providers ?? [])
+ const enabled = config.enabled_providers ? new Set(config.enabled_providers) : null
- function mergeProvider(providerID: ProviderID, provider: Partial) {
- const existing = providers[providerID]
- if (existing) {
- // @ts-expect-error
- providers[providerID] = mergeDeep(existing, provider)
- return
- }
- const match = database[providerID]
- if (!match) return
- // @ts-expect-error
- providers[providerID] = mergeDeep(match, provider)
- }
+ function isProviderAllowed(providerID: ProviderID): boolean {
+ if (enabled && !enabled.has(providerID)) return false
+ if (disabled.has(providerID)) return false
+ return true
+ }
- // load plugins first so config() hook runs before reading cfg.provider
- const plugins = yield* plugin.list()
+ const providers: Record = {} as Record
+ const languages = new Map()
+ const modelLoaders: {
+ [providerID: string]: CustomModelLoader
+ } = {}
+ const varsLoaders: {
+ [providerID: string]: CustomVarsLoader
+ } = {}
+ const discoveryLoaders: {
+ [providerID: string]: CustomDiscoverModels
+ } = {}
+ const sdk = new Map()
+
+ log.info("init")
+
+ const configProviders = Object.entries(config.provider ?? {})
+
+ function mergeProvider(providerID: ProviderID, provider: Partial) {
+ const existing = providers[providerID]
+ if (existing) {
+ // @ts-expect-error
+ providers[providerID] = mergeDeep(existing, provider)
+ return
+ }
+ const match = database[providerID]
+ if (!match) return
+ // @ts-expect-error
+ providers[providerID] = mergeDeep(match, provider)
+ }
- // now read config providers - includes any modifications from plugin config() hook
- const configProviders = Object.entries(cfg.provider ?? {})
- const disabled = new Set(cfg.disabled_providers ?? [])
- const enabled = cfg.enabled_providers ? new Set(cfg.enabled_providers) : null
+ // extend database from config
+ for (const [providerID, provider] of configProviders) {
+ const existing = database[providerID]
+ const parsed: Info = {
+ id: ProviderID.make(providerID),
+ name: provider.name ?? existing?.name ?? providerID,
+ env: provider.env ?? existing?.env ?? [],
+ options: mergeDeep(existing?.options ?? {}, provider.options ?? {}),
+ source: "config",
+ models: existing?.models ?? {},
+ }
- function isProviderAllowed(providerID: ProviderID): boolean {
- if (enabled && !enabled.has(providerID)) return false
- if (disabled.has(providerID)) return false
- return true
+ for (const [modelID, model] of Object.entries(provider.models ?? {})) {
+ const existingModel = parsed.models[model.id ?? modelID]
+ const name = iife(() => {
+ if (model.name) return model.name
+ if (model.id && model.id !== modelID) return modelID
+ return existingModel?.name ?? modelID
+ })
+ const parsedModel: Model = {
+ id: ModelID.make(modelID),
+ api: {
+ id: model.id ?? existingModel?.api.id ?? modelID,
+ npm:
+ model.provider?.npm ??
+ provider.npm ??
+ existingModel?.api.npm ??
+ modelsDev[providerID]?.npm ??
+ "@ai-sdk/openai-compatible",
+ url: model.provider?.api ?? provider?.api ?? existingModel?.api.url ?? modelsDev[providerID]?.api,
+ },
+ status: model.status ?? existingModel?.status ?? "active",
+ name,
+ providerID: ProviderID.make(providerID),
+ capabilities: {
+ temperature: model.temperature ?? existingModel?.capabilities.temperature ?? false,
+ reasoning: model.reasoning ?? existingModel?.capabilities.reasoning ?? false,
+ attachment: model.attachment ?? existingModel?.capabilities.attachment ?? false,
+ toolcall: model.tool_call ?? existingModel?.capabilities.toolcall ?? true,
+ input: {
+ text: model.modalities?.input?.includes("text") ?? existingModel?.capabilities.input.text ?? true,
+ audio: model.modalities?.input?.includes("audio") ?? existingModel?.capabilities.input.audio ?? false,
+ image: model.modalities?.input?.includes("image") ?? existingModel?.capabilities.input.image ?? false,
+ video: model.modalities?.input?.includes("video") ?? existingModel?.capabilities.input.video ?? false,
+ pdf: model.modalities?.input?.includes("pdf") ?? existingModel?.capabilities.input.pdf ?? false,
+ },
+ output: {
+ text: model.modalities?.output?.includes("text") ?? existingModel?.capabilities.output.text ?? true,
+ audio: model.modalities?.output?.includes("audio") ?? existingModel?.capabilities.output.audio ?? false,
+ image: model.modalities?.output?.includes("image") ?? existingModel?.capabilities.output.image ?? false,
+ video: model.modalities?.output?.includes("video") ?? existingModel?.capabilities.output.video ?? false,
+ pdf: model.modalities?.output?.includes("pdf") ?? existingModel?.capabilities.output.pdf ?? false,
+ },
+ interleaved: model.interleaved ?? false,
+ },
+ cost: {
+ input: model?.cost?.input ?? existingModel?.cost?.input ?? 0,
+ output: model?.cost?.output ?? existingModel?.cost?.output ?? 0,
+ cache: {
+ read: model?.cost?.cache_read ?? existingModel?.cost?.cache.read ?? 0,
+ write: model?.cost?.cache_write ?? existingModel?.cost?.cache.write ?? 0,
+ },
+ },
+ options: mergeDeep(existingModel?.options ?? {}, model.options ?? {}),
+ limit: {
+ context: model.limit?.context ?? existingModel?.limit?.context ?? 0,
+ output: model.limit?.output ?? existingModel?.limit?.output ?? 0,
+ },
+ headers: mergeDeep(existingModel?.headers ?? {}, model.headers ?? {}),
+ family: model.family ?? existingModel?.family ?? "",
+ release_date: model.release_date ?? existingModel?.release_date ?? "",
+ variants: {},
}
+ const merged = mergeDeep(ProviderTransform.variants(parsedModel), model.variants ?? {})
+ parsedModel.variants = mapValues(
+ pickBy(merged, (v) => !v.disabled),
+ (v) => omit(v, ["disabled"]),
+ )
+ parsed.models[modelID] = parsedModel
+ }
+ database[providerID] = parsed
+ }
- // extend database from config
- for (const [providerID, provider] of configProviders) {
- const existing = database[providerID]
- const parsed: Info = {
- id: ProviderID.make(providerID),
- name: provider.name ?? existing?.name ?? providerID,
- env: provider.env ?? existing?.env ?? [],
- options: mergeDeep(existing?.options ?? {}, provider.options ?? {}),
- source: "config",
- models: existing?.models ?? {},
- }
+ // load env
+ const env = Env.all()
+ for (const [id, provider] of Object.entries(database)) {
+ const providerID = ProviderID.make(id)
+ if (disabled.has(providerID)) continue
+ const apiKey = provider.env.map((item) => env[item]).find(Boolean)
+ if (!apiKey) continue
+ mergeProvider(providerID, {
+ source: "env",
+ key: provider.env.length === 1 ? apiKey : undefined,
+ })
+ }
- for (const [modelID, model] of Object.entries(provider.models ?? {})) {
- const existingModel = parsed.models[model.id ?? modelID]
- const name = iife(() => {
- if (model.name) return model.name
- if (model.id && model.id !== modelID) return modelID
- return existingModel?.name ?? modelID
- })
- const parsedModel: Model = {
- id: ModelID.make(modelID),
- api: {
- id: model.id ?? existingModel?.api.id ?? modelID,
- npm:
- model.provider?.npm ??
- provider.npm ??
- existingModel?.api.npm ??
- modelsDev[providerID]?.npm ??
- "@ai-sdk/openai-compatible",
- url: model.provider?.api ?? provider?.api ?? existingModel?.api.url ?? modelsDev[providerID]?.api ?? "",
- },
- status: model.status ?? existingModel?.status ?? "active",
- name,
- providerID: ProviderID.make(providerID),
- capabilities: {
- temperature: model.temperature ?? existingModel?.capabilities.temperature ?? false,
- reasoning: model.reasoning ?? existingModel?.capabilities.reasoning ?? false,
- attachment: model.attachment ?? existingModel?.capabilities.attachment ?? false,
- toolcall: model.tool_call ?? existingModel?.capabilities.toolcall ?? true,
- input: {
- text: model.modalities?.input?.includes("text") ?? existingModel?.capabilities.input.text ?? true,
- audio: model.modalities?.input?.includes("audio") ?? existingModel?.capabilities.input.audio ?? false,
- image: model.modalities?.input?.includes("image") ?? existingModel?.capabilities.input.image ?? false,
- video: model.modalities?.input?.includes("video") ?? existingModel?.capabilities.input.video ?? false,
- pdf: model.modalities?.input?.includes("pdf") ?? existingModel?.capabilities.input.pdf ?? false,
- },
- output: {
- text: model.modalities?.output?.includes("text") ?? existingModel?.capabilities.output.text ?? true,
- audio:
- model.modalities?.output?.includes("audio") ?? existingModel?.capabilities.output.audio ?? false,
- image:
- model.modalities?.output?.includes("image") ?? existingModel?.capabilities.output.image ?? false,
- video:
- model.modalities?.output?.includes("video") ?? existingModel?.capabilities.output.video ?? false,
- pdf: model.modalities?.output?.includes("pdf") ?? existingModel?.capabilities.output.pdf ?? false,
- },
- interleaved: model.interleaved ?? existingModel?.capabilities.interleaved ?? false,
- },
- cost: {
- input: model?.cost?.input ?? existingModel?.cost?.input ?? 0,
- output: model?.cost?.output ?? existingModel?.cost?.output ?? 0,
- cache: {
- read: model?.cost?.cache_read ?? existingModel?.cost?.cache.read ?? 0,
- write: model?.cost?.cache_write ?? existingModel?.cost?.cache.write ?? 0,
- },
- },
- options: mergeDeep(existingModel?.options ?? {}, model.options ?? {}),
- limit: {
- context: model.limit?.context ?? existingModel?.limit?.context ?? 0,
- input: model.limit?.input ?? existingModel?.limit?.input,
- output: model.limit?.output ?? existingModel?.limit?.output ?? 0,
- },
- headers: mergeDeep(existingModel?.headers ?? {}, model.headers ?? {}),
- family: model.family ?? existingModel?.family ?? "",
- release_date: model.release_date ?? existingModel?.release_date ?? "",
- variants: {},
- }
- const merged = mergeDeep(ProviderTransform.variants(parsedModel), model.variants ?? {})
- parsedModel.variants = mapValues(
- pickBy(merged, (v) => !v.disabled),
- (v) => omit(v, ["disabled"]),
- )
- parsed.models[modelID] = parsedModel
- }
- database[providerID] = parsed
- }
+ // load apikeys
+ for (const [id, provider] of Object.entries(await Auth.all())) {
+ const providerID = ProviderID.make(id)
+ if (disabled.has(providerID)) continue
+ if (provider.type === "api") {
+ mergeProvider(providerID, {
+ source: "api",
+ key: provider.key,
+ })
+ }
+ }
- // load env
- const envs = yield* env.all()
- for (const [id, provider] of Object.entries(database)) {
- const providerID = ProviderID.make(id)
- if (disabled.has(providerID)) continue
- const apiKey = provider.env.map((item) => envs[item]).find(Boolean)
- if (!apiKey) continue
- mergeProvider(providerID, {
- source: "env",
- key: provider.env.length === 1 ? apiKey : undefined,
- })
- }
+ for (const plugin of await Plugin.list()) {
+ if (!plugin.auth) continue
+ const providerID = ProviderID.make(plugin.auth.provider)
+ if (disabled.has(providerID)) continue
- // load apikeys
- const auths = yield* auth.all().pipe(Effect.orDie)
- for (const [id, provider] of Object.entries(auths)) {
- const providerID = ProviderID.make(id)
- if (disabled.has(providerID)) continue
- if (provider.type === "api") {
- mergeProvider(providerID, {
- source: "api",
- key: provider.key,
- })
- }
- }
+ const auth = await Auth.get(providerID)
+ if (!auth) continue
+ if (!plugin.auth.loader) continue
- // plugin auth loader - database now has entries for config providers
- for (const plugin of plugins) {
- if (!plugin.auth) continue
- const providerID = ProviderID.make(plugin.auth.provider)
- if (disabled.has(providerID)) continue
-
- const stored = yield* auth.get(providerID).pipe(Effect.orDie)
- if (!stored) continue
- if (!plugin.auth.loader) continue
-
- const options = yield* Effect.promise(() =>
- plugin.auth!.loader!(
- () => bridge.promise(auth.get(providerID).pipe(Effect.orDie)) as any,
- database[plugin.auth!.provider],
- ),
- )
- const opts = options ?? {}
- const patch: Partial = providers[providerID] ? { options: opts } : { source: "custom", options: opts }
- mergeProvider(providerID, patch)
- }
+ if (auth) {
+ const options = await plugin.auth.loader(() => Auth.get(providerID) as any, database[plugin.auth.provider])
+ const opts = options ?? {}
+ const patch: Partial = providers[providerID] ? { options: opts } : { source: "custom", options: opts }
+ mergeProvider(providerID, patch)
+ }
+ }
- for (const [id, fn] of Object.entries(custom(dep))) {
- const providerID = ProviderID.make(id)
- if (disabled.has(providerID)) continue
- const data = database[providerID]
- if (!data) {
- log.error("Provider does not exist in model list " + providerID)
- continue
- }
- const result = yield* fn(data)
- if (result && (result.autoload || providers[providerID])) {
- if (result.getModel) modelLoaders[providerID] = result.getModel
- if (result.vars) varsLoaders[providerID] = result.vars
- if (result.discoverModels) discoveryLoaders[providerID] = result.discoverModels
- const opts = result.options ?? {}
- const patch: Partial = providers[providerID] ? { options: opts } : { source: "custom", options: opts }
- mergeProvider(providerID, patch)
- }
- }
+ for (const [id, fn] of Object.entries(CUSTOM_LOADERS)) {
+ const providerID = ProviderID.make(id)
+ if (disabled.has(providerID)) continue
+ const data = database[providerID]
+ if (!data) {
+ log.error("Provider does not exist in model list " + providerID)
+ continue
+ }
+ const result = await fn(data)
+ if (result && (result.autoload || providers[providerID])) {
+ if (result.getModel) modelLoaders[providerID] = result.getModel
+ if (result.vars) varsLoaders[providerID] = result.vars
+ if (result.discoverModels) discoveryLoaders[providerID] = result.discoverModels
+ const opts = result.options ?? {}
+ const patch: Partial = providers[providerID] ? { options: opts } : { source: "custom", options: opts }
+ mergeProvider(providerID, patch)
+ }
+ }
- // load config - re-apply with updated data
- for (const [id, provider] of configProviders) {
- const providerID = ProviderID.make(id)
- const partial: Partial = { source: "config" }
- if (provider.env) partial.env = provider.env
- if (provider.name) partial.name = provider.name
- if (provider.options) partial.options = provider.options
- mergeProvider(providerID, partial)
- }
+ // load config
+ for (const [id, provider] of configProviders) {
+ const providerID = ProviderID.make(id)
+ const partial: Partial = { source: "config" }
+ if (provider.env) partial.env = provider.env
+ if (provider.name) partial.name = provider.name
+ if (provider.options) partial.options = provider.options
+ mergeProvider(providerID, partial)
+ }
- const gitlab = ProviderID.make("gitlab")
- if (discoveryLoaders[gitlab] && providers[gitlab] && isProviderAllowed(gitlab)) {
- yield* Effect.promise(async () => {
- try {
- const discovered = await discoveryLoaders[gitlab]()
- for (const [modelID, model] of Object.entries(discovered)) {
- if (!providers[gitlab].models[modelID]) {
- providers[gitlab].models[modelID] = model
- }
- }
- } catch (e) {
- log.warn("state discovery error", { id: "gitlab", error: e })
- }
- })
- }
+ for (const [id, provider] of Object.entries(providers)) {
+ const providerID = ProviderID.make(id)
+ if (!isProviderAllowed(providerID)) {
+ delete providers[providerID]
+ continue
+ }
- for (const hook of plugins) {
- const p = hook.provider
- const models = p?.models
- if (!p || !models) continue
-
- const providerID = ProviderID.make(p.id)
- if (disabled.has(providerID)) continue
-
- const provider = providers[providerID]
- if (!provider) continue
- const pluginAuth = yield* auth.get(providerID).pipe(Effect.orDie)
-
- provider.models = yield* Effect.promise(async () => {
- const next = await models(provider, { auth: pluginAuth })
- return Object.fromEntries(
- Object.entries(next).map(([id, model]) => [
- id,
- {
- ...model,
- id: ModelID.make(id),
- providerID,
- },
- ]),
- )
- })
- }
+ const configProvider = config.provider?.[providerID]
- for (const [id, provider] of Object.entries(providers)) {
- const providerID = ProviderID.make(id)
- if (!isProviderAllowed(providerID)) {
- delete providers[providerID]
- continue
- }
+ for (const [modelID, model] of Object.entries(provider.models)) {
+ model.api.id = model.api.id ?? model.id ?? modelID
+ if (
+ modelID === "gpt-5-chat-latest" ||
+ (providerID === ProviderID.openrouter && modelID === "openai/gpt-5-chat")
+ )
+ delete provider.models[modelID]
+ if (model.status === "alpha" && !Flag.OPENCODE_ENABLE_EXPERIMENTAL_MODELS) delete provider.models[modelID]
+ if (model.status === "deprecated") delete provider.models[modelID]
+ if (
+ (configProvider?.blacklist && configProvider.blacklist.includes(modelID)) ||
+ (configProvider?.whitelist && !configProvider.whitelist.includes(modelID))
+ )
+ delete provider.models[modelID]
- const configProvider = cfg.provider?.[providerID]
+ model.variants = mapValues(ProviderTransform.variants(model), (v) => v)
- for (const [modelID, model] of Object.entries(provider.models)) {
- model.api.id = model.api.id ?? model.id ?? modelID
- if (
- modelID === "gpt-5-chat-latest" ||
- (providerID === ProviderID.openrouter && modelID === "openai/gpt-5-chat")
- )
- delete provider.models[modelID]
- if (model.status === "alpha" && !Flag.OPENCODE_ENABLE_EXPERIMENTAL_MODELS) delete provider.models[modelID]
- if (model.status === "deprecated") delete provider.models[modelID]
- if (
- (configProvider?.blacklist && configProvider.blacklist.includes(modelID)) ||
- (configProvider?.whitelist && !configProvider.whitelist.includes(modelID))
- )
- delete provider.models[modelID]
+ // Filter out disabled variants from config
+ const configVariants = configProvider?.models?.[modelID]?.variants
+ if (configVariants && model.variants) {
+ const merged = mergeDeep(model.variants, configVariants)
+ model.variants = mapValues(
+ pickBy(merged, (v) => !v.disabled),
+ (v) => omit(v, ["disabled"]),
+ )
+ }
+ }
- model.variants = mapValues(ProviderTransform.variants(model), (v) => v)
+ if (Object.keys(provider.models).length === 0) {
+ delete providers[providerID]
+ continue
+ }
- const configVariants = configProvider?.models?.[modelID]?.variants
- if (configVariants && model.variants) {
- const merged = mergeDeep(model.variants, configVariants)
- model.variants = mapValues(
- pickBy(merged, (v) => !v.disabled),
- (v) => omit(v, ["disabled"]),
- )
- }
- }
+ log.info("found", { providerID })
+ }
- if (Object.keys(provider.models).length === 0) {
- delete providers[providerID]
- continue
+ const gitlab = ProviderID.make("gitlab")
+ if (discoveryLoaders[gitlab] && providers[gitlab]) {
+ await (async () => {
+ const discovered = await discoveryLoaders[gitlab]()
+ for (const [modelID, model] of Object.entries(discovered)) {
+ if (!providers[gitlab].models[modelID]) {
+ providers[gitlab].models[modelID] = model
}
-
- log.info("found", { providerID })
}
+ })().catch((e) => log.warn("state discovery error", { id: "gitlab", error: e }))
+ }
- return {
- models: languages,
- providers,
- sdk,
- modelLoaders,
- varsLoaders,
- }
- }),
- )
+ return {
+ models: languages,
+ providers,
+ sdk,
+ modelLoaders,
+ varsLoaders,
+ }
+ })
- const list = Effect.fn("Provider.list")(() => InstanceState.use(state, (s) => s.providers))
+ export async function list() {
+ return state().then((state) => state.providers)
+ }
- async function resolveSDK(model: Model, s: State, envs: Record) {
- try {
- using _ = log.time("getSDK", {
- providerID: model.providerID,
- })
- const provider = s.providers[model.providerID]
- const options = { ...provider.options }
+ async function getSDK(model: Model) {
+ try {
+ using _ = log.time("getSDK", {
+ providerID: model.providerID,
+ })
+ const s = await state()
+ const provider = s.providers[model.providerID]
+ const options = { ...provider.options }
- if (model.providerID === "google-vertex" && !model.api.npm.includes("@ai-sdk/openai-compatible")) {
- delete options.fetch
- }
+ if (model.providerID === "google-vertex" && !model.api.npm.includes("@ai-sdk/openai-compatible")) {
+ delete options.fetch
+ }
- if (model.api.npm.includes("@ai-sdk/openai-compatible") && options["includeUsage"] !== false) {
- options["includeUsage"] = true
- }
+ if (model.api.npm.includes("@ai-sdk/openai-compatible") && options["includeUsage"] !== false) {
+ options["includeUsage"] = true
+ }
- const baseURL = iife(() => {
- let url =
- typeof options["baseURL"] === "string" && options["baseURL"] !== "" ? options["baseURL"] : model.api.url
- if (!url) return
-
- const loader = s.varsLoaders[model.providerID]
- if (loader) {
- const vars = loader(options)
- for (const [key, value] of Object.entries(vars)) {
- const field = "${" + key + "}"
- url = url.replaceAll(field, value)
- }
+ const baseURL = iife(() => {
+ let url =
+ typeof options["baseURL"] === "string" && options["baseURL"] !== "" ? options["baseURL"] : model.api.url
+ if (!url) return
+
+ // some models/providers have variable urls, ex: "https://${AZURE_RESOURCE_NAME}.services.ai.azure.com/anthropic/v1"
+ // We track this in models.dev, and then when we are resolving the baseURL
+ // we need to string replace that literal: "${AZURE_RESOURCE_NAME}"
+ const loader = s.varsLoaders[model.providerID]
+ if (loader) {
+ const vars = loader(options)
+ for (const [key, value] of Object.entries(vars)) {
+ const field = "${" + key + "}"
+ url = url.replaceAll(field, value)
}
+ }
- url = url.replace(/\$\{([^}]+)\}/g, (item, key) => {
- const val = envs[String(key)]
- return val ?? item
- })
- return url
+ url = url.replace(/\$\{([^}]+)\}/g, (item, key) => {
+ const val = Env.get(String(key))
+ return val ?? item
})
+ return url
+ })
- if (baseURL !== undefined) options["baseURL"] = baseURL
- if (options["apiKey"] === undefined && provider.key) options["apiKey"] = provider.key
- if (model.headers)
- options["headers"] = {
- ...options["headers"],
- ...model.headers,
- }
+ if (baseURL !== undefined) options["baseURL"] = baseURL
+ if (options["apiKey"] === undefined && provider.key) options["apiKey"] = provider.key
+ if (model.headers)
+ options["headers"] = {
+ ...options["headers"],
+ ...model.headers,
+ }
- const key = Hash.fast(
- JSON.stringify({
- providerID: model.providerID,
- npm: model.api.npm,
- options,
- }),
- )
- const existing = s.sdk.get(key)
- if (existing) return existing
-
- const customFetch = options["fetch"]
- const chunkTimeout = options["chunkTimeout"]
- delete options["chunkTimeout"]
-
- options["fetch"] = async (input: any, init?: BunFetchRequestInit) => {
- const fetchFn = customFetch ?? fetch
- const opts = init ?? {}
- const chunkAbortCtl = typeof chunkTimeout === "number" && chunkTimeout > 0 ? new AbortController() : undefined
- const signals: AbortSignal[] = []
-
- if (opts.signal) signals.push(opts.signal)
- if (chunkAbortCtl) signals.push(chunkAbortCtl.signal)
- if (options["timeout"] !== undefined && options["timeout"] !== null && options["timeout"] !== false)
- signals.push(AbortSignal.timeout(options["timeout"]))
-
- const combined = signals.length === 0 ? null : signals.length === 1 ? signals[0] : AbortSignal.any(signals)
- if (combined) opts.signal = combined
-
- // Strip openai itemId metadata following what codex does
- if (model.api.npm === "@ai-sdk/openai" && opts.body && opts.method === "POST") {
- const body = JSON.parse(opts.body as string)
- const isAzure = model.providerID.includes("azure")
- const keepIds = isAzure && body.store === true
- if (!keepIds && Array.isArray(body.input)) {
- for (const item of body.input) {
- if ("id" in item) {
- delete item.id
- }
+ const key = Hash.fast(JSON.stringify({ providerID: model.providerID, npm: model.api.npm, options }))
+ const existing = s.sdk.get(key)
+ if (existing) return existing
+
+ const customFetch = options["fetch"]
+ const chunkTimeoutRaw = options["chunkTimeout"]
+ delete options["chunkTimeout"]
+ const chunkTimeout = typeof chunkTimeoutRaw === "number" && chunkTimeoutRaw > 0 ? chunkTimeoutRaw : 30_000
+
+ options["fetch"] = async (input: any, init?: BunFetchRequestInit) => {
+ // Preserve custom fetch if it exists, wrap it with timeout logic
+ const fetchFn = customFetch ?? fetch
+ const opts = init ?? {}
+ const chunkAbortCtl = new AbortController()
+ const signals: AbortSignal[] = []
+
+ if (opts.signal) signals.push(opts.signal)
+ signals.push(chunkAbortCtl.signal)
+ if (options["timeout"] !== undefined && options["timeout"] !== null && options["timeout"] !== false)
+ signals.push(AbortSignal.timeout(options["timeout"]))
+
+ const combined = signals.length === 0 ? null : signals.length === 1 ? signals[0] : AbortSignal.any(signals)
+ if (combined) opts.signal = combined
+
+ // Strip openai itemId metadata following what codex does
+ // Codex uses #[serde(skip_serializing)] on id fields for all item types:
+ // Message, Reasoning, FunctionCall, LocalShellCall, CustomToolCall, WebSearchCall
+ // IDs are only re-attached for Azure with store=true
+ if (model.api.npm === "@ai-sdk/openai" && opts.body && opts.method === "POST") {
+ const body = JSON.parse(opts.body as string)
+ const isAzure = model.providerID.includes("azure")
+ const keepIds = isAzure && body.store === true
+ if (!keepIds && Array.isArray(body.input)) {
+ for (const item of body.input) {
+ if ("id" in item) {
+ delete item.id
}
- opts.body = JSON.stringify(body)
}
+ opts.body = JSON.stringify(body)
}
-
- const res = await fetchFn(input, {
- ...opts,
- // @ts-ignore see here: https://github.com/oven-sh/bun/issues/16682
- timeout: false,
- })
-
- if (!chunkAbortCtl) return res
- return wrapSSE(res, chunkTimeout, chunkAbortCtl)
- }
-
- const bundledLoader = BUNDLED_PROVIDERS[model.api.npm]
- if (bundledLoader) {
- log.info("using bundled provider", {
- providerID: model.providerID,
- pkg: model.api.npm,
- })
- const factory = await bundledLoader()
- const loaded = factory({
- name: model.providerID,
- ...options,
- })
- s.sdk.set(key, loaded)
- return loaded as SDK
}
- let installedPath: string
- if (!model.api.npm.startsWith("file://")) {
- const item = await Npm.add(model.api.npm)
- if (!item.entrypoint) throw new Error(`Package ${model.api.npm} has no import entrypoint`)
- installedPath = item.entrypoint
- } else {
- log.info("loading local provider", { pkg: model.api.npm })
- installedPath = model.api.npm
- }
+ const res = await fetchFn(input, {
+ ...opts,
+ // @ts-ignore see here: https://github.com/oven-sh/bun/issues/16682
+ timeout: false,
+ })
- // `installedPath` is a local entry path or an existing `file://` URL. Normalize
- // only path inputs so Node on Windows accepts the dynamic import.
- const importSpec = installedPath.startsWith("file://") ? installedPath : pathToFileURL(installedPath).href
- const mod = await import(importSpec)
+ return wrapSSE(res, chunkTimeout, chunkAbortCtl)
+ }
- const fn = mod[Object.keys(mod).find((key) => key.startsWith("create"))!]
- const loaded = fn({
+ const bundledFn = BUNDLED_PROVIDERS[model.api.npm]
+ if (bundledFn) {
+ log.info("using bundled provider", { providerID: model.providerID, pkg: model.api.npm })
+ const loaded = bundledFn({
name: model.providerID,
...options,
})
s.sdk.set(key, loaded)
return loaded as SDK
- } catch (e) {
- throw new InitError({ providerID: model.providerID }, { cause: e })
}
- }
-
- const getProvider = Effect.fn("Provider.getProvider")((providerID: ProviderID) =>
- InstanceState.use(state, (s) => s.providers[providerID]),
- )
- const getModel = Effect.fn("Provider.getModel")(function* (providerID: ProviderID, modelID: ModelID) {
- const s = yield* InstanceState.get(state)
- const provider = s.providers[providerID]
- if (!provider) {
- const available = Object.keys(s.providers)
- const matches = fuzzysort.go(providerID, available, { limit: 3, threshold: -10000 })
- throw new ModelNotFoundError({ providerID, modelID, suggestions: matches.map((m) => m.target) })
+ let installedPath: string
+ if (!model.api.npm.startsWith("file://")) {
+ installedPath = await BunProc.install(model.api.npm, "latest")
+ } else {
+ log.info("loading local provider", { pkg: model.api.npm })
+ installedPath = model.api.npm
}
- const info = provider.models[modelID]
- if (!info) {
- const available = Object.keys(provider.models)
- const matches = fuzzysort.go(modelID, available, { limit: 3, threshold: -10000 })
- throw new ModelNotFoundError({ providerID, modelID, suggestions: matches.map((m) => m.target) })
- }
- return info
- })
+ const mod = await import(installedPath)
- const getLanguage = Effect.fn("Provider.getLanguage")(function* (model: Model) {
- const s = yield* InstanceState.get(state)
- const envs = yield* env.all()
- const key = `${model.providerID}/${model.id}`
- if (s.models.has(key)) return s.models.get(key)!
+ const fn = mod[Object.keys(mod).find((key) => key.startsWith("create"))!]
+ const loaded = fn({
+ name: model.providerID,
+ ...options,
+ })
+ s.sdk.set(key, loaded)
+ return loaded as SDK
+ } catch (e) {
+ throw new InitError({ providerID: model.providerID }, { cause: e })
+ }
+ }
- return yield* Effect.promise(async () => {
- const provider = s.providers[model.providerID]
- const sdk = await resolveSDK(model, s, envs)
+ export async function getProvider(providerID: ProviderID) {
+ return state().then((s) => s.providers[providerID])
+ }
- try {
- const language = s.modelLoaders[model.providerID]
- ? await s.modelLoaders[model.providerID](sdk, model.api.id, {
- ...provider.options,
- ...model.options,
- })
- : sdk.languageModel(model.api.id)
- s.models.set(key, language)
- return language
- } catch (e) {
- if (e instanceof NoSuchModelError)
- throw new ModelNotFoundError(
- {
- modelID: model.id,
- providerID: model.providerID,
- },
- { cause: e },
- )
- throw e
- }
- })
- })
+ export async function getModel(providerID: ProviderID, modelID: ModelID) {
+ const s = await state()
+ const provider = s.providers[providerID]
+ if (!provider) {
+ const availableProviders = Object.keys(s.providers)
+ const matches = fuzzysort.go(providerID, availableProviders, { limit: 3, threshold: -10000 })
+ const suggestions = matches.map((m) => m.target)
+ throw new ModelNotFoundError({ providerID, modelID, suggestions })
+ }
- const closest = Effect.fn("Provider.closest")(function* (providerID: ProviderID, query: string[]) {
- const s = yield* InstanceState.get(state)
- const provider = s.providers[providerID]
- if (!provider) return undefined
- for (const item of query) {
- for (const modelID of Object.keys(provider.models)) {
- if (modelID.includes(item)) return { providerID, modelID }
- }
- }
- return undefined
- })
+ const info = provider.models[modelID]
+ if (!info) {
+ const availableModels = Object.keys(provider.models)
+ const matches = fuzzysort.go(modelID, availableModels, { limit: 3, threshold: -10000 })
+ const suggestions = matches.map((m) => m.target)
+ throw new ModelNotFoundError({ providerID, modelID, suggestions })
+ }
+ return info
+ }
- const getSmallModel = Effect.fn("Provider.getSmallModel")(function* (providerID: ProviderID) {
- const cfg = yield* config.get()
+ export async function getLanguage(model: Model): Promise<LanguageModel> {
+ const s = await state()
+ const key = `${model.providerID}/${model.id}`
+ if (s.models.has(key)) return s.models.get(key)!
+
+ const provider = s.providers[model.providerID]
+ const sdk = await getSDK(model)
+
+ try {
+ const language = s.modelLoaders[model.providerID]
+ ? await s.modelLoaders[model.providerID](sdk, model.api.id, { ...provider.options, ...model.options })
+ : sdk.languageModel(model.api.id)
+ s.models.set(key, language)
+ return language
+ } catch (e) {
+ if (e instanceof NoSuchModelError)
+ throw new ModelNotFoundError(
+ {
+ modelID: model.id,
+ providerID: model.providerID,
+ },
+ { cause: e },
+ )
+ throw e
+ }
+ }
- if (cfg.small_model) {
- const parsed = parseModel(cfg.small_model)
- return yield* getModel(parsed.providerID, parsed.modelID)
+ export async function closest(providerID: ProviderID, query: string[]) {
+ const s = await state()
+ const provider = s.providers[providerID]
+ if (!provider) return undefined
+ for (const item of query) {
+ for (const modelID of Object.keys(provider.models)) {
+ if (modelID.includes(item))
+ return {
+ providerID,
+ modelID,
+ }
}
+ }
+ }
- const s = yield* InstanceState.get(state)
- const provider = s.providers[providerID]
- if (!provider) return undefined
+ export async function getSmallModel(providerID: ProviderID) {
+ const cfg = await Config.get()
+ if (cfg.small_model) {
+ const parsed = parseModel(cfg.small_model)
+ return getModel(parsed.providerID, parsed.modelID)
+ }
+
+ const provider = await state().then((state) => state.providers[providerID])
+ if (provider) {
let priority = [
"claude-haiku-4-5",
"claude-haiku-4.5",
@@ -1615,6 +1405,7 @@ const layer: Layer.Layer<
priority = ["gpt-5-nano"]
}
if (providerID.startsWith("github-copilot")) {
+ // prioritize free models for github copilot
priority = ["gpt-5-mini", "claude-haiku-4.5", ...priority]
}
for (const item of priority) {
@@ -1622,102 +1413,93 @@ const layer: Layer.Layer<
const crossRegionPrefixes = ["global.", "us.", "eu."]
const candidates = Object.keys(provider.models).filter((m) => m.includes(item))
+ // Model selection priority:
+ // 1. global. prefix (works everywhere)
+ // 2. User's region prefix (us., eu.)
+ // 3. Unprefixed model
const globalMatch = candidates.find((m) => m.startsWith("global."))
- if (globalMatch) return yield* getModel(providerID, ModelID.make(globalMatch))
+ if (globalMatch) return getModel(providerID, ModelID.make(globalMatch))
const region = provider.options?.region
if (region) {
const regionPrefix = region.split("-")[0]
if (regionPrefix === "us" || regionPrefix === "eu") {
const regionalMatch = candidates.find((m) => m.startsWith(`${regionPrefix}.`))
- if (regionalMatch) return yield* getModel(providerID, ModelID.make(regionalMatch))
+ if (regionalMatch) return getModel(providerID, ModelID.make(regionalMatch))
}
}
const unprefixed = candidates.find((m) => !crossRegionPrefixes.some((p) => m.startsWith(p)))
- if (unprefixed) return yield* getModel(providerID, ModelID.make(unprefixed))
+ if (unprefixed) return getModel(providerID, ModelID.make(unprefixed))
} else {
for (const model of Object.keys(provider.models)) {
- if (model.includes(item)) return yield* getModel(providerID, ModelID.make(model))
+ if (model.includes(item)) return getModel(providerID, ModelID.make(model))
}
}
}
+ }
- return undefined
- })
+ return undefined
+ }
- const defaultModel = Effect.fn("Provider.defaultModel")(function* () {
- const cfg = yield* config.get()
- if (cfg.model) return parseModel(cfg.model)
-
- const s = yield* InstanceState.get(state)
- const recent = yield* fs.readJson(path.join(Global.Path.state, "model.json")).pipe(
- Effect.map((x): { providerID: ProviderID; modelID: ModelID }[] => {
- if (!isRecord(x) || !Array.isArray(x.recent)) return []
- return x.recent.flatMap((item) => {
- if (!isRecord(item)) return []
- if (typeof item.providerID !== "string") return []
- if (typeof item.modelID !== "string") return []
- return [{ providerID: ProviderID.make(item.providerID), modelID: ModelID.make(item.modelID) }]
- })
- }),
- Effect.catch(() => Effect.succeed([] as { providerID: ProviderID; modelID: ModelID }[])),
- )
- for (const entry of recent) {
- const provider = s.providers[entry.providerID]
- if (!provider) continue
- if (!provider.models[entry.modelID]) continue
- return { providerID: entry.providerID, modelID: entry.modelID }
- }
+ const priority = ["gpt-5", "claude-sonnet-4", "big-pickle", "gemini-3-pro"]
+ export function sort<T extends { id: string }>(models: T[]) {
+ return sortBy(
+ models,
+ [(model) => priority.findIndex((filter) => model.id.includes(filter)), "desc"],
+ [(model) => (model.id.includes("latest") ? 0 : 1), "asc"],
+ [(model) => model.id, "desc"],
+ )
+ }
- const provider = Object.values(s.providers).find((p) => !cfg.provider || Object.keys(cfg.provider).includes(p.id))
- if (!provider) throw new Error("no providers found")
- const [model] = sort(Object.values(provider.models))
- if (!model) throw new Error("no models found")
- return {
- providerID: provider.id,
- modelID: model.id,
- }
- })
+ export async function defaultModel() {
+ const cfg = await Config.get()
+ if (cfg.model) return parseModel(cfg.model)
- return Service.of({ list, getProvider, getModel, getLanguage, closest, getSmallModel, defaultModel })
- }),
-)
-
-export const defaultLayer = Layer.suspend(() =>
- layer.pipe(
- Layer.provide(AppFileSystem.defaultLayer),
- Layer.provide(Env.defaultLayer),
- Layer.provide(Config.defaultLayer),
- Layer.provide(Auth.defaultLayer),
- Layer.provide(Plugin.defaultLayer),
- ),
-)
-
-const priority = ["gpt-5", "claude-sonnet-4", "big-pickle", "gemini-3-pro"]
-export function sort(models: T[]) {
- return sortBy(
- models,
- [(model) => priority.findIndex((filter) => model.id.includes(filter)), "desc"],
- [(model) => (model.id.includes("latest") ? 0 : 1), "asc"],
- [(model) => model.id, "desc"],
- )
-}
+ const providers = await list()
+ const recent = (await Filesystem.readJson<{ recent?: { providerID: ProviderID; modelID: ModelID }[] }>(
+ path.join(Global.Path.state, "model.json"),
+ )
+ .then((x) => (Array.isArray(x.recent) ? x.recent : []))
+ .catch(() => [])) as { providerID: ProviderID; modelID: ModelID }[]
+ for (const entry of recent) {
+ const provider = providers[entry.providerID]
+ if (!provider) continue
+ if (!provider.models[entry.modelID]) continue
+ return { providerID: entry.providerID, modelID: entry.modelID }
+ }
-export function parseModel(model: string) {
- const [providerID, ...rest] = model.split("/")
- return {
- providerID: ProviderID.make(providerID),
- modelID: ModelID.make(rest.join("/")),
+ const provider = Object.values(providers).find((p) => !cfg.provider || Object.keys(cfg.provider).includes(p.id))
+ if (!provider) throw new Error("no providers found")
+ const [model] = sort(Object.values(provider.models))
+ if (!model) throw new Error("no models found")
+ return {
+ providerID: provider.id,
+ modelID: model.id,
+ }
+ }
+
+ export function parseModel(model: string) {
+ const [providerID, ...rest] = model.split("/")
+ return {
+ providerID: ProviderID.make(providerID),
+ modelID: ModelID.make(rest.join("/")),
+ }
}
-}
-export const ModelNotFoundError = namedSchemaError("ProviderModelNotFoundError", {
- providerID: ProviderID,
- modelID: ModelID,
- suggestions: Schema.optional(Schema.Array(Schema.String)),
-})
+ export const ModelNotFoundError = NamedError.create(
+ "ProviderModelNotFoundError",
+ z.object({
+ providerID: ProviderID.zod,
+ modelID: ModelID.zod,
+ suggestions: z.array(z.string()).optional(),
+ }),
+ )
-export const InitError = namedSchemaError("ProviderInitError", {
- providerID: ProviderID,
-})
+ export const InitError = NamedError.create(
+ "ProviderInitError",
+ z.object({
+ providerID: ProviderID.zod,
+ }),
+ )
+}
diff --git a/packages/opencode/src/session/message-v2.ts b/packages/opencode/src/session/message-v2.ts
index 8a2d352a51e5..e461c871616a 100644
--- a/packages/opencode/src/session/message-v2.ts
+++ b/packages/opencode/src/session/message-v2.ts
@@ -1,1196 +1,1009 @@
import { BusEvent } from "@/bus/bus-event"
import { SessionID, MessageID, PartID } from "./schema"
import z from "zod"
-import { NamedError } from "@opencode-ai/core/util/error"
+import { NamedError } from "@opencode-ai/util/error"
import { APICallError, convertToModelMessages, LoadAPIKeyError, type ModelMessage, type UIMessage } from "ai"
import { LSP } from "../lsp"
import { Snapshot } from "@/snapshot"
-import { SyncEvent } from "../sync"
-import { Database, NotFoundError, and, desc, eq, inArray, lt, or } from "@/storage"
+import { fn } from "@/util/fn"
+import { Database, NotFoundError, and, desc, eq, inArray, lt, or } from "@/storage/db"
import { MessageTable, PartTable, SessionTable } from "./session.sql"
-import { ProviderError } from "@/provider"
+import { ProviderTransform } from "@/provider/transform"
+import { STATUS_CODES } from "http"
+import { Storage } from "@/storage/storage"
+import { ProviderError } from "@/provider/error"
import { iife } from "@/util/iife"
-import { errorMessage } from "@/util/error"
-import { isMedia } from "@/util/media"
import type { SystemError } from "bun"
-import type { Provider } from "@/provider"
+import type { Provider } from "@/provider/provider"
import { ModelID, ProviderID } from "@/provider/schema"
-import { Effect, Schema, Types } from "effect"
-import { zod, ZodOverride } from "@/util/effect-zod"
-import { NonNegativeInt, withStatics } from "@/util/schema"
-import { namedSchemaError } from "@/util/named-schema-error"
-import { EffectLogger } from "@/effect"
-
-/** Error shape thrown by Bun's fetch() when gzip/br decompression fails mid-stream */
-interface FetchDecompressionError extends Error {
- code: "ZlibError"
- errno: number
- path: string
-}
-
-export const SYNTHETIC_ATTACHMENT_PROMPT = "Attached image(s) from tool result:"
-export { isMedia }
-
-export const OutputLengthError = namedSchemaError("MessageOutputLengthError", {})
-export const AbortedError = namedSchemaError("MessageAbortedError", { message: Schema.String })
-export const StructuredOutputError = namedSchemaError("StructuredOutputError", {
- message: Schema.String,
- retries: Schema.Number,
-})
-export const AuthError = namedSchemaError("ProviderAuthError", {
- providerID: Schema.String,
- message: Schema.String,
-})
-export const APIError = namedSchemaError("APIError", {
- message: Schema.String,
- statusCode: Schema.optional(Schema.Number),
- isRetryable: Schema.Boolean,
- responseHeaders: Schema.optional(Schema.Record(Schema.String, Schema.String)),
- responseBody: Schema.optional(Schema.String),
- metadata: Schema.optional(Schema.Record(Schema.String, Schema.String)),
-})
-export type APIError = z.infer
-export const ContextOverflowError = namedSchemaError("ContextOverflowError", {
- message: Schema.String,
- responseBody: Schema.optional(Schema.String),
-})
-
-export class OutputFormatText extends Schema.Class("OutputFormatText")({
- type: Schema.Literal("text"),
-}) {
- static readonly zod = zod(this)
-}
-export class OutputFormatJsonSchema extends Schema.Class("OutputFormatJsonSchema")({
- type: Schema.Literal("json_schema"),
- schema: Schema.Record(Schema.String, Schema.Any).annotate({ identifier: "JSONSchema" }),
- retryCount: NonNegativeInt.pipe(Schema.optional, Schema.withDecodingDefault(Effect.succeed(2))),
-}) {
- static readonly zod = zod(this)
-}
-
-const _Format = Schema.Union([OutputFormatText, OutputFormatJsonSchema]).annotate({
- discriminator: "type",
- identifier: "OutputFormat",
-})
-export const Format = Object.assign(_Format, { zod: zod(_Format) })
-export type OutputFormat = Schema.Schema.Type
-
-const partBase = {
- id: PartID,
- sessionID: SessionID,
- messageID: MessageID,
-}
+export namespace MessageV2 {
+ const NETWORK_ERROR_CODES = new Set([
+ "ECONNRESET",
+ "ETIMEDOUT",
+ "ENETUNREACH",
+ "EHOSTUNREACH",
+ "ENOTFOUND",
+ "EPIPE",
+ "ECONNREFUSED",
+ ])
+
+ export function isMedia(mime: string) {
+ return mime.startsWith("image/") || mime === "application/pdf"
+ }
-export const SnapshotPart = Schema.Struct({
- ...partBase,
- type: Schema.Literal("snapshot"),
- snapshot: Schema.String,
-})
- .annotate({ identifier: "SnapshotPart" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type SnapshotPart = Types.DeepMutable>
-
-export const PatchPart = Schema.Struct({
- ...partBase,
- type: Schema.Literal("patch"),
- hash: Schema.String,
- files: Schema.Array(Schema.String),
-})
- .annotate({ identifier: "PatchPart" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type PatchPart = Types.DeepMutable>
-
-export const TextPart = Schema.Struct({
- ...partBase,
- type: Schema.Literal("text"),
- text: Schema.String,
- synthetic: Schema.optional(Schema.Boolean),
- ignored: Schema.optional(Schema.Boolean),
- time: Schema.optional(
- Schema.Struct({
- start: Schema.Number,
- end: Schema.optional(Schema.Number),
+ export const OutputLengthError = NamedError.create("MessageOutputLengthError", z.object({}))
+ export const AbortedError = NamedError.create("MessageAbortedError", z.object({ message: z.string() }))
+ export const StructuredOutputError = NamedError.create(
+ "StructuredOutputError",
+ z.object({
+ message: z.string(),
+ retries: z.number(),
}),
- ),
- metadata: Schema.optional(Schema.Record(Schema.String, Schema.Any)),
-})
- .annotate({ identifier: "TextPart" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type TextPart = Types.DeepMutable>
-
-export const ReasoningPart = Schema.Struct({
- ...partBase,
- type: Schema.Literal("reasoning"),
- text: Schema.String,
- metadata: Schema.optional(Schema.Record(Schema.String, Schema.Any)),
- time: Schema.Struct({
- start: Schema.Number,
- end: Schema.optional(Schema.Number),
- }),
-})
- .annotate({ identifier: "ReasoningPart" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type ReasoningPart = Types.DeepMutable>
-
-const filePartSourceBase = {
- text: Schema.Struct({
- value: Schema.String,
- start: Schema.Int,
- end: Schema.Int,
- }).annotate({ identifier: "FilePartSourceText" }),
-}
-
-export const FileSource = Schema.Struct({
- ...filePartSourceBase,
- type: Schema.Literal("file"),
- path: Schema.String,
-})
- .annotate({ identifier: "FileSource" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-
-export const SymbolSource = Schema.Struct({
- ...filePartSourceBase,
- type: Schema.Literal("symbol"),
- path: Schema.String,
- range: LSP.Range,
- name: Schema.String,
- kind: Schema.Int,
-})
- .annotate({ identifier: "SymbolSource" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-
-export const ResourceSource = Schema.Struct({
- ...filePartSourceBase,
- type: Schema.Literal("resource"),
- clientName: Schema.String,
- uri: Schema.String,
-})
- .annotate({ identifier: "ResourceSource" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-
-const _FilePartSource = Schema.Union([FileSource, SymbolSource, ResourceSource]).annotate({
- discriminator: "type",
- identifier: "FilePartSource",
-})
-export const FilePartSource = Object.assign(_FilePartSource, { zod: zod(_FilePartSource) })
-
-export const FilePart = Schema.Struct({
- ...partBase,
- type: Schema.Literal("file"),
- mime: Schema.String,
- filename: Schema.optional(Schema.String),
- url: Schema.String,
- source: Schema.optional(_FilePartSource),
-})
- .annotate({ identifier: "FilePart" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type FilePart = Types.DeepMutable>
-
-export const AgentPart = Schema.Struct({
- ...partBase,
- type: Schema.Literal("agent"),
- name: Schema.String,
- source: Schema.optional(
- Schema.Struct({
- value: Schema.String,
- start: Schema.Int,
- end: Schema.Int,
+ )
+ export const AuthError = NamedError.create(
+ "ProviderAuthError",
+ z.object({
+ providerID: z.string(),
+ message: z.string(),
}),
- ),
-})
- .annotate({ identifier: "AgentPart" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type AgentPart = Types.DeepMutable>
-
-export const CompactionPart = Schema.Struct({
- ...partBase,
- type: Schema.Literal("compaction"),
- auto: Schema.Boolean,
- overflow: Schema.optional(Schema.Boolean),
- tail_start_id: Schema.optional(MessageID),
-})
- .annotate({ identifier: "CompactionPart" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type CompactionPart = Types.DeepMutable>
-
-export const SubtaskPart = Schema.Struct({
- ...partBase,
- type: Schema.Literal("subtask"),
- prompt: Schema.String,
- description: Schema.String,
- agent: Schema.String,
- model: Schema.optional(
- Schema.Struct({
- providerID: ProviderID,
- modelID: ModelID,
+ )
+ export const APIError = NamedError.create(
+ "APIError",
+ z.object({
+ message: z.string(),
+ statusCode: z.number().optional(),
+ isRetryable: z.boolean(),
+ responseHeaders: z.record(z.string(), z.string()).optional(),
+ responseBody: z.string().optional(),
+ metadata: z.record(z.string(), z.string()).optional(),
}),
- ),
- command: Schema.optional(Schema.String),
-})
- .annotate({ identifier: "SubtaskPart" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type SubtaskPart = Types.DeepMutable>
-
-export const RetryPart = Schema.Struct({
- ...partBase,
- type: Schema.Literal("retry"),
- attempt: Schema.Number,
- // APIError is still NamedError-based Zod; bridge via ZodOverride until errors migrate.
- error: Schema.Any.annotate({ [ZodOverride]: APIError.Schema }),
- time: Schema.Struct({
- created: Schema.Number,
- }),
-})
- .annotate({ identifier: "RetryPart" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type RetryPart = Omit>, "error"> & {
- error: APIError
-}
+ )
+ export type APIError = z.infer<typeof APIError.Schema>
+ export const ContextOverflowError = NamedError.create(
+ "ContextOverflowError",
+ z.object({ message: z.string(), responseBody: z.string().optional() }),
+ )
-export const StepStartPart = Schema.Struct({
- ...partBase,
- type: Schema.Literal("step-start"),
- snapshot: Schema.optional(Schema.String),
-})
- .annotate({ identifier: "StepStartPart" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type StepStartPart = Types.DeepMutable>
-
-export const StepFinishPart = Schema.Struct({
- ...partBase,
- type: Schema.Literal("step-finish"),
- reason: Schema.String,
- snapshot: Schema.optional(Schema.String),
- cost: Schema.Number,
- tokens: Schema.Struct({
- total: Schema.optional(Schema.Number),
- input: Schema.Number,
- output: Schema.Number,
- reasoning: Schema.Number,
- cache: Schema.Struct({
- read: Schema.Number,
- write: Schema.Number,
+ export const OutputFormatText = z
+ .object({
+ type: z.literal("text"),
+ })
+ .meta({
+ ref: "OutputFormatText",
+ })
+
+ export const OutputFormatJsonSchema = z
+ .object({
+ type: z.literal("json_schema"),
+ schema: z.record(z.string(), z.any()).meta({ ref: "JSONSchema" }),
+ retryCount: z.number().int().min(0).default(2),
+ })
+ .meta({
+ ref: "OutputFormatJsonSchema",
+ })
+
+ export const Format = z.discriminatedUnion("type", [OutputFormatText, OutputFormatJsonSchema]).meta({
+ ref: "OutputFormat",
+ })
+ export type OutputFormat = z.infer<typeof Format>
+
+ const PartBase = z.object({
+ id: PartID.zod,
+ sessionID: SessionID.zod,
+ messageID: MessageID.zod,
+ })
+
+ export const SnapshotPart = PartBase.extend({
+ type: z.literal("snapshot"),
+ snapshot: z.string(),
+ }).meta({
+ ref: "SnapshotPart",
+ })
+ export type SnapshotPart = z.infer<typeof SnapshotPart>
+
+ export const PatchPart = PartBase.extend({
+ type: z.literal("patch"),
+ hash: z.string(),
+ files: z.string().array(),
+ }).meta({
+ ref: "PatchPart",
+ })
+ export type PatchPart = z.infer<typeof PatchPart>
+
+ export const TextPart = PartBase.extend({
+ type: z.literal("text"),
+ text: z.string(),
+ synthetic: z.boolean().optional(),
+ ignored: z.boolean().optional(),
+ time: z
+ .object({
+ start: z.number(),
+ end: z.number().optional(),
+ })
+ .optional(),
+ metadata: z.record(z.string(), z.any()).optional(),
+ }).meta({
+ ref: "TextPart",
+ })
+ export type TextPart = z.infer<typeof TextPart>
+
+ export const ReasoningPart = PartBase.extend({
+ type: z.literal("reasoning"),
+ text: z.string(),
+ metadata: z.record(z.string(), z.any()).optional(),
+ time: z.object({
+ start: z.number(),
+ end: z.number().optional(),
}),
- }),
-})
- .annotate({ identifier: "StepFinishPart" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type StepFinishPart = Types.DeepMutable>
-
-export const ToolStatePending = Schema.Struct({
- status: Schema.Literal("pending"),
- input: Schema.Record(Schema.String, Schema.Any),
- raw: Schema.String,
-})
- .annotate({ identifier: "ToolStatePending" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type ToolStatePending = Types.DeepMutable>
-
-export const ToolStateRunning = Schema.Struct({
- status: Schema.Literal("running"),
- input: Schema.Record(Schema.String, Schema.Any),
- title: Schema.optional(Schema.String),
- metadata: Schema.optional(Schema.Record(Schema.String, Schema.Any)),
- time: Schema.Struct({
- start: Schema.Number,
- }),
-})
- .annotate({ identifier: "ToolStateRunning" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type ToolStateRunning = Types.DeepMutable>
-
-export const ToolStateCompleted = Schema.Struct({
- status: Schema.Literal("completed"),
- input: Schema.Record(Schema.String, Schema.Any),
- output: Schema.String,
- title: Schema.String,
- metadata: Schema.Record(Schema.String, Schema.Any),
- time: Schema.Struct({
- start: Schema.Number,
- end: Schema.Number,
- compacted: Schema.optional(Schema.Number),
- }),
- attachments: Schema.optional(Schema.Array(FilePart)),
-})
- .annotate({ identifier: "ToolStateCompleted" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type ToolStateCompleted = Types.DeepMutable>
-
-function truncateToolOutput(text: string, maxChars?: number) {
- if (!maxChars || text.length <= maxChars) return text
- const omitted = text.length - maxChars
- return `${text.slice(0, maxChars)}\n[Tool output truncated for compaction: omitted ${omitted} chars]`
-}
-
-export const ToolStateError = Schema.Struct({
- status: Schema.Literal("error"),
- input: Schema.Record(Schema.String, Schema.Any),
- error: Schema.String,
- metadata: Schema.optional(Schema.Record(Schema.String, Schema.Any)),
- time: Schema.Struct({
- start: Schema.Number,
- end: Schema.Number,
- }),
-})
- .annotate({ identifier: "ToolStateError" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type ToolStateError = Types.DeepMutable>
-
-const _ToolState = Schema.Union([ToolStatePending, ToolStateRunning, ToolStateCompleted, ToolStateError]).annotate({
- discriminator: "status",
- identifier: "ToolState",
-})
-// Cast the derived zod so downstream z.infer sees the same mutable shape that
-// our exported TS types expose (the pre-migration Zod inferences were mutable).
-export const ToolState = Object.assign(_ToolState, {
- zod: zod(_ToolState) as unknown as z.ZodType<
- ToolStatePending | ToolStateRunning | ToolStateCompleted | ToolStateError
- >,
-})
-export type ToolState = ToolStatePending | ToolStateRunning | ToolStateCompleted | ToolStateError
-
-export const ToolPart = Schema.Struct({
- ...partBase,
- type: Schema.Literal("tool"),
- callID: Schema.String,
- tool: Schema.String,
- state: _ToolState,
- metadata: Schema.optional(Schema.Record(Schema.String, Schema.Any)),
-})
- .annotate({ identifier: "ToolPart" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type ToolPart = Omit>, "state"> & {
- state: ToolState
-}
-
-const messageBase = {
- id: MessageID,
- sessionID: SessionID,
-}
-
-export const User = Schema.Struct({
- ...messageBase,
- role: Schema.Literal("user"),
- time: Schema.Struct({
- created: Schema.Number,
- }),
- format: Schema.optional(_Format),
- summary: Schema.optional(
- Schema.Struct({
- title: Schema.optional(Schema.String),
- body: Schema.optional(Schema.String),
- diffs: Schema.Array(Snapshot.FileDiff),
+ }).meta({
+ ref: "ReasoningPart",
+ })
+ export type ReasoningPart = z.infer<typeof ReasoningPart>
+
+ const FilePartSourceBase = z.object({
+ text: z
+ .object({
+ value: z.string(),
+ start: z.number().int(),
+ end: z.number().int(),
+ })
+ .meta({
+ ref: "FilePartSourceText",
+ }),
+ })
+
+ export const FileSource = FilePartSourceBase.extend({
+ type: z.literal("file"),
+ path: z.string(),
+ }).meta({
+ ref: "FileSource",
+ })
+
+ export const SymbolSource = FilePartSourceBase.extend({
+ type: z.literal("symbol"),
+ path: z.string(),
+ range: LSP.Range,
+ name: z.string(),
+ kind: z.number().int(),
+ }).meta({
+ ref: "SymbolSource",
+ })
+
+ export const ResourceSource = FilePartSourceBase.extend({
+ type: z.literal("resource"),
+ clientName: z.string(),
+ uri: z.string(),
+ }).meta({
+ ref: "ResourceSource",
+ })
+
+ export const FilePartSource = z.discriminatedUnion("type", [FileSource, SymbolSource, ResourceSource]).meta({
+ ref: "FilePartSource",
+ })
+
+ export const FilePart = PartBase.extend({
+ type: z.literal("file"),
+ mime: z.string(),
+ filename: z.string().optional(),
+ url: z.string(),
+ source: FilePartSource.optional(),
+ }).meta({
+ ref: "FilePart",
+ })
+ export type FilePart = z.infer<typeof FilePart>
+
+ export const AgentPart = PartBase.extend({
+ type: z.literal("agent"),
+ name: z.string(),
+ source: z
+ .object({
+ value: z.string(),
+ start: z.number().int(),
+ end: z.number().int(),
+ })
+ .optional(),
+ }).meta({
+ ref: "AgentPart",
+ })
+ export type AgentPart = z.infer<typeof AgentPart>
+
+ export const CompactionPart = PartBase.extend({
+ type: z.literal("compaction"),
+ auto: z.boolean(),
+ overflow: z.boolean().optional(),
+ }).meta({
+ ref: "CompactionPart",
+ })
+ export type CompactionPart = z.infer<typeof CompactionPart>
+
+ export const SubtaskPart = PartBase.extend({
+ type: z.literal("subtask"),
+ prompt: z.string(),
+ description: z.string(),
+ agent: z.string(),
+ model: z
+ .object({
+ providerID: ProviderID.zod,
+ modelID: ModelID.zod,
+ })
+ .optional(),
+ command: z.string().optional(),
+ }).meta({
+ ref: "SubtaskPart",
+ })
+ export type SubtaskPart = z.infer<typeof SubtaskPart>
+
+ export const RetryPart = PartBase.extend({
+ type: z.literal("retry"),
+ attempt: z.number(),
+ error: APIError.Schema,
+ time: z.object({
+ created: z.number(),
}),
- ),
- agent: Schema.String,
- model: Schema.Struct({
- providerID: ProviderID,
- modelID: ModelID,
- variant: Schema.optional(Schema.String),
- }),
- system: Schema.optional(Schema.String),
- tools: Schema.optional(Schema.Record(Schema.String, Schema.Boolean)),
-})
- .annotate({ identifier: "UserMessage" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type User = Types.DeepMutable>
-
-const _Part = Schema.Union([
- TextPart,
- SubtaskPart,
- ReasoningPart,
- FilePart,
- ToolPart,
- StepStartPart,
- StepFinishPart,
- SnapshotPart,
- PatchPart,
- AgentPart,
- RetryPart,
- CompactionPart,
-]).annotate({ discriminator: "type", identifier: "Part" })
-export const Part = Object.assign(_Part, {
- zod: zod(_Part) as unknown as z.ZodType<
- | TextPart
- | SubtaskPart
- | ReasoningPart
- | FilePart
- | ToolPart
- | StepStartPart
- | StepFinishPart
- | SnapshotPart
- | PatchPart
- | AgentPart
- | RetryPart
- | CompactionPart
- >,
-})
-export type Part =
- | TextPart
- | SubtaskPart
- | ReasoningPart
- | FilePart
- | ToolPart
- | StepStartPart
- | StepFinishPart
- | SnapshotPart
- | PatchPart
- | AgentPart
- | RetryPart
- | CompactionPart
-
-// Errors are still NamedError-based Zod; bridge via ZodOverride so the derived
-// Zod + JSON Schema emit the original discriminatedUnion shape. Migrating the
-// error classes to Schema.TaggedErrorClass is a separate slice.
-const AssistantErrorZod = z.discriminatedUnion("name", [
- AuthError.Schema,
- NamedError.Unknown.Schema,
- OutputLengthError.Schema,
- AbortedError.Schema,
- StructuredOutputError.Schema,
- ContextOverflowError.Schema,
- APIError.Schema,
-])
-type AssistantError = z.infer
-
-// ── Prompt input schemas ─────────────────────────────────────────────────────
-//
-// Consumers of `SessionPrompt.PromptInput.parts` send part drafts without the
-// ambient IDs (`messageID`, `sessionID`) that live on stored parts, and may
-// omit `id` to let the server allocate one. These Schema-Struct variants
-// carry that shape, and `SessionPrompt.PromptInput` just references the
-// derived `.zod` (no omit/partial gymnastics needed at the call site).
-
-export const TextPartInput = Schema.Struct({
- id: Schema.optional(PartID),
- type: Schema.Literal("text"),
- text: Schema.String,
- synthetic: Schema.optional(Schema.Boolean),
- ignored: Schema.optional(Schema.Boolean),
- time: Schema.optional(
- Schema.Struct({
- start: Schema.Number,
- end: Schema.optional(Schema.Number),
+ }).meta({
+ ref: "RetryPart",
+ })
+ export type RetryPart = z.infer
+
+ export const StepStartPart = PartBase.extend({
+ type: z.literal("step-start"),
+ snapshot: z.string().optional(),
+ }).meta({
+ ref: "StepStartPart",
+ })
+ export type StepStartPart = z.infer
+
+ export const StepFinishPart = PartBase.extend({
+ type: z.literal("step-finish"),
+ reason: z.string(),
+ snapshot: z.string().optional(),
+ cost: z.number(),
+ tokens: z.object({
+ total: z.number().optional(),
+ input: z.number(),
+ output: z.number(),
+ reasoning: z.number(),
+ cache: z.object({
+ read: z.number(),
+ write: z.number(),
+ }),
}),
- ),
- metadata: Schema.optional(Schema.Record(Schema.String, Schema.Any)),
-})
- .annotate({ identifier: "TextPartInput" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type TextPartInput = Types.DeepMutable>
-
-export const FilePartInput = Schema.Struct({
- id: Schema.optional(PartID),
- type: Schema.Literal("file"),
- mime: Schema.String,
- filename: Schema.optional(Schema.String),
- url: Schema.String,
- source: Schema.optional(_FilePartSource),
-})
- .annotate({ identifier: "FilePartInput" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type FilePartInput = Types.DeepMutable>
-
-export const AgentPartInput = Schema.Struct({
- id: Schema.optional(PartID),
- type: Schema.Literal("agent"),
- name: Schema.String,
- source: Schema.optional(
- Schema.Struct({
- value: Schema.String,
- start: Schema.Int,
- end: Schema.Int,
+ }).meta({
+ ref: "StepFinishPart",
+ })
+ export type StepFinishPart = z.infer
+
+ export const ToolStatePending = z
+ .object({
+ status: z.literal("pending"),
+ input: z.record(z.string(), z.any()),
+ raw: z.string(),
+ })
+ .meta({
+ ref: "ToolStatePending",
+ })
+
+ export type ToolStatePending = z.infer
+
+ export const ToolStateRunning = z
+ .object({
+ status: z.literal("running"),
+ input: z.record(z.string(), z.any()),
+ title: z.string().optional(),
+ metadata: z.record(z.string(), z.any()).optional(),
+ time: z.object({
+ start: z.number(),
+ }),
+ })
+ .meta({
+ ref: "ToolStateRunning",
+ })
+ export type ToolStateRunning = z.infer
+
+ export const ToolStateCompleted = z
+ .object({
+ status: z.literal("completed"),
+ input: z.record(z.string(), z.any()),
+ output: z.string(),
+ title: z.string(),
+ metadata: z.record(z.string(), z.any()),
+ time: z.object({
+ start: z.number(),
+ end: z.number(),
+ compacted: z.number().optional(),
+ }),
+ attachments: FilePart.array().optional(),
+ })
+ .meta({
+ ref: "ToolStateCompleted",
+ })
+ export type ToolStateCompleted = z.infer
+
+ export const ToolStateError = z
+ .object({
+ status: z.literal("error"),
+ input: z.record(z.string(), z.any()),
+ error: z.string(),
+ metadata: z.record(z.string(), z.any()).optional(),
+ time: z.object({
+ start: z.number(),
+ end: z.number(),
+ }),
+ })
+ .meta({
+ ref: "ToolStateError",
+ })
+ export type ToolStateError = z.infer
+
+ export const ToolState = z
+ .discriminatedUnion("status", [ToolStatePending, ToolStateRunning, ToolStateCompleted, ToolStateError])
+ .meta({
+ ref: "ToolState",
+ })
+
+ export const ToolPart = PartBase.extend({
+ type: z.literal("tool"),
+ callID: z.string(),
+ tool: z.string(),
+ state: ToolState,
+ metadata: z.record(z.string(), z.any()).optional(),
+ }).meta({
+ ref: "ToolPart",
+ })
+ export type ToolPart = z.infer
+
+ const Base = z.object({
+ id: MessageID.zod,
+ sessionID: SessionID.zod,
+ })
+
+ export const User = Base.extend({
+ role: z.literal("user"),
+ time: z.object({
+ created: z.number(),
}),
- ),
-})
- .annotate({ identifier: "AgentPartInput" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type AgentPartInput = Types.DeepMutable>
-
-export const SubtaskPartInput = Schema.Struct({
- id: Schema.optional(PartID),
- type: Schema.Literal("subtask"),
- prompt: Schema.String,
- description: Schema.String,
- agent: Schema.String,
- model: Schema.optional(
- Schema.Struct({
- providerID: ProviderID,
- modelID: ModelID,
+ format: Format.optional(),
+ summary: z
+ .object({
+ title: z.string().optional(),
+ body: z.string().optional(),
+ diffs: Snapshot.FileDiff.array(),
+ })
+ .optional(),
+ agent: z.string(),
+ model: z.object({
+ providerID: ProviderID.zod,
+ modelID: ModelID.zod,
}),
- ),
- command: Schema.optional(Schema.String),
-})
- .annotate({ identifier: "SubtaskPartInput" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type SubtaskPartInput = Types.DeepMutable>
-
-export const Assistant = Schema.Struct({
- ...messageBase,
- role: Schema.Literal("assistant"),
- time: Schema.Struct({
- created: Schema.Number,
- completed: Schema.optional(Schema.Number),
- }),
- error: Schema.optional(Schema.Any.annotate({ [ZodOverride]: AssistantErrorZod })),
- parentID: MessageID,
- modelID: ModelID,
- providerID: ProviderID,
- /**
- * @deprecated
- */
- mode: Schema.String,
- agent: Schema.String,
- path: Schema.Struct({
- cwd: Schema.String,
- root: Schema.String,
- }),
- summary: Schema.optional(Schema.Boolean),
- cost: Schema.Number,
- tokens: Schema.Struct({
- total: Schema.optional(Schema.Number),
- input: Schema.Number,
- output: Schema.Number,
- reasoning: Schema.Number,
- cache: Schema.Struct({
- read: Schema.Number,
- write: Schema.Number,
+ system: z.string().optional(),
+ tools: z.record(z.string(), z.boolean()).optional(),
+ variant: z.string().optional(),
+ }).meta({
+ ref: "UserMessage",
+ })
+ export type User = z.infer
+
+ export const Part = z
+ .discriminatedUnion("type", [
+ TextPart,
+ SubtaskPart,
+ ReasoningPart,
+ FilePart,
+ ToolPart,
+ StepStartPart,
+ StepFinishPart,
+ SnapshotPart,
+ PatchPart,
+ AgentPart,
+ RetryPart,
+ CompactionPart,
+ ])
+ .meta({
+ ref: "Part",
+ })
+ export type Part = z.infer
+
+ export const Assistant = Base.extend({
+ role: z.literal("assistant"),
+ time: z.object({
+ created: z.number(),
+ completed: z.number().optional(),
}),
- }),
- structured: Schema.optional(Schema.Any),
- variant: Schema.optional(Schema.String),
- finish: Schema.optional(Schema.String),
-})
- .annotate({ identifier: "AssistantMessage" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type Assistant = Omit>, "error"> & {
- error?: AssistantError
-}
-
-const _Info = Schema.Union([User, Assistant]).annotate({ discriminator: "role", identifier: "Message" })
-export const Info = Object.assign(_Info, {
- zod: zod(_Info) as unknown as z.ZodType,
-})
-export type Info = User | Assistant
-
-const UpdatedEventSchema = Schema.Struct({
- sessionID: SessionID,
- info: _Info,
-})
-
-const RemovedEventSchema = Schema.Struct({
- sessionID: SessionID,
- messageID: MessageID,
-})
-
-const PartUpdatedEventSchema = Schema.Struct({
- sessionID: SessionID,
- part: _Part,
- time: Schema.Number,
-})
-
-const PartRemovedEventSchema = Schema.Struct({
- sessionID: SessionID,
- messageID: MessageID,
- partID: PartID,
-})
-
-export const Event = {
- Updated: SyncEvent.define({
- type: "message.updated",
- version: 1,
- aggregate: "sessionID",
- schema: UpdatedEventSchema,
- }),
- Removed: SyncEvent.define({
- type: "message.removed",
- version: 1,
- aggregate: "sessionID",
- schema: RemovedEventSchema,
- }),
- PartUpdated: SyncEvent.define({
- type: "message.part.updated",
- version: 1,
- aggregate: "sessionID",
- schema: PartUpdatedEventSchema,
- }),
- PartDelta: BusEvent.define(
- "message.part.delta",
- Schema.Struct({
- sessionID: SessionID,
- messageID: MessageID,
- partID: PartID,
- field: Schema.String,
- delta: Schema.String,
+ error: z
+ .discriminatedUnion("name", [
+ AuthError.Schema,
+ NamedError.Unknown.Schema,
+ OutputLengthError.Schema,
+ AbortedError.Schema,
+ StructuredOutputError.Schema,
+ ContextOverflowError.Schema,
+ APIError.Schema,
+ ])
+ .optional(),
+ parentID: MessageID.zod,
+ modelID: ModelID.zod,
+ providerID: ProviderID.zod,
+ /**
+ * @deprecated
+ */
+ mode: z.string(),
+ agent: z.string(),
+ path: z.object({
+ cwd: z.string(),
+ root: z.string(),
}),
- ),
- PartRemoved: SyncEvent.define({
- type: "message.part.removed",
- version: 1,
- aggregate: "sessionID",
- schema: PartRemovedEventSchema,
- }),
-}
-
-export const WithParts = Schema.Struct({
- info: _Info,
- parts: Schema.Array(_Part),
-}).pipe(withStatics((s) => ({ zod: zod(s) })))
-export type WithParts = {
- info: Info
- parts: Part[]
-}
-
-const Cursor = Schema.Struct({
- id: MessageID,
- time: Schema.Number,
-})
-type Cursor = typeof Cursor.Type
-
-const decodeCursor = Schema.decodeUnknownSync(Cursor)
-
-export const cursor = {
- encode(input: Cursor) {
- return Buffer.from(JSON.stringify(input)).toString("base64url")
- },
- decode(input: string) {
- return decodeCursor(JSON.parse(Buffer.from(input, "base64url").toString("utf8")))
- },
-}
-
-const info = (row: typeof MessageTable.$inferSelect) =>
- ({
- ...row.data,
- id: row.id,
- sessionID: row.session_id,
- }) as Info
-
-const part = (row: typeof PartTable.$inferSelect) =>
- ({
- ...row.data,
- id: row.id,
- sessionID: row.session_id,
- messageID: row.message_id,
- }) as Part
-
-const older = (row: Cursor) =>
- or(lt(MessageTable.time_created, row.time), and(eq(MessageTable.time_created, row.time), lt(MessageTable.id, row.id)))
-
-function hydrate(rows: (typeof MessageTable.$inferSelect)[]) {
- const ids = rows.map((row) => row.id)
- const partByMessage = new Map()
- if (ids.length > 0) {
- const partRows = Database.use((db) =>
- db
- .select()
- .from(PartTable)
- .where(inArray(PartTable.message_id, ids))
- .orderBy(PartTable.message_id, PartTable.id)
- .all(),
- )
- for (const row of partRows) {
- const next = part(row)
- const list = partByMessage.get(row.message_id)
- if (list) list.push(next)
- else partByMessage.set(row.message_id, [next])
- }
+ summary: z.boolean().optional(),
+ cost: z.number(),
+ tokens: z.object({
+ total: z.number().optional(),
+ input: z.number(),
+ output: z.number(),
+ reasoning: z.number(),
+ cache: z.object({
+ read: z.number(),
+ write: z.number(),
+ }),
+ }),
+ structured: z.any().optional(),
+ variant: z.string().optional(),
+ finish: z.string().optional(),
+ }).meta({
+ ref: "AssistantMessage",
+ })
+ export type Assistant = z.infer
+
+ export const Info = z.discriminatedUnion("role", [User, Assistant]).meta({
+ ref: "Message",
+ })
+ export type Info = z.infer
+
+ export const Event = {
+ Updated: BusEvent.define(
+ "message.updated",
+ z.object({
+ info: Info,
+ }),
+ ),
+ Removed: BusEvent.define(
+ "message.removed",
+ z.object({
+ sessionID: SessionID.zod,
+ messageID: MessageID.zod,
+ }),
+ ),
+ PartUpdated: BusEvent.define(
+ "message.part.updated",
+ z.object({
+ part: Part,
+ }),
+ ),
+ PartDelta: BusEvent.define(
+ "message.part.delta",
+ z.object({
+ sessionID: SessionID.zod,
+ messageID: MessageID.zod,
+ partID: PartID.zod,
+ field: z.string(),
+ delta: z.string(),
+ }),
+ ),
+ PartRemoved: BusEvent.define(
+ "message.part.removed",
+ z.object({
+ sessionID: SessionID.zod,
+ messageID: MessageID.zod,
+ partID: PartID.zod,
+ }),
+ ),
}
- return rows.map((row) => ({
- info: info(row),
- parts: partByMessage.get(row.id) ?? [],
- }))
-}
+ export const WithParts = z.object({
+ info: Info,
+ parts: z.array(Part),
+ })
+ export type WithParts = z.infer
+
+ const Cursor = z.object({
+ id: MessageID.zod,
+ time: z.number(),
+ })
+ type Cursor = z.infer
+
+ export const cursor = {
+ encode(input: Cursor) {
+ return Buffer.from(JSON.stringify(input)).toString("base64url")
+ },
+ decode(input: string) {
+ return Cursor.parse(JSON.parse(Buffer.from(input, "base64url").toString("utf8")))
+ },
+ }
-function providerMeta(metadata: Record | undefined) {
- if (!metadata) return undefined
- const { providerExecuted: _, ...rest } = metadata
- return Object.keys(rest).length > 0 ? rest : undefined
-}
+ const info = (row: typeof MessageTable.$inferSelect) =>
+ ({
+ ...row.data,
+ id: row.id,
+ sessionID: row.session_id,
+ }) as MessageV2.Info
+
+ const part = (row: typeof PartTable.$inferSelect) =>
+ ({
+ ...row.data,
+ id: row.id,
+ sessionID: row.session_id,
+ messageID: row.message_id,
+ }) as MessageV2.Part
+
+ const older = (row: Cursor) =>
+ or(
+ lt(MessageTable.time_created, row.time),
+ and(eq(MessageTable.time_created, row.time), lt(MessageTable.id, row.id)),
+ )
-export const toModelMessagesEffect = Effect.fnUntraced(function* (
- input: WithParts[],
- model: Provider.Model,
- options?: { stripMedia?: boolean; toolOutputMaxChars?: number },
-) {
- const result: UIMessage[] = []
- const toolNames = new Set()
- // Track media from tool results that need to be injected as user messages
- // for providers that don't support media in tool results.
- //
- // OpenAI-compatible APIs only support string content in tool results, so we need
- // to extract media and inject as user messages. Other SDKs (anthropic, google,
- // bedrock) handle type: "content" with media parts natively.
- //
- // Only apply this workaround if the model actually supports image input -
- // otherwise there's no point extracting images.
- const supportsMediaInToolResults = (() => {
- if (model.api.npm === "@ai-sdk/anthropic") return true
- if (model.api.npm === "@ai-sdk/openai") return true
- if (model.api.npm === "@ai-sdk/amazon-bedrock") return true
- if (model.api.npm === "@ai-sdk/google-vertex/anthropic") return true
- if (model.api.npm === "@ai-sdk/google") {
- const id = model.api.id.toLowerCase()
- return id.includes("gemini-3") && !id.includes("gemini-2")
+ async function hydrate(rows: (typeof MessageTable.$inferSelect)[]) {
+ const ids = rows.map((row) => row.id)
+ const partByMessage = new Map()
+ if (ids.length > 0) {
+ const partRows = Database.use((db) =>
+ db
+ .select()
+ .from(PartTable)
+ .where(inArray(PartTable.message_id, ids))
+ .orderBy(PartTable.message_id, PartTable.id)
+ .all(),
+ )
+ for (const row of partRows) {
+ const next = part(row)
+ const list = partByMessage.get(row.message_id)
+ if (list) list.push(next)
+ else partByMessage.set(row.message_id, [next])
+ }
}
- return false
- })()
- const toModelOutput = (options: { toolCallId: string; input: unknown; output: unknown }) => {
- const output = options.output
- if (typeof output === "string") {
- return { type: "text", value: output }
- }
+ return rows.map((row) => ({
+ info: info(row),
+ parts: partByMessage.get(row.id) ?? [],
+ }))
+ }
- if (typeof output === "object") {
- const outputObject = output as {
- text: string
- attachments?: Array<{ mime: string; url: string }>
+ export function toModelMessages(
+ input: WithParts[],
+ model: Provider.Model,
+ options?: { stripMedia?: boolean },
+ ): ModelMessage[] {
+ const result: UIMessage[] = []
+ const toolNames = new Set()
+ // Track media from tool results that need to be injected as user messages
+ // for providers that don't support media in tool results.
+ //
+ // OpenAI-compatible APIs only support string content in tool results, so we need
+ // to extract media and inject as user messages. Other SDKs (anthropic, google,
+ // bedrock) handle type: "content" with media parts natively.
+ //
+ // Only apply this workaround if the model actually supports image input -
+ // otherwise there's no point extracting images.
+ const supportsMediaInToolResults = (() => {
+ if (model.api.npm === "@ai-sdk/anthropic") return true
+ if (model.api.npm === "@ai-sdk/openai") return true
+ if (model.api.npm === "@ai-sdk/amazon-bedrock") return true
+ if (model.api.npm === "@ai-sdk/google-vertex/anthropic") return true
+ if (model.api.npm === "@ai-sdk/google") {
+ const id = model.api.id.toLowerCase()
+ return id.includes("gemini-3") && !id.includes("gemini-2")
}
- const attachments = (outputObject.attachments ?? []).filter((attachment) => {
- return attachment.url.startsWith("data:") && attachment.url.includes(",")
- })
+ return false
+ })()
- return {
- type: "content",
- value: [
- { type: "text", text: outputObject.text },
- ...attachments.map((attachment) => ({
- type: "media",
- mediaType: attachment.mime,
- data: iife(() => {
- const commaIndex = attachment.url.indexOf(",")
- return commaIndex === -1 ? attachment.url : attachment.url.slice(commaIndex + 1)
- }),
- })),
- ],
+ const toModelOutput = (output: unknown) => {
+ if (typeof output === "string") {
+ return { type: "text", value: output }
}
+
+ if (typeof output === "object") {
+ const outputObject = output as {
+ text: string
+ attachments?: Array<{ mime: string; url: string }>
+ }
+ const attachments = (outputObject.attachments ?? []).filter((attachment) => {
+ return attachment.url.startsWith("data:") && attachment.url.includes(",")
+ })
+
+ return {
+ type: "content",
+ value: [
+ { type: "text", text: outputObject.text },
+ ...attachments.map((attachment) => ({
+ type: "media",
+ mediaType: attachment.mime,
+ data: iife(() => {
+ const commaIndex = attachment.url.indexOf(",")
+ return commaIndex === -1 ? attachment.url : attachment.url.slice(commaIndex + 1)
+ }),
+ })),
+ ],
+ }
+ }
+
+ return { type: "json", value: output as never }
}
- return { type: "json", value: output as never }
- }
+ for (const msg of input) {
+ if (msg.parts.length === 0) continue
- for (const msg of input) {
- if (msg.parts.length === 0) continue
+ if (msg.info.role === "user") {
+ const userMessage: UIMessage = {
+ id: msg.info.id,
+ role: "user",
+ parts: [],
+ }
+ result.push(userMessage)
+ for (const part of msg.parts) {
+ if (part.type === "text" && !part.ignored)
+ userMessage.parts.push({
+ type: "text",
+ text: part.text,
+ })
+ // text/plain and directory files are converted into text parts, ignore them
+ if (part.type === "file" && part.mime !== "text/plain" && part.mime !== "application/x-directory") {
+ if (options?.stripMedia && isMedia(part.mime)) {
+ userMessage.parts.push({
+ type: "text",
+ text: `[Attached ${part.mime}: ${part.filename ?? "file"}]`,
+ })
+ } else {
+ userMessage.parts.push({
+ type: "file",
+ url: part.url,
+ mediaType: part.mime,
+ filename: part.filename,
+ })
+ }
+ }
- if (msg.info.role === "user") {
- const userMessage: UIMessage = {
- id: msg.info.id,
- role: "user",
- parts: [],
- }
- result.push(userMessage)
- for (const part of msg.parts) {
- if (part.type === "text" && !part.ignored)
- userMessage.parts.push({
- type: "text",
- text: part.text,
- })
- // text/plain and directory files are converted into text parts, ignore them
- if (part.type === "file" && part.mime !== "text/plain" && part.mime !== "application/x-directory") {
- if (options?.stripMedia && isMedia(part.mime)) {
+ if (part.type === "compaction") {
userMessage.parts.push({
type: "text",
- text: `[Attached ${part.mime}: ${part.filename ?? "file"}]`,
+ text: "What did we do so far?",
})
- } else {
+ }
+ if (part.type === "subtask") {
userMessage.parts.push({
- type: "file",
- url: part.url,
- mediaType: part.mime,
- filename: part.filename,
+ type: "text",
+ text: "The following tool was executed by the user",
})
}
}
+ }
- if (part.type === "compaction") {
- userMessage.parts.push({
- type: "text",
- text: "What did we do so far?",
- })
+ if (msg.info.role === "assistant") {
+ const differentModel = `${model.providerID}/${model.id}` !== `${msg.info.providerID}/${msg.info.modelID}`
+ const media: Array<{ mime: string; url: string }> = []
+
+ if (
+ msg.info.error &&
+ !(
+ MessageV2.AbortedError.isInstance(msg.info.error) &&
+ msg.parts.some((part) => part.type !== "step-start" && part.type !== "reasoning")
+ )
+ ) {
+ continue
}
- if (part.type === "subtask") {
- userMessage.parts.push({
- type: "text",
- text: "The following tool was executed by the user",
- })
+ const assistantMessage: UIMessage = {
+ id: msg.info.id,
+ role: "assistant",
+ parts: [],
}
- }
- }
-
- if (msg.info.role === "assistant") {
- const differentModel = `${model.providerID}/${model.id}` !== `${msg.info.providerID}/${msg.info.modelID}`
- const media: Array<{ mime: string; url: string }> = []
-
- if (
- msg.info.error &&
- !(
- AbortedError.isInstance(msg.info.error) &&
- msg.parts.some((part) => part.type !== "step-start" && part.type !== "reasoning")
- )
- ) {
- continue
- }
- const assistantMessage: UIMessage = {
- id: msg.info.id,
- role: "assistant",
- parts: [],
- }
- for (const part of msg.parts) {
- if (part.type === "text")
- assistantMessage.parts.push({
- type: "text",
- text: part.text,
- ...(differentModel ? {} : { providerMetadata: part.metadata }),
- })
- if (part.type === "step-start")
- assistantMessage.parts.push({
- type: "step-start",
- })
- if (part.type === "tool") {
- toolNames.add(part.tool)
- if (part.state.status === "completed") {
- const outputText = part.state.time.compacted
- ? "[Old tool result content cleared]"
- : truncateToolOutput(part.state.output, options?.toolOutputMaxChars)
- const attachments = part.state.time.compacted || options?.stripMedia ? [] : (part.state.attachments ?? [])
-
- // For providers that don't support media in tool results, extract media files
- // (images, PDFs) to be sent as a separate user message
- const mediaAttachments = attachments.filter((a) => isMedia(a.mime))
- const nonMediaAttachments = attachments.filter((a) => !isMedia(a.mime))
- if (!supportsMediaInToolResults && mediaAttachments.length > 0) {
- media.push(...mediaAttachments)
- }
- const finalAttachments = supportsMediaInToolResults ? attachments : nonMediaAttachments
-
- const output =
- finalAttachments.length > 0
- ? {
- text: outputText,
- attachments: finalAttachments,
- }
- : outputText
-
+ for (const part of msg.parts) {
+ if (part.type === "text")
assistantMessage.parts.push({
- type: ("tool-" + part.tool) as `tool-${string}`,
- state: "output-available",
- toolCallId: part.callID,
- input: part.state.input,
- output,
- ...(part.metadata?.providerExecuted ? { providerExecuted: true } : {}),
- ...(differentModel ? {} : { callProviderMetadata: providerMeta(part.metadata) }),
+ type: "text",
+ text: part.text,
+ ...(differentModel ? {} : { providerMetadata: part.metadata }),
})
- }
- if (part.state.status === "error") {
- const output = part.state.metadata?.interrupted === true ? part.state.metadata.output : undefined
- if (typeof output === "string") {
+ if (part.type === "step-start")
+ assistantMessage.parts.push({
+ type: "step-start",
+ })
+ if (part.type === "tool") {
+ toolNames.add(part.tool)
+ if (part.state.status === "completed") {
+ const outputText = part.state.time.compacted ? "[Old tool result content cleared]" : part.state.output
+ const attachments = part.state.time.compacted || options?.stripMedia ? [] : (part.state.attachments ?? [])
+
+ // For providers that don't support media in tool results, extract media files
+ // (images, PDFs) to be sent as a separate user message
+ const mediaAttachments = attachments.filter((a) => isMedia(a.mime))
+ const nonMediaAttachments = attachments.filter((a) => !isMedia(a.mime))
+ if (!supportsMediaInToolResults && mediaAttachments.length > 0) {
+ media.push(...mediaAttachments)
+ }
+ const finalAttachments = supportsMediaInToolResults ? attachments : nonMediaAttachments
+
+ const output =
+ finalAttachments.length > 0
+ ? {
+ text: outputText,
+ attachments: finalAttachments,
+ }
+ : outputText
+
assistantMessage.parts.push({
type: ("tool-" + part.tool) as `tool-${string}`,
state: "output-available",
toolCallId: part.callID,
input: part.state.input,
output,
- ...(part.metadata?.providerExecuted ? { providerExecuted: true } : {}),
- ...(differentModel ? {} : { callProviderMetadata: providerMeta(part.metadata) }),
+ ...(differentModel ? {} : { callProviderMetadata: part.metadata }),
})
- } else {
+ }
+ if (part.state.status === "error")
assistantMessage.parts.push({
type: ("tool-" + part.tool) as `tool-${string}`,
state: "output-error",
toolCallId: part.callID,
input: part.state.input,
errorText: part.state.error,
- ...(part.metadata?.providerExecuted ? { providerExecuted: true } : {}),
- ...(differentModel ? {} : { callProviderMetadata: providerMeta(part.metadata) }),
+ ...(differentModel ? {} : { callProviderMetadata: part.metadata }),
+ })
+ // Handle pending/running tool calls to prevent dangling tool_use blocks
+ // Anthropic/Claude APIs require every tool_use to have a corresponding tool_result
+ if (part.state.status === "pending" || part.state.status === "running")
+ assistantMessage.parts.push({
+ type: ("tool-" + part.tool) as `tool-${string}`,
+ state: "output-error",
+ toolCallId: part.callID,
+ input: part.state.input,
+ errorText: "[Tool execution was interrupted]",
+ ...(differentModel ? {} : { callProviderMetadata: part.metadata }),
})
- }
}
- // Handle pending/running tool calls to prevent dangling tool_use blocks
- // Anthropic/Claude APIs require every tool_use to have a corresponding tool_result
- if (part.state.status === "pending" || part.state.status === "running")
+ if (part.type === "reasoning") {
assistantMessage.parts.push({
- type: ("tool-" + part.tool) as `tool-${string}`,
- state: "output-error",
- toolCallId: part.callID,
- input: part.state.input,
- errorText: "[Tool execution was interrupted]",
- ...(part.metadata?.providerExecuted ? { providerExecuted: true } : {}),
- ...(differentModel ? {} : { callProviderMetadata: providerMeta(part.metadata) }),
+ type: "reasoning",
+ text: part.text,
+ ...(differentModel ? {} : { providerMetadata: part.metadata }),
})
+ }
}
- if (part.type === "reasoning") {
- assistantMessage.parts.push({
- type: "reasoning",
- text: part.text,
- ...(differentModel ? {} : { providerMetadata: part.metadata }),
- })
- }
- }
- if (assistantMessage.parts.length > 0) {
- result.push(assistantMessage)
- // Inject pending media as a user message for providers that don't support
- // media (images, PDFs) in tool results
- if (media.length > 0) {
- result.push({
- id: MessageID.ascending(),
- role: "user",
- parts: [
- {
- type: "text" as const,
- text: SYNTHETIC_ATTACHMENT_PROMPT,
- },
- ...media.map((attachment) => ({
- type: "file" as const,
- url: attachment.url,
- mediaType: attachment.mime,
- })),
- ],
- })
+ if (assistantMessage.parts.length > 0) {
+ result.push(assistantMessage)
+ // Inject pending media as a user message for providers that don't support
+ // media (images, PDFs) in tool results
+ if (media.length > 0) {
+ result.push({
+ id: MessageID.ascending(),
+ role: "user",
+ parts: [
+ {
+ type: "text" as const,
+ text: "Attached image(s) from tool result:",
+ },
+ ...media.map((attachment) => ({
+ type: "file" as const,
+ url: attachment.url,
+ mediaType: attachment.mime,
+ })),
+ ],
+ })
+ }
}
}
}
- }
- const tools = Object.fromEntries(Array.from(toolNames).map((toolName) => [toolName, { toModelOutput }]))
+ const tools = Object.fromEntries(Array.from(toolNames).map((toolName) => [toolName, { toModelOutput }]))
- return yield* Effect.promise(() =>
- convertToModelMessages(
+ return convertToModelMessages(
result.filter((msg) => msg.parts.some((part) => part.type !== "step-start")),
{
//@ts-expect-error (convertToModelMessages expects a ToolSet but only actually needs tools[name]?.toModelOutput)
tools,
},
- ),
- )
-})
-
-export function toModelMessages(
- input: WithParts[],
- model: Provider.Model,
- options?: { stripMedia?: boolean; toolOutputMaxChars?: number },
-): Promise {
- return Effect.runPromise(toModelMessagesEffect(input, model, options).pipe(Effect.provide(EffectLogger.layer)))
-}
-
-export function page(input: { sessionID: SessionID; limit: number; before?: string }) {
- const before = input.before ? cursor.decode(input.before) : undefined
- const where = before
- ? and(eq(MessageTable.session_id, input.sessionID), older(before))
- : eq(MessageTable.session_id, input.sessionID)
- const rows = Database.use((db) =>
- db
- .select()
- .from(MessageTable)
- .where(where)
- .orderBy(desc(MessageTable.time_created), desc(MessageTable.id))
- .limit(input.limit + 1)
- .all(),
- )
- if (rows.length === 0) {
- const row = Database.use((db) =>
- db.select({ id: SessionTable.id }).from(SessionTable).where(eq(SessionTable.id, input.sessionID)).get(),
)
- if (!row) throw new NotFoundError({ message: `Session not found: ${input.sessionID}` })
- return {
- items: [] as WithParts[],
- more: false,
- }
}
- const more = rows.length > input.limit
- const slice = more ? rows.slice(0, input.limit) : rows
- const items = hydrate(slice)
- items.reverse()
- const tail = slice.at(-1)
- return {
- items,
- more,
- cursor: more && tail ? cursor.encode({ id: tail.id, time: tail.time_created }) : undefined,
- }
-}
+ export const page = fn(
+ z.object({
+ sessionID: SessionID.zod,
+ limit: z.number().int().positive(),
+ before: z.string().optional(),
+ }),
+ async (input) => {
+ const before = input.before ? cursor.decode(input.before) : undefined
+ const where = before
+ ? and(eq(MessageTable.session_id, input.sessionID), older(before))
+ : eq(MessageTable.session_id, input.sessionID)
+ const rows = Database.use((db) =>
+ db
+ .select()
+ .from(MessageTable)
+ .where(where)
+ .orderBy(desc(MessageTable.time_created), desc(MessageTable.id))
+ .limit(input.limit + 1)
+ .all(),
+ )
+ if (rows.length === 0) {
+ const row = Database.use((db) =>
+ db.select({ id: SessionTable.id }).from(SessionTable).where(eq(SessionTable.id, input.sessionID)).get(),
+ )
+ if (!row) throw new NotFoundError({ message: `Session not found: ${input.sessionID}` })
+ return {
+ items: [] as MessageV2.WithParts[],
+ more: false,
+ }
+ }
-export function* stream(sessionID: SessionID) {
- const size = 50
- let before: string | undefined
- while (true) {
- const next = page({ sessionID, limit: size, before })
- if (next.items.length === 0) break
- for (let i = next.items.length - 1; i >= 0; i--) {
- yield next.items[i]
+ const more = rows.length > input.limit
+ const page = more ? rows.slice(0, input.limit) : rows
+ const items = await hydrate(page)
+ items.reverse()
+ const tail = page.at(-1)
+ return {
+ items,
+ more,
+ cursor: more && tail ? cursor.encode({ id: tail.id, time: tail.time_created }) : undefined,
+ }
+ },
+ )
+
+ export const stream = fn(SessionID.zod, async function* (sessionID) {
+ const size = 50
+ let before: string | undefined
+ while (true) {
+ const next = await page({ sessionID, limit: size, before })
+ if (next.items.length === 0) break
+ for (let i = next.items.length - 1; i >= 0; i--) {
+ yield next.items[i]
+ }
+ if (!next.more || !next.cursor) break
+ before = next.cursor
}
- if (!next.more || !next.cursor) break
- before = next.cursor
- }
-}
+ })
-export function parts(message_id: MessageID) {
- const rows = Database.use((db) =>
- db.select().from(PartTable).where(eq(PartTable.message_id, message_id)).orderBy(PartTable.id).all(),
- )
- return rows.map(
- (row) =>
- ({
- ...row.data,
- id: row.id,
- sessionID: row.session_id,
- messageID: row.message_id,
- }) as Part,
- )
-}
+ export const parts = fn(MessageID.zod, async (message_id) => {
+ const rows = Database.use((db) =>
+ db.select().from(PartTable).where(eq(PartTable.message_id, message_id)).orderBy(PartTable.id).all(),
+ )
+ return rows.map(
+ (row) => ({ ...row.data, id: row.id, sessionID: row.session_id, messageID: row.message_id }) as MessageV2.Part,
+ )
+ })
-export function get(input: { sessionID: SessionID; messageID: MessageID }): WithParts {
- const row = Database.use((db) =>
- db
- .select()
- .from(MessageTable)
- .where(and(eq(MessageTable.id, input.messageID), eq(MessageTable.session_id, input.sessionID)))
- .get(),
+ export const get = fn(
+ z.object({
+ sessionID: SessionID.zod,
+ messageID: MessageID.zod,
+ }),
+ async (input): Promise => {
+ const row = Database.use((db) =>
+ db
+ .select()
+ .from(MessageTable)
+ .where(and(eq(MessageTable.id, input.messageID), eq(MessageTable.session_id, input.sessionID)))
+ .get(),
+ )
+ if (!row) throw new NotFoundError({ message: `Message not found: ${input.messageID}` })
+ return {
+ info: info(row),
+ parts: await parts(input.messageID),
+ }
+ },
)
- if (!row) throw new NotFoundError({ message: `Message not found: ${input.messageID}` })
- return {
- info: info(row),
- parts: parts(input.messageID),
- }
-}
-export function filterCompacted(msgs: Iterable) {
- const result = [] as WithParts[]
- const completed = new Set()
- let retain: MessageID | undefined
- for (const msg of msgs) {
- result.push(msg)
- if (retain) {
- if (msg.info.id === retain) break
- continue
- }
- if (msg.info.role === "user" && completed.has(msg.info.id)) {
- const part = msg.parts.find((item): item is CompactionPart => item.type === "compaction")
- if (!part) continue
- if (!part.tail_start_id) break
- retain = part.tail_start_id
- if (msg.info.id === retain) break
- continue
+ export async function filterCompacted(stream: AsyncIterable) {
+ const result = [] as MessageV2.WithParts[]
+ const completed = new Set()
+ for await (const msg of stream) {
+ result.push(msg)
+ if (
+ msg.info.role === "user" &&
+ completed.has(msg.info.id) &&
+ msg.parts.some((part) => part.type === "compaction")
+ )
+ break
+ if (msg.info.role === "assistant" && msg.info.summary && msg.info.finish && !msg.info.error)
+ completed.add(msg.info.parentID)
}
- if (msg.info.role === "user" && completed.has(msg.info.id) && msg.parts.some((part) => part.type === "compaction"))
- break
- if (msg.info.role === "assistant" && msg.info.summary && msg.info.finish && !msg.info.error)
- completed.add(msg.info.parentID)
+ result.reverse()
+ return result
}
- result.reverse()
- return result
-}
-export const filterCompactedEffect = Effect.fnUntraced(function* (sessionID: SessionID) {
- return filterCompacted(stream(sessionID))
-})
-
-export function fromError(
- e: unknown,
- ctx: { providerID: ProviderID; aborted?: boolean },
-): NonNullable {
- switch (true) {
- case e instanceof DOMException && e.name === "AbortError":
- return new AbortedError(
- { message: e.message },
- {
- cause: e,
- },
- ).toObject()
- case OutputLengthError.isInstance(e):
- return e
- case LoadAPIKeyError.isInstance(e):
- return new AuthError(
- {
- providerID: ctx.providerID,
- message: e.message,
- },
- { cause: e },
- ).toObject()
- case (e as SystemError)?.code === "ECONNRESET":
- return new APIError(
- {
- message: "Connection reset by server",
- isRetryable: true,
- metadata: {
- code: (e as SystemError).code ?? "",
- syscall: (e as SystemError).syscall ?? "",
- message: (e as SystemError).message ?? "",
+ export function fromError(e: unknown, ctx: { providerID: ProviderID }): NonNullable {
+ switch (true) {
+ case e instanceof DOMException && e.name === "AbortError":
+ return new MessageV2.AbortedError(
+ { message: e.message },
+ {
+ cause: e,
},
- },
- { cause: e },
- ).toObject()
- case e instanceof Error && (e as FetchDecompressionError).code === "ZlibError":
- if (ctx.aborted) {
- return new AbortedError({ message: e.message }, { cause: e }).toObject()
- }
- return new APIError(
- {
- message: "Response decompression failed",
- isRetryable: true,
- metadata: {
- code: (e as FetchDecompressionError).code,
+ ).toObject()
+ case MessageV2.OutputLengthError.isInstance(e):
+ return e
+ case LoadAPIKeyError.isInstance(e):
+ return new MessageV2.AuthError(
+ {
+ providerID: ctx.providerID,
message: e.message,
},
- },
- { cause: e },
- ).toObject()
- case APICallError.isInstance(e):
- const parsed = ProviderError.parseAPICallError({
- providerID: ctx.providerID,
- error: e,
- })
- if (parsed.type === "context_overflow") {
- return new ContextOverflowError(
+ { cause: e },
+ ).toObject()
+ case NETWORK_ERROR_CODES.has((e as SystemError)?.code ?? ""):
+ return new MessageV2.APIError(
+ {
+ message: "Network error",
+ isRetryable: true,
+ metadata: {
+ code: (e as SystemError).code ?? "",
+ syscall: (e as SystemError).syscall ?? "",
+ message: (e as SystemError).message ?? "",
+ },
+ },
+ { cause: e },
+ ).toObject()
+ case e instanceof Error && e.message === "SSE read timed out":
+ return new MessageV2.APIError(
+ {
+ message: "SSE read timed out",
+ isRetryable: true,
+ metadata: {
+ message: e.message,
+ },
+ },
+ { cause: e },
+ ).toObject()
+ case APICallError.isInstance(e):
+ const parsed = ProviderError.parseAPICallError({
+ providerID: ctx.providerID,
+ error: e,
+ })
+ if (parsed.type === "context_overflow") {
+ return new MessageV2.ContextOverflowError(
+ {
+ message: parsed.message,
+ responseBody: parsed.responseBody,
+ },
+ { cause: e },
+ ).toObject()
+ }
+
+ return new MessageV2.APIError(
{
message: parsed.message,
+ statusCode: parsed.statusCode,
+ isRetryable: parsed.isRetryable,
+ responseHeaders: parsed.responseHeaders,
responseBody: parsed.responseBody,
+ metadata: parsed.metadata,
},
{ cause: e },
).toObject()
- }
-
- return new APIError(
- {
- message: parsed.message,
- statusCode: parsed.statusCode,
- isRetryable: parsed.isRetryable,
- responseHeaders: parsed.responseHeaders,
- responseBody: parsed.responseBody,
- metadata: parsed.metadata,
- },
- { cause: e },
- ).toObject()
- case e instanceof Error:
- return new NamedError.Unknown({ message: errorMessage(e) }, { cause: e }).toObject()
- default:
- try {
- const parsed = ProviderError.parseStreamError(e)
- if (parsed) {
- if (parsed.type === "context_overflow") {
- return new ContextOverflowError(
+ case e instanceof Error:
+ return new NamedError.Unknown({ message: e instanceof Error ? e.message : String(e) }, { cause: e }).toObject()
+ default:
+ try {
+ const parsed = ProviderError.parseStreamError(e)
+ if (parsed) {
+ if (parsed.type === "context_overflow") {
+ return new MessageV2.ContextOverflowError(
+ {
+ message: parsed.message,
+ responseBody: parsed.responseBody,
+ },
+ { cause: e },
+ ).toObject()
+ }
+ return new MessageV2.APIError(
{
message: parsed.message,
+ isRetryable: parsed.isRetryable,
responseBody: parsed.responseBody,
},
- { cause: e },
+ {
+ cause: e,
+ },
).toObject()
}
- return new APIError(
- {
- message: parsed.message,
- isRetryable: parsed.isRetryable,
- responseBody: parsed.responseBody,
- },
- {
- cause: e,
- },
- ).toObject()
- }
- } catch {}
- return new NamedError.Unknown({ message: JSON.stringify(e) }, { cause: e }).toObject()
+ } catch {}
+ return new NamedError.Unknown({ message: JSON.stringify(e) }, { cause: e }).toObject()
+ }
}
}
-
-export * as MessageV2 from "./message-v2"
diff --git a/packages/opencode/src/session/processor.ts b/packages/opencode/src/session/processor.ts
index 21f9329c6fce..fd0a02fdcb75 100644
--- a/packages/opencode/src/session/processor.ts
+++ b/packages/opencode/src/session/processor.ts
@@ -1,619 +1,501 @@
-import { Cause, Deferred, Effect, Layer, Context, Scope } from "effect"
-import * as Stream from "effect/Stream"
+import { MessageV2 } from "./message-v2"
+import { Log } from "@/util/log"
+import { Session } from "."
import { Agent } from "@/agent/agent"
-import { Bus } from "@/bus"
-import { Config } from "@/config"
-import { Permission } from "@/permission"
-import { Plugin } from "@/plugin"
import { Snapshot } from "@/snapshot"
-import * as Session from "./session"
-import { LLM } from "./llm"
-import { MessageV2 } from "./message-v2"
-import { isOverflow } from "./overflow"
-import { PartID } from "./schema"
-import type { SessionID } from "./schema"
+import { SessionSummary } from "./summary"
+import { Bus } from "@/bus"
import { SessionRetry } from "./retry"
import { SessionStatus } from "./status"
-import { SessionSummary } from "./summary"
-import type { Provider } from "@/provider"
+import { Plugin } from "@/plugin"
+import type { Provider } from "@/provider/provider"
+import { LLM } from "./llm"
+import { Config } from "@/config/config"
+import { SessionCompaction } from "./compaction"
+import { Permission } from "@/permission"
import { Question } from "@/question"
-import { errorMessage } from "@/util/error"
-import { Log } from "@/util"
-import { isRecord } from "@/util/record"
-
-const DOOM_LOOP_THRESHOLD = 3
-const log = Log.create({ service: "session.processor" })
-
-export type Result = "compact" | "stop" | "continue"
-
-export type Event = LLM.Event
-
-export interface Handle {
- readonly message: MessageV2.Assistant
- readonly updateToolCall: (
- toolCallID: string,
- update: (part: MessageV2.ToolPart) => MessageV2.ToolPart,
- ) => Effect.Effect
- readonly completeToolCall: (
- toolCallID: string,
- output: {
- title: string
- metadata: Record
- output: string
- attachments?: MessageV2.FilePart[]
- },
- ) => Effect.Effect
- readonly process: (streamInput: LLM.StreamInput) => Effect.Effect
-}
-
-type Input = {
- assistantMessage: MessageV2.Assistant
- sessionID: SessionID
- model: Provider.Model
-}
-
-export interface Interface {
- readonly create: (input: Input) => Effect.Effect
-}
-
-type ToolCall = {
- partID: MessageV2.ToolPart["id"]
- messageID: MessageV2.ToolPart["messageID"]
- sessionID: MessageV2.ToolPart["sessionID"]
- done: Deferred.Deferred
-}
-
-interface ProcessorContext extends Input {
- toolcalls: Record
- shouldBreak: boolean
- snapshot: string | undefined
- blocked: boolean
- needsCompaction: boolean
- currentText: MessageV2.TextPart | undefined
- reasoningMap: Record
-}
-
-type StreamEvent = Event
-
-export class Service extends Context.Service()("@opencode/SessionProcessor") {}
-
-export const layer: Layer.Layer<
- Service,
- never,
- | Session.Service
- | Config.Service
- | Bus.Service
- | Snapshot.Service
- | Agent.Service
- | LLM.Service
- | Permission.Service
- | Plugin.Service
- | SessionSummary.Service
- | SessionStatus.Service
-> = Layer.effect(
- Service,
- Effect.gen(function* () {
- const session = yield* Session.Service
- const config = yield* Config.Service
- const bus = yield* Bus.Service
- const snapshot = yield* Snapshot.Service
- const agents = yield* Agent.Service
- const llm = yield* LLM.Service
- const permission = yield* Permission.Service
- const plugin = yield* Plugin.Service
- const summary = yield* SessionSummary.Service
- const scope = yield* Scope.Scope
- const status = yield* SessionStatus.Service
-
- const create = Effect.fn("SessionProcessor.create")(function* (input: Input) {
- // Pre-capture snapshot before the LLM stream starts. The AI SDK
- // may execute tools internally before emitting start-step events,
- // so capturing inside the event handler can be too late.
- const initialSnapshot = yield* snapshot.track()
- const ctx: ProcessorContext = {
- assistantMessage: input.assistantMessage,
- sessionID: input.sessionID,
- model: input.model,
- toolcalls: {},
- shouldBreak: false,
- snapshot: initialSnapshot,
- blocked: false,
- needsCompaction: false,
- currentText: undefined,
- reasoningMap: {},
- }
- let aborted = false
- const slog = log.clone().tag("session.id", input.sessionID).tag("messageID", input.assistantMessage.id)
-
- const parse = (e: unknown) =>
- MessageV2.fromError(e, {
- providerID: input.model.providerID,
- aborted,
- })
-
- const settleToolCall = Effect.fn("SessionProcessor.settleToolCall")(function* (toolCallID: string) {
- const done = ctx.toolcalls[toolCallID]?.done
- delete ctx.toolcalls[toolCallID]
- if (done) yield* Deferred.succeed(done, undefined).pipe(Effect.ignore)
- })
-
- const readToolCall = Effect.fn("SessionProcessor.readToolCall")(function* (toolCallID: string) {
- const call = ctx.toolcalls[toolCallID]
- if (!call) return
- const part = yield* session.getPart({
- partID: call.partID,
- messageID: call.messageID,
- sessionID: call.sessionID,
- })
- if (!part || part.type !== "tool") {
- delete ctx.toolcalls[toolCallID]
- return
+import { PartID } from "./schema"
+import type { SessionID, MessageID } from "./schema"
+
+export namespace SessionProcessor {
+ const DOOM_LOOP_THRESHOLD = 3
+ const MAX_NETWORK_RETRIES = 5
+ const log = Log.create({ service: "session.processor" })
+
+ export type Info = Awaited>
+ export type Result = Awaited>
+
+ export function create(input: {
+ assistantMessage: MessageV2.Assistant
+ sessionID: SessionID
+ model: Provider.Model
+ abort: AbortSignal
+ }) {
+ const toolcalls: Record = {}
+ let snapshot: string | undefined
+ let blocked = false
+ let attempt = 0
+ let networkAttempt = 0
+ let receivedChunk = false
+ let needsCompaction = false
+ const cleanup = async () => {
+ const parts = await MessageV2.parts(input.assistantMessage.id)
+ for (const part of parts) {
+ if (part.type === "tool" && part.state.status !== "completed" && part.state.status !== "error") {
+ await Session.removePart({
+ sessionID: input.sessionID,
+ messageID: input.assistantMessage.id,
+ partID: part.id,
+ })
+ continue
}
- return { call, part }
- })
-
- const updateToolCall = Effect.fn("SessionProcessor.updateToolCall")(function* (
- toolCallID: string,
- update: (part: MessageV2.ToolPart) => MessageV2.ToolPart,
- ) {
- const match = yield* readToolCall(toolCallID)
- if (!match) return
- const part = yield* session.updatePart(update(match.part))
- ctx.toolcalls[toolCallID] = {
- ...match.call,
- partID: part.id,
- messageID: part.messageID,
- sessionID: part.sessionID,
+ if (part.type === "text") {
+ await Session.updatePart({
+ ...part,
+ text: "",
+ time: part.time
+ ? {
+ start: part.time.start,
+ }
+ : undefined,
+ })
+ continue
}
- return part
- })
-
- const completeToolCall = Effect.fn("SessionProcessor.completeToolCall")(function* (
- toolCallID: string,
- output: {
- title: string
- metadata: Record
- output: string
- attachments?: MessageV2.FilePart[]
- },
- ) {
- const match = yield* readToolCall(toolCallID)
- if (!match || match.part.state.status !== "running") return
- yield* session.updatePart({
- ...match.part,
- state: {
- status: "completed",
- input: match.part.state.input,
- output: output.output,
- metadata: output.metadata,
- title: output.title,
- time: { start: match.part.state.time.start, end: Date.now() },
- attachments: output.attachments,
- },
- })
- yield* settleToolCall(toolCallID)
- })
-
- const failToolCall = Effect.fn("SessionProcessor.failToolCall")(function* (toolCallID: string, error: unknown) {
- const match = yield* readToolCall(toolCallID)
- if (!match || match.part.state.status !== "running") return false
- yield* session.updatePart({
- ...match.part,
- state: {
- status: "error",
- input: match.part.state.input,
- error: errorMessage(error),
- time: { start: match.part.state.time.start, end: Date.now() },
- },
- })
- if (error instanceof Permission.RejectedError || error instanceof Question.RejectedError) {
- ctx.blocked = ctx.shouldBreak
+ if (part.type === "reasoning") {
+ await Session.updatePart({
+ ...part,
+ text: "",
+ time: {
+ start: part.time.start,
+ },
+ })
}
- yield* settleToolCall(toolCallID)
- return true
+ }
+ Object.keys(toolcalls).forEach((id) => {
+ delete toolcalls[id]
})
+ input.assistantMessage.time.completed = undefined
+ await Session.updateMessage(input.assistantMessage)
+ }
+
+ const result = {
+ get message() {
+ return input.assistantMessage
+ },
+ partFromToolCall(toolCallID: string) {
+ return toolcalls[toolCallID]
+ },
+ async process(streamInput: LLM.StreamInput) {
+ log.info("process")
+ needsCompaction = false
+ const shouldBreak = (await Config.get()).experimental?.continue_loop_on_deny !== true
+ while (true) {
+ try {
+ receivedChunk = false
+ let currentText: MessageV2.TextPart | undefined
+ let reasoningMap: Record = {}
+ const stream = await LLM.stream(streamInput)
+
+ for await (const value of stream.fullStream) {
+ receivedChunk = true
+ input.abort.throwIfAborted()
+ switch (value.type) {
+ case "start":
+ await SessionStatus.set(input.sessionID, { type: "busy" })
+ break
+
+ case "reasoning-start":
+ if (value.id in reasoningMap) {
+ continue
+ }
+ const reasoningPart = {
+ id: PartID.ascending(),
+ messageID: input.assistantMessage.id,
+ sessionID: input.assistantMessage.sessionID,
+ type: "reasoning" as const,
+ text: "",
+ time: {
+ start: Date.now(),
+ },
+ metadata: value.providerMetadata,
+ }
+ reasoningMap[value.id] = reasoningPart
+ await Session.updatePart(reasoningPart)
+ break
+
+ case "reasoning-delta":
+ if (value.id in reasoningMap) {
+ const part = reasoningMap[value.id]
+ part.text += value.text
+ if (value.providerMetadata) part.metadata = value.providerMetadata
+ await Session.updatePartDelta({
+ sessionID: part.sessionID,
+ messageID: part.messageID,
+ partID: part.id,
+ field: "text",
+ delta: value.text,
+ })
+ }
+ break
+
+ case "reasoning-end":
+ if (value.id in reasoningMap) {
+ const part = reasoningMap[value.id]
+ part.text = part.text.trimEnd()
+
+ part.time = {
+ ...part.time,
+ end: Date.now(),
+ }
+ if (value.providerMetadata) part.metadata = value.providerMetadata
+ await Session.updatePart(part)
+ delete reasoningMap[value.id]
+ }
+ break
+
+ case "tool-input-start":
+ const part = await Session.updatePart({
+ id: toolcalls[value.id]?.id ?? PartID.ascending(),
+ messageID: input.assistantMessage.id,
+ sessionID: input.assistantMessage.sessionID,
+ type: "tool",
+ tool: value.toolName,
+ callID: value.id,
+ state: {
+ status: "pending",
+ input: {},
+ raw: "",
+ },
+ })
+ toolcalls[value.id] = part as MessageV2.ToolPart
+ break
+
+ case "tool-input-delta":
+ break
+
+ case "tool-input-end":
+ break
+
+ case "tool-call": {
+ const match = toolcalls[value.toolCallId]
+ if (match) {
+ const part = await Session.updatePart({
+ ...match,
+ tool: value.toolName,
+ state: {
+ status: "running",
+ input: value.input,
+ time: {
+ start: Date.now(),
+ },
+ },
+ metadata: value.providerMetadata,
+ })
+ toolcalls[value.toolCallId] = part as MessageV2.ToolPart
+
+ const parts = await MessageV2.parts(input.assistantMessage.id)
+ const lastThree = parts.slice(-DOOM_LOOP_THRESHOLD)
+
+ if (
+ lastThree.length === DOOM_LOOP_THRESHOLD &&
+ lastThree.every(
+ (p) =>
+ p.type === "tool" &&
+ p.tool === value.toolName &&
+ p.state.status !== "pending" &&
+ JSON.stringify(p.state.input) === JSON.stringify(value.input),
+ )
+ ) {
+ const agent = await Agent.get(input.assistantMessage.agent)
+ await Permission.ask({
+ permission: "doom_loop",
+ patterns: [value.toolName],
+ sessionID: input.assistantMessage.sessionID,
+ metadata: {
+ tool: value.toolName,
+ input: value.input,
+ },
+ always: [value.toolName],
+ ruleset: agent.permission,
+ })
+ }
+ }
+ break
+ }
+ case "tool-result": {
+ const match = toolcalls[value.toolCallId]
+ if (match && match.state.status === "running") {
+ await Session.updatePart({
+ ...match,
+ state: {
+ status: "completed",
+ input: value.input ?? match.state.input,
+ output: value.output.output,
+ metadata: value.output.metadata,
+ title: value.output.title,
+ time: {
+ start: match.state.time.start,
+ end: Date.now(),
+ },
+ attachments: value.output.attachments,
+ },
+ })
+
+ delete toolcalls[value.toolCallId]
+ }
+ break
+ }
- const handleEvent = Effect.fnUntraced(function* (value: StreamEvent) {
- switch (value.type) {
- case "start":
- yield* status.set(ctx.sessionID, { type: "busy" })
- return
-
- case "reasoning-start":
- if (value.id in ctx.reasoningMap) return
- ctx.reasoningMap[value.id] = {
- id: PartID.ascending(),
- messageID: ctx.assistantMessage.id,
- sessionID: ctx.assistantMessage.sessionID,
- type: "reasoning",
- text: "",
- time: { start: Date.now() },
- metadata: value.providerMetadata,
- }
- yield* session.updatePart(ctx.reasoningMap[value.id])
- return
-
- case "reasoning-delta":
- if (!(value.id in ctx.reasoningMap)) return
- ctx.reasoningMap[value.id].text += value.text
- if (value.providerMetadata) ctx.reasoningMap[value.id].metadata = value.providerMetadata
- yield* session.updatePartDelta({
- sessionID: ctx.reasoningMap[value.id].sessionID,
- messageID: ctx.reasoningMap[value.id].messageID,
- partID: ctx.reasoningMap[value.id].id,
- field: "text",
- delta: value.text,
- })
- return
-
- case "reasoning-end":
- if (!(value.id in ctx.reasoningMap)) return
- // oxlint-disable-next-line no-self-assign -- reactivity trigger
- ctx.reasoningMap[value.id].text = ctx.reasoningMap[value.id].text
- ctx.reasoningMap[value.id].time = { ...ctx.reasoningMap[value.id].time, end: Date.now() }
- if (value.providerMetadata) ctx.reasoningMap[value.id].metadata = value.providerMetadata
- yield* session.updatePart(ctx.reasoningMap[value.id])
- delete ctx.reasoningMap[value.id]
- return
-
- case "tool-input-start":
- if (ctx.assistantMessage.summary) {
- throw new Error(`Tool call not allowed while generating summary: ${value.toolName}`)
- }
- const part = yield* session.updatePart({
- id: ctx.toolcalls[value.id]?.partID ?? PartID.ascending(),
- messageID: ctx.assistantMessage.id,
- sessionID: ctx.assistantMessage.sessionID,
- type: "tool",
- tool: value.toolName,
- callID: value.id,
- state: { status: "pending", input: {}, raw: "" },
- metadata: value.providerExecuted ? { providerExecuted: true } : undefined,
- } satisfies MessageV2.ToolPart)
- ctx.toolcalls[value.id] = {
- done: yield* Deferred.make(),
- partID: part.id,
- messageID: part.messageID,
- sessionID: part.sessionID,
- }
- return
-
- case "tool-input-delta":
- return
-
- case "tool-input-end":
- return
-
- case "tool-call": {
- if (ctx.assistantMessage.summary) {
- throw new Error(`Tool call not allowed while generating summary: ${value.toolName}`)
- }
- yield* updateToolCall(value.toolCallId, (match) => ({
- ...match,
- tool: value.toolName,
- state: {
- ...match.state,
- status: "running",
- input: value.input,
- time: { start: Date.now() },
- },
- metadata: match.metadata?.providerExecuted
- ? { ...value.providerMetadata, providerExecuted: true }
- : value.providerMetadata,
- }))
-
- const parts = MessageV2.parts(ctx.assistantMessage.id)
- const recentParts = parts.slice(-DOOM_LOOP_THRESHOLD)
-
- if (
- recentParts.length !== DOOM_LOOP_THRESHOLD ||
- !recentParts.every(
- (part) =>
- part.type === "tool" &&
- part.tool === value.toolName &&
- part.state.status !== "pending" &&
- JSON.stringify(part.state.input) === JSON.stringify(value.input),
- )
- ) {
- return
+ case "tool-error": {
+ const match = toolcalls[value.toolCallId]
+ if (match && match.state.status === "running") {
+ await Session.updatePart({
+ ...match,
+ state: {
+ status: "error",
+ input: value.input ?? match.state.input,
+ error: value.error instanceof Error ? value.error.message : String(value.error),
+ time: {
+ start: match.state.time.start,
+ end: Date.now(),
+ },
+ },
+ })
+
+ if (
+ value.error instanceof Permission.RejectedError ||
+ value.error instanceof Question.RejectedError
+ ) {
+ blocked = shouldBreak
+ }
+ delete toolcalls[value.toolCallId]
+ }
+ break
+ }
+ case "error":
+ throw value.error
+
+ case "start-step":
+ snapshot = await Snapshot.track()
+ await Session.updatePart({
+ id: PartID.ascending(),
+ messageID: input.assistantMessage.id,
+ sessionID: input.sessionID,
+ snapshot,
+ type: "step-start",
+ })
+ break
+
+ case "finish-step":
+ const usage = Session.getUsage({
+ model: input.model,
+ usage: value.usage,
+ metadata: value.providerMetadata,
+ })
+ input.assistantMessage.finish = value.finishReason
+ input.assistantMessage.cost += usage.cost
+ input.assistantMessage.tokens = usage.tokens
+ await Session.updatePart({
+ id: PartID.ascending(),
+ reason: value.finishReason,
+ snapshot: await Snapshot.track(),
+ messageID: input.assistantMessage.id,
+ sessionID: input.assistantMessage.sessionID,
+ type: "step-finish",
+ tokens: usage.tokens,
+ cost: usage.cost,
+ })
+ await Session.updateMessage(input.assistantMessage)
+ if (snapshot) {
+ const patch = await Snapshot.patch(snapshot)
+ if (patch.files.length) {
+ await Session.updatePart({
+ id: PartID.ascending(),
+ messageID: input.assistantMessage.id,
+ sessionID: input.sessionID,
+ type: "patch",
+ hash: patch.hash,
+ files: patch.files,
+ })
+ }
+ snapshot = undefined
+ }
+ SessionSummary.summarize({
+ sessionID: input.sessionID,
+ messageID: input.assistantMessage.parentID,
+ })
+ if (
+ !input.assistantMessage.summary &&
+ (await SessionCompaction.isOverflow({ tokens: usage.tokens, model: input.model }))
+ ) {
+ needsCompaction = true
+ }
+ break
+
+ case "text-start":
+ currentText = {
+ id: PartID.ascending(),
+ messageID: input.assistantMessage.id,
+ sessionID: input.assistantMessage.sessionID,
+ type: "text",
+ text: "",
+ time: {
+ start: Date.now(),
+ },
+ metadata: value.providerMetadata,
+ }
+ await Session.updatePart(currentText)
+ break
+
+ case "text-delta":
+ if (currentText) {
+ currentText.text += value.text
+ if (value.providerMetadata) currentText.metadata = value.providerMetadata
+ await Session.updatePartDelta({
+ sessionID: currentText.sessionID,
+ messageID: currentText.messageID,
+ partID: currentText.id,
+ field: "text",
+ delta: value.text,
+ })
+ }
+ break
+
+ case "text-end":
+ if (currentText) {
+ currentText.text = currentText.text.trimEnd()
+ const textOutput = await Plugin.trigger(
+ "experimental.text.complete",
+ {
+ sessionID: input.sessionID,
+ messageID: input.assistantMessage.id,
+ partID: currentText.id,
+ },
+ { text: currentText.text },
+ )
+ currentText.text = textOutput.text
+ currentText.time = {
+ start: Date.now(),
+ end: Date.now(),
+ }
+ if (value.providerMetadata) currentText.metadata = value.providerMetadata
+ await Session.updatePart(currentText)
+ }
+ currentText = undefined
+ break
+
+ case "finish":
+ break
+
+ default:
+ log.info("unhandled", {
+ ...value,
+ })
+ continue
+ }
+ if (needsCompaction) break
}
-
- const agent = yield* agents.get(ctx.assistantMessage.agent)
- yield* permission.ask({
- permission: "doom_loop",
- patterns: [value.toolName],
- sessionID: ctx.assistantMessage.sessionID,
- metadata: { tool: value.toolName, input: value.input },
- always: [value.toolName],
- ruleset: agent.permission,
- })
- return
- }
-
- case "tool-result": {
- yield* completeToolCall(value.toolCallId, value.output)
- return
- }
-
- case "tool-error": {
- yield* failToolCall(value.toolCallId, value.error)
- return
- }
-
- case "error":
- throw value.error
-
- case "start-step":
- if (!ctx.snapshot) ctx.snapshot = yield* snapshot.track()
- yield* session.updatePart({
- id: PartID.ascending(),
- messageID: ctx.assistantMessage.id,
- sessionID: ctx.sessionID,
- snapshot: ctx.snapshot,
- type: "step-start",
- })
- return
-
- case "finish-step": {
- const usage = Session.getUsage({
- model: ctx.model,
- usage: value.usage,
- metadata: value.providerMetadata,
- })
- ctx.assistantMessage.finish = value.finishReason
- ctx.assistantMessage.cost += usage.cost
- ctx.assistantMessage.tokens = usage.tokens
- yield* session.updatePart({
- id: PartID.ascending(),
- reason: value.finishReason,
- snapshot: yield* snapshot.track(),
- messageID: ctx.assistantMessage.id,
- sessionID: ctx.assistantMessage.sessionID,
- type: "step-finish",
- tokens: usage.tokens,
- cost: usage.cost,
+ } catch (e: any) {
+ log.error("process", {
+ error: e,
+ stack: JSON.stringify(e.stack),
})
- yield* session.updateMessage(ctx.assistantMessage)
- if (ctx.snapshot) {
- const patch = yield* snapshot.patch(ctx.snapshot)
- if (patch.files.length) {
- yield* session.updatePart({
- id: PartID.ascending(),
- messageID: ctx.assistantMessage.id,
- sessionID: ctx.sessionID,
- type: "patch",
- hash: patch.hash,
- files: patch.files,
- })
+ const error = MessageV2.fromError(e, { providerID: input.model.providerID })
+ if (MessageV2.ContextOverflowError.isInstance(error)) {
+ needsCompaction = true
+ Bus.publish(Session.Event.Error, {
+ sessionID: input.sessionID,
+ error,
+ })
+ } else {
+ const retry = SessionRetry.retryable(error)
+ if (retry !== undefined) {
+ const network =
+ MessageV2.APIError.isInstance(error) &&
+ error.data.isRetryable &&
+ (error.data.message.includes("Network error") ||
+ error.data.message.includes("SSE read timed out") ||
+ error.data.message.includes("Connection reset by server"))
+ if (network) {
+ networkAttempt++
+ if (networkAttempt <= MAX_NETWORK_RETRIES) {
+ const delay = Math.min(1000 * Math.pow(2, networkAttempt - 1), 5000)
+ await SessionStatus.set(input.sessionID, {
+ type: "reconnecting",
+ attempt: networkAttempt,
+ message: retry,
+ })
+ if (receivedChunk) {
+ await cleanup()
+ }
+ await SessionRetry.sleep(delay, input.abort).catch(() => {})
+ continue
+ }
+ }
+ if (!network) {
+ attempt++
+ const delay = SessionRetry.delay(attempt, error.name === "APIError" ? error : undefined)
+ await SessionStatus.set(input.sessionID, {
+ type: "retry",
+ attempt,
+ message: retry,
+ next: Date.now() + delay,
+ })
+ if (receivedChunk) {
+ await cleanup()
+ }
+ await SessionRetry.sleep(delay, input.abort).catch(() => {})
+ continue
+ }
}
- ctx.snapshot = undefined
- }
- yield* summary
- .summarize({
- sessionID: ctx.sessionID,
- messageID: ctx.assistantMessage.parentID,
+ input.assistantMessage.error = error
+ Bus.publish(Session.Event.Error, {
+ sessionID: input.assistantMessage.sessionID,
+ error: input.assistantMessage.error,
})
- .pipe(Effect.ignore, Effect.forkIn(scope))
- if (
- !ctx.assistantMessage.summary &&
- isOverflow({ cfg: yield* config.get(), tokens: usage.tokens, model: ctx.model })
- ) {
- ctx.needsCompaction = true
+ await SessionStatus.set(input.sessionID, { type: "idle" })
}
- return
}
-
- case "text-start":
- ctx.currentText = {
- id: PartID.ascending(),
- messageID: ctx.assistantMessage.id,
- sessionID: ctx.assistantMessage.sessionID,
- type: "text",
- text: "",
- time: { start: Date.now() },
- metadata: value.providerMetadata,
+ if (snapshot) {
+ const patch = await Snapshot.patch(snapshot)
+ if (patch.files.length) {
+ await Session.updatePart({
+ id: PartID.ascending(),
+ messageID: input.assistantMessage.id,
+ sessionID: input.sessionID,
+ type: "patch",
+ hash: patch.hash,
+ files: patch.files,
+ })
}
- yield* session.updatePart(ctx.currentText)
- return
-
- case "text-delta":
- if (!ctx.currentText) return
- ctx.currentText.text += value.text
- if (value.providerMetadata) ctx.currentText.metadata = value.providerMetadata
- yield* session.updatePartDelta({
- sessionID: ctx.currentText.sessionID,
- messageID: ctx.currentText.messageID,
- partID: ctx.currentText.id,
- field: "text",
- delta: value.text,
- })
- return
-
- case "text-end":
- if (!ctx.currentText) return
- // oxlint-disable-next-line no-self-assign -- reactivity trigger
- ctx.currentText.text = ctx.currentText.text
- ctx.currentText.text = (yield* plugin.trigger(
- "experimental.text.complete",
- {
- sessionID: ctx.sessionID,
- messageID: ctx.assistantMessage.id,
- partID: ctx.currentText.id,
- },
- { text: ctx.currentText.text },
- )).text
- {
- const end = Date.now()
- ctx.currentText.time = { start: ctx.currentText.time?.start ?? end, end }
+ snapshot = undefined
+ }
+ const p = await MessageV2.parts(input.assistantMessage.id)
+ for (const part of p) {
+ if (part.type === "tool" && part.state.status !== "completed" && part.state.status !== "error") {
+ await Session.updatePart({
+ ...part,
+ state: {
+ ...part.state,
+ status: "error",
+ error: "Tool execution aborted",
+ time: {
+ start: Date.now(),
+ end: Date.now(),
+ },
+ },
+ })
}
- if (value.providerMetadata) ctx.currentText.metadata = value.providerMetadata
- yield* session.updatePart(ctx.currentText)
- ctx.currentText = undefined
- return
-
- case "finish":
- return
-
- default:
- slog.info("unhandled", { event: value.type, value })
- return
- }
- })
-
- const cleanup = Effect.fn("SessionProcessor.cleanup")(function* () {
- if (ctx.snapshot) {
- const patch = yield* snapshot.patch(ctx.snapshot)
- if (patch.files.length) {
- yield* session.updatePart({
- id: PartID.ascending(),
- messageID: ctx.assistantMessage.id,
- sessionID: ctx.sessionID,
- type: "patch",
- hash: patch.hash,
- files: patch.files,
- })
}
- ctx.snapshot = undefined
- }
-
- if (ctx.currentText) {
- const end = Date.now()
- ctx.currentText.time = { start: ctx.currentText.time?.start ?? end, end }
- yield* session.updatePart(ctx.currentText)
- ctx.currentText = undefined
- }
-
- for (const part of Object.values(ctx.reasoningMap)) {
- const end = Date.now()
- yield* session.updatePart({
- ...part,
- time: { start: part.time.start ?? end, end },
- })
- }
- ctx.reasoningMap = {}
-
- yield* Effect.forEach(
- Object.values(ctx.toolcalls),
- (call) => Deferred.await(call.done).pipe(Effect.timeout("250 millis"), Effect.ignore),
- { concurrency: "unbounded" },
- )
-
- for (const toolCallID of Object.keys(ctx.toolcalls)) {
- const match = yield* readToolCall(toolCallID)
- if (!match) continue
- const part = match.part
- const end = Date.now()
- const metadata = "metadata" in part.state && isRecord(part.state.metadata) ? part.state.metadata : {}
- yield* session.updatePart({
- ...part,
- state: {
- ...part.state,
- status: "error",
- error: "Tool execution aborted",
- metadata: { ...metadata, interrupted: true },
- time: { start: "time" in part.state ? part.state.time.start : end, end },
- },
- })
- }
- ctx.toolcalls = {}
- ctx.assistantMessage.time.completed = Date.now()
- yield* session.updateMessage(ctx.assistantMessage)
- })
-
- const halt = Effect.fn("SessionProcessor.halt")(function* (e: unknown) {
- slog.error("process", { error: errorMessage(e), stack: e instanceof Error ? e.stack : undefined })
- const error = parse(e)
- if (MessageV2.ContextOverflowError.isInstance(error)) {
- ctx.needsCompaction = true
- yield* bus.publish(Session.Event.Error, { sessionID: ctx.sessionID, error })
- return
- }
- ctx.assistantMessage.error = error
- yield* bus.publish(Session.Event.Error, {
- sessionID: ctx.assistantMessage.sessionID,
- error: ctx.assistantMessage.error,
- })
- yield* status.set(ctx.sessionID, { type: "idle" })
- })
-
- const process = Effect.fn("SessionProcessor.process")(function* (streamInput: LLM.StreamInput) {
- slog.info("process")
- ctx.needsCompaction = false
- ctx.shouldBreak = (yield* config.get()).experimental?.continue_loop_on_deny !== true
-
- return yield* Effect.gen(function* () {
- yield* Effect.gen(function* () {
- ctx.currentText = undefined
- ctx.reasoningMap = {}
- const stream = llm.stream(streamInput)
-
- yield* stream.pipe(
- Stream.tap((event) => handleEvent(event)),
- Stream.takeUntil(() => ctx.needsCompaction),
- Stream.runDrain,
- )
- }).pipe(
- Effect.onInterrupt(() =>
- Effect.gen(function* () {
- aborted = true
- if (!ctx.assistantMessage.error) {
- yield* halt(new DOMException("Aborted", "AbortError"))
- }
- }),
- ),
- Effect.catchCauseIf(
- (cause) => !Cause.hasInterruptsOnly(cause),
- (cause) => Effect.fail(Cause.squash(cause)),
- ),
- Effect.retry(
- SessionRetry.policy({
- parse,
- set: (info) =>
- status.set(ctx.sessionID, {
- type: "retry",
- attempt: info.attempt,
- message: info.message,
- next: info.next,
- }),
- }),
- ),
- Effect.catch(halt),
- Effect.ensuring(cleanup()),
- )
-
- if (ctx.needsCompaction) return "compact"
- if (ctx.blocked || ctx.assistantMessage.error) return "stop"
+ input.assistantMessage.time.completed = Date.now()
+ await Session.updateMessage(input.assistantMessage)
+ if (needsCompaction) return "compact"
+ if (blocked) return "stop"
+ if (input.assistantMessage.error) return "stop"
return "continue"
- })
- })
-
- return {
- get message() {
- return ctx.assistantMessage
- },
- updateToolCall,
- completeToolCall,
- process,
- } satisfies Handle
- })
-
- return Service.of({ create })
- }),
-)
-
-export const defaultLayer = Layer.suspend(() =>
- layer.pipe(
- Layer.provide(Session.defaultLayer),
- Layer.provide(Snapshot.defaultLayer),
- Layer.provide(Agent.defaultLayer),
- Layer.provide(LLM.defaultLayer),
- Layer.provide(Permission.defaultLayer),
- Layer.provide(Plugin.defaultLayer),
- Layer.provide(SessionSummary.defaultLayer),
- Layer.provide(SessionStatus.defaultLayer),
- Layer.provide(Bus.layer),
- Layer.provide(Config.defaultLayer),
- ),
-)
-
-export * as SessionProcessor from "./processor"
+ }
+ },
+ }
+ return result
+ }
+}
diff --git a/packages/opencode/src/session/status.ts b/packages/opencode/src/session/status.ts
index e5165a787945..b8e3768b1366 100644
--- a/packages/opencode/src/session/status.ts
+++ b/packages/opencode/src/session/status.ts
@@ -1,88 +1,104 @@
import { BusEvent } from "@/bus/bus-event"
import { Bus } from "@/bus"
-import { InstanceState } from "@/effect"
+import { InstanceState } from "@/effect/instance-state"
+import { makeRunPromise } from "@/effect/run-service"
import { SessionID } from "./schema"
-import { zod } from "@/util/effect-zod"
-import { withStatics } from "@/util/schema"
-import { Effect, Layer, Context, Schema } from "effect"
+import { Effect, Layer, ServiceMap } from "effect"
import z from "zod"
-export const Info = Schema.Union([
- Schema.Struct({
- type: Schema.Literal("idle"),
- }),
- Schema.Struct({
- type: Schema.Literal("retry"),
- attempt: Schema.Number,
- message: Schema.String,
- next: Schema.Number,
- }),
- Schema.Struct({
- type: Schema.Literal("busy"),
- }),
-])
- .annotate({ identifier: "SessionStatus" })
- .pipe(withStatics((s) => ({ zod: zod(s) })))
-export type Info = Schema.Schema.Type
+export namespace SessionStatus {
+ export const Info = z
+ .union([
+ z.object({
+ type: z.literal("idle"),
+ }),
+ z.object({
+ type: z.literal("retry"),
+ attempt: z.number(),
+ message: z.string(),
+ next: z.number(),
+ }),
+ z.object({
+ type: z.literal("reconnecting"),
+ attempt: z.number(),
+ message: z.string(),
+ }),
+ z.object({
+ type: z.literal("busy"),
+ }),
+ ])
+ .meta({
+ ref: "SessionStatus",
+ })
+ export type Info = z.infer
-export const Event = {
- Status: BusEvent.define(
- "session.status",
- Schema.Struct({
- sessionID: SessionID,
- status: Info,
- }),
- ),
- // deprecated
- Idle: BusEvent.define(
- "session.idle",
- Schema.Struct({
- sessionID: SessionID,
- }),
- ),
-}
+ export const Event = {
+ Status: BusEvent.define(
+ "session.status",
+ z.object({
+ sessionID: SessionID.zod,
+ status: Info,
+ }),
+ ),
+ // deprecated
+ Idle: BusEvent.define(
+ "session.idle",
+ z.object({
+ sessionID: SessionID.zod,
+ }),
+ ),
+ }
-export interface Interface {
- readonly get: (sessionID: SessionID) => Effect.Effect
- readonly list: () => Effect.Effect