diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts index 0fe53e6e47f0..f65a02693f23 100644 --- a/packages/opencode/src/provider/provider.ts +++ b/packages/opencode/src/provider/provider.ts @@ -1177,7 +1177,7 @@ const layer: Layer.Layer< model.modalities?.output?.includes("video") ?? existingModel?.capabilities.output.video ?? false, pdf: model.modalities?.output?.includes("pdf") ?? existingModel?.capabilities.output.pdf ?? false, }, - interleaved: model.interleaved ?? existingModel?.capabilities.interleaved ?? false, + interleaved: model.interleaved ?? existingModel?.capabilities.interleaved ?? (model.reasoning ? { field: "reasoning_content" } : false), }, cost: { input: model?.cost?.input ?? existingModel?.cost?.input ?? 0, diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts index 50529c4dd7ad..5d431000b6d4 100644 --- a/packages/opencode/src/provider/transform.ts +++ b/packages/opencode/src/provider/transform.ts @@ -176,7 +176,8 @@ function normalizeMessages( } // Deepseek requires all assistant messages to have reasoning on them - if (model.api.id.includes("deepseek")) { + // Check both API ID and model ID to cover OpenRouter-routed DeepSeek models + if (model.api.id.includes("deepseek") || model.id.includes("deepseek")) { msgs = msgs.map((msg) => { if (msg.role !== "assistant") return msg if (Array.isArray(msg.content)) { @@ -195,6 +196,7 @@ function normalizeMessages( if (typeof model.capabilities.interleaved === "object" && model.capabilities.interleaved.field) { const field = model.capabilities.interleaved.field + const sdk = sdkKey(model.api.npm) ?? 
"openaiCompatible" return msgs.map((msg) => { if (msg.role === "assistant" && Array.isArray(msg.content)) { const reasoningParts = msg.content.filter((part: any) => part.type === "reasoning") @@ -206,14 +208,18 @@ function normalizeMessages( // Include reasoning_content | reasoning_details directly on the message for all assistant messages. // Always set the field even when empty — some providers (e.g. DeepSeek) may return empty // reasoning_content which still needs to be sent back in subsequent requests. + // Preserve existing providerOptions[field] when content no longer has reasoning + // parts (e.g. after a prior transform pass already extracted them). The first + // pass sets the field from reasoning parts; on subsequent passes reasoningText + // is empty and must not overwrite the preserved value from DB. return { ...msg, content: filteredContent, providerOptions: { ...msg.providerOptions, - openaiCompatible: { - ...msg.providerOptions?.openaiCompatible, + [sdk]: { [field]: reasoningText, + ...msg.providerOptions?.[sdk], }, }, } @@ -223,6 +229,44 @@ function normalizeMessages( + // When reasoning is active but interleaved is not configured, still inject empty reasoning_content + // for ALL assistant messages. This covers historical messages from DB that were stored before + // reasoning mode was enabled — they have no reasoning part to extract but DeepSeek's API still + // requires reasoning_content on every assistant turn in thinking mode. + if (model.capabilities.reasoning) { + msgs = msgs.map((msg) => { + if (msg.role !== "assistant") return msg + if (Array.isArray(msg.content)) { + const sdk = sdkKey(model.api.npm) ?? "openaiCompatible" + return { + ...msg, + providerOptions: { + ...msg.providerOptions, + [sdk]: { + reasoning_content: "", + ...msg.providerOptions?.[sdk], + }, + }, + } + } + if (typeof msg.content === "string") { + const sdk = sdkKey(model.api.npm) ?? 
"openaiCompatible" + return { + ...msg, + content: [{ type: "text" as const, text: msg.content }, { type: "reasoning" as const, text: "" }], + providerOptions: { + ...msg.providerOptions, + [sdk]: { + reasoning_content: "", + ...msg.providerOptions?.[sdk], + }, + }, + } + } + return msg + }) + } + return msgs }