diff --git a/backend/src/adapters/upstream/openai.test.ts b/backend/src/adapters/upstream/openai.test.ts
new file mode 100644
index 0000000..ae8102f
--- /dev/null
+++ b/backend/src/adapters/upstream/openai.test.ts
@@ -0,0 +1,98 @@
+import { describe, expect, test } from "bun:test";
+import { openaiUpstreamAdapter } from "./openai";
+
+describe("openaiUpstreamAdapter reasoning compatibility", () => {
+  test("parses non-stream reasoning field into thinking block", async () => {
+    const response = new Response(
+      JSON.stringify({
+        id: "chatcmpl-1",
+        object: "chat.completion",
+        created: 1700000000,
+        model: "test-model",
+        choices: [
+          {
+            index: 0,
+            message: {
+              role: "assistant",
+              content: "final answer",
+              reasoning: "chain of thought summary",
+            },
+            finish_reason: "stop",
+          },
+        ],
+        usage: {
+          prompt_tokens: 1,
+          completion_tokens: 2,
+          total_tokens: 3,
+        },
+      }),
+    );
+
+    const parsed = await openaiUpstreamAdapter.parseResponse(response);
+    expect(parsed.content).toEqual([
+      { type: "thinking", thinking: "chain of thought summary" },
+      { type: "text", text: "final answer" },
+    ]);
+  });
+
+  test("prefers reasoning_content over reasoning when both exist", async () => {
+    const response = new Response(
+      JSON.stringify({
+        id: "chatcmpl-2",
+        object: "chat.completion",
+        created: 1700000001,
+        model: "test-model",
+        choices: [
+          {
+            index: 0,
+            message: {
+              role: "assistant",
+              content: "final answer",
+              reasoning_content: "preferred reasoning content",
+              reasoning: "fallback reasoning",
+            },
+            finish_reason: "stop",
+          },
+        ],
+        usage: {
+          prompt_tokens: 1,
+          completion_tokens: 2,
+          total_tokens: 3,
+        },
+      }),
+    );
+
+    const parsed = await openaiUpstreamAdapter.parseResponse(response);
+    expect(parsed.content).toEqual([
+      { type: "thinking", thinking: "preferred reasoning content" },
+      { type: "text", text: "final answer" },
+    ]);
+  });
+
+  test("parses stream delta reasoning field into thinking_delta", async () => {
+    const stream = [
+      'data: {"id":"chatcmpl-3","object":"chat.completion.chunk","created":1700000002,"model":"test-model","choices":[{"index":0,"delta":{"role":"assistant","reasoning":"stream reasoning"},"finish_reason":null}]}',
+      'data: {"id":"chatcmpl-3","object":"chat.completion.chunk","created":1700000002,"model":"test-model","choices":[{"index":0,"delta":{"content":"stream text"},"finish_reason":"stop"}]}',
+      "data: [DONE]",
+    ].join("\n");
+
+    const response = new Response(stream);
+    const chunks: Array<unknown> = [];
+    for await (const chunk of openaiUpstreamAdapter.parseStreamResponse(
+      response,
+    )) {
+      chunks.push(chunk);
+    }
+
+    expect(chunks).toContainEqual({
+      type: "content_block_delta",
+      index: 0,
+      delta: { type: "thinking_delta", thinking: "stream reasoning" },
+    });
+    expect(chunks).toContainEqual({
+      type: "content_block_delta",
+      index: 0,
+      delta: { type: "text_delta", text: "stream text" },
+    });
+  });
+});
diff --git a/backend/src/adapters/upstream/openai.ts b/backend/src/adapters/upstream/openai.ts
index 64c935c..bfb8693 100644
--- a/backend/src/adapters/upstream/openai.ts
+++ b/backend/src/adapters/upstream/openai.ts
@@ -93,6 +93,7 @@ interface OpenAIChoice {
     content: string | null;
     tool_calls?: OpenAIToolCall[];
     reasoning_content?: string;
+    reasoning?: string;
   };
   finish_reason: string | null;
 }
@@ -117,6 +118,7 @@ interface OpenAIStreamChoice {
     content?: string | null;
     tool_calls?: OpenAIToolCallDelta[];
     reasoning_content?: string;
+    reasoning?: string;
   };
   finish_reason: string | null;
 }
@@ -291,6 +293,24 @@ function convertFinishReason(finishReason: string | null): StopReason {
   }
 }
 
+function extractReasoningText(
+  payload?: {
+    reasoning_content?: string;
+    reasoning?: string;
+  },
+): string | undefined {
+  if (!payload) {
+    return undefined;
+  }
+  if (payload.reasoning_content && payload.reasoning_content.length > 0) {
+    return payload.reasoning_content;
+  }
+  if (payload.reasoning && payload.reasoning.length > 0) {
+    return payload.reasoning;
+  }
+  return undefined;
+}
+
 /**
  * Convert OpenAI response to internal format
  */
@@ -298,11 +318,12 @@ function convertResponse(resp: OpenAIChatResponse): InternalResponse {
   const choice = resp.choices[0];
   const content: InternalContentBlock[] = [];
 
-  // Handle reasoning content (for o1/deepseek models)
-  if (choice?.message.reasoning_content) {
+  // Handle reasoning content (reasoning_content or reasoning)
+  const reasoningText = extractReasoningText(choice?.message);
+  if (reasoningText) {
     content.push({
       type: "thinking",
-      thinking: choice.message.reasoning_content,
+      thinking: reasoningText,
     } as ThinkingContentBlock);
   }
 
@@ -524,13 +545,14 @@
       }
 
       // Handle reasoning content (thinking)
-      if (choice.delta.reasoning_content) {
+      const reasoningDelta = extractReasoningText(choice.delta);
+      if (reasoningDelta) {
         yield {
           type: "content_block_delta",
           index: blockIndex,
           delta: {
             type: "thinking_delta",
-            thinking: choice.delta.reasoning_content,
+            thinking: reasoningDelta,
           },
         };
       }