Skip to content

Commit a394871

Browse files
authored
fix: support reasoning fallback and keep reasoning_content output (#78)
1 parent 80cc1ef commit a394871

2 files changed

Lines changed: 125 additions & 5 deletions

File tree

Lines changed: 98 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,98 @@
1+
import { describe, expect, test } from "bun:test";
2+
import { openaiUpstreamAdapter } from "./openai";
3+
4+
describe("openaiUpstreamAdapter reasoning compatibility", () => {
5+
test("parses non-stream reasoning field into thinking block", async () => {
6+
const response = new Response(
7+
JSON.stringify({
8+
id: "chatcmpl-1",
9+
object: "chat.completion",
10+
created: 1700000000,
11+
model: "test-model",
12+
choices: [
13+
{
14+
index: 0,
15+
message: {
16+
role: "assistant",
17+
content: "final answer",
18+
reasoning: "chain of thought summary",
19+
},
20+
finish_reason: "stop",
21+
},
22+
],
23+
usage: {
24+
prompt_tokens: 1,
25+
completion_tokens: 2,
26+
total_tokens: 3,
27+
},
28+
}),
29+
);
30+
31+
const parsed = await openaiUpstreamAdapter.parseResponse(response);
32+
expect(parsed.content).toEqual([
33+
{ type: "thinking", thinking: "chain of thought summary" },
34+
{ type: "text", text: "final answer" },
35+
]);
36+
});
37+
38+
test("prefers reasoning_content over reasoning when both exist", async () => {
39+
const response = new Response(
40+
JSON.stringify({
41+
id: "chatcmpl-2",
42+
object: "chat.completion",
43+
created: 1700000001,
44+
model: "test-model",
45+
choices: [
46+
{
47+
index: 0,
48+
message: {
49+
role: "assistant",
50+
content: "final answer",
51+
reasoning_content: "preferred reasoning content",
52+
reasoning: "fallback reasoning",
53+
},
54+
finish_reason: "stop",
55+
},
56+
],
57+
usage: {
58+
prompt_tokens: 1,
59+
completion_tokens: 2,
60+
total_tokens: 3,
61+
},
62+
}),
63+
);
64+
65+
const parsed = await openaiUpstreamAdapter.parseResponse(response);
66+
expect(parsed.content).toEqual([
67+
{ type: "thinking", thinking: "preferred reasoning content" },
68+
{ type: "text", text: "final answer" },
69+
]);
70+
});
71+
72+
test("parses stream delta reasoning field into thinking_delta", async () => {
73+
const stream = [
74+
'data: {"id":"chatcmpl-3","object":"chat.completion.chunk","created":1700000002,"model":"test-model","choices":[{"index":0,"delta":{"role":"assistant","reasoning":"stream reasoning"},"finish_reason":null}]}',
75+
'data: {"id":"chatcmpl-3","object":"chat.completion.chunk","created":1700000002,"model":"test-model","choices":[{"index":0,"delta":{"content":"stream text"},"finish_reason":"stop"}]}',
76+
"data: [DONE]",
77+
].join("\n");
78+
79+
const response = new Response(stream);
80+
const chunks: Array<unknown> = [];
81+
for await (const chunk of openaiUpstreamAdapter.parseStreamResponse(
82+
response,
83+
)) {
84+
chunks.push(chunk);
85+
}
86+
87+
expect(chunks).toContainEqual({
88+
type: "content_block_delta",
89+
index: 0,
90+
delta: { type: "thinking_delta", thinking: "stream reasoning" },
91+
});
92+
expect(chunks).toContainEqual({
93+
type: "content_block_delta",
94+
index: 0,
95+
delta: { type: "text_delta", text: "stream text" },
96+
});
97+
});
98+
});

backend/src/adapters/upstream/openai.ts

Lines changed: 27 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,7 @@ interface OpenAIChoice {
9393
content: string | null;
9494
tool_calls?: OpenAIToolCall[];
9595
reasoning_content?: string;
96+
reasoning?: string;
9697
};
9798
finish_reason: string | null;
9899
}
@@ -117,6 +118,7 @@ interface OpenAIStreamChoice {
117118
content?: string | null;
118119
tool_calls?: OpenAIToolCallDelta[];
119120
reasoning_content?: string;
121+
reasoning?: string;
120122
};
121123
finish_reason: string | null;
122124
}
@@ -291,18 +293,37 @@ function convertFinishReason(finishReason: string | null): StopReason {
291293
}
292294
}
293295

296+
function extractReasoningText(
297+
payload?: {
298+
reasoning_content?: string;
299+
reasoning?: string;
300+
},
301+
): string | undefined {
302+
if (!payload) {
303+
return undefined;
304+
}
305+
if (payload.reasoning_content && payload.reasoning_content.length > 0) {
306+
return payload.reasoning_content;
307+
}
308+
if (payload.reasoning && payload.reasoning.length > 0) {
309+
return payload.reasoning;
310+
}
311+
return undefined;
312+
}
313+
294314
/**
295315
* Convert OpenAI response to internal format
296316
*/
297317
function convertResponse(resp: OpenAIChatResponse): InternalResponse {
298318
const choice = resp.choices[0];
299319
const content: InternalContentBlock[] = [];
300320

301-
// Handle reasoning content (for o1/deepseek models)
302-
if (choice?.message.reasoning_content) {
321+
// Handle reasoning content (reasoning_content or reasoning)
322+
const reasoningText = extractReasoningText(choice?.message);
323+
if (reasoningText) {
303324
content.push({
304325
type: "thinking",
305-
thinking: choice.message.reasoning_content,
326+
thinking: reasoningText,
306327
} as ThinkingContentBlock);
307328
}
308329

@@ -524,13 +545,14 @@ export const openaiUpstreamAdapter: UpstreamAdapter = {
524545
}
525546

526547
// Handle reasoning content (thinking)
527-
if (choice.delta.reasoning_content) {
548+
const reasoningDelta = extractReasoningText(choice.delta);
549+
if (reasoningDelta) {
528550
yield {
529551
type: "content_block_delta",
530552
index: blockIndex,
531553
delta: {
532554
type: "thinking_delta",
533-
thinking: choice.delta.reasoning_content,
555+
thinking: reasoningDelta,
534556
},
535557
};
536558
}

0 commit comments

Comments (0)