Skip to content

Commit dcb93f9

Browse files
add test coverage
1 parent 073a92e commit dcb93f9

File tree

3 files changed

+103
-1
lines changed

3 files changed

+103
-1
lines changed

packages/agents-openai/test/openaiChatCompletionsConverter.test.ts

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -204,6 +204,23 @@ describe('itemsToMessages', () => {
204204
];
205205
expect(() => itemsToMessages(bad)).toThrow(UserError);
206206
});
207+
208+
test('converts reasoning items into assistant reasoning', () => {
209+
const items: protocol.ModelItem[] = [
210+
{
211+
type: 'reasoning',
212+
content: [],
213+
rawContent: [{ type: 'reasoning_text', text: 'why' }],
214+
} as protocol.ReasoningItem,
215+
];
216+
const msgs = itemsToMessages(items);
217+
expect(msgs).toEqual([
218+
{
219+
role: 'assistant',
220+
reasoning: 'why',
221+
},
222+
]);
223+
});
207224
});
208225

209226
describe('tool helpers', () => {

packages/agents-openai/test/openaiChatCompletionsModel.test.ts

Lines changed: 54 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,13 @@ describe('OpenAIChatCompletionsModel', () => {
6868
type: 'message',
6969
role: 'assistant',
7070
status: 'completed',
71-
content: [{ type: 'output_text', text: 'hi', providerData: {} }],
71+
content: [
72+
{
73+
type: 'output_text',
74+
text: 'hi',
75+
providerData: {},
76+
},
77+
],
7278
},
7379
]);
7480
});
@@ -171,6 +177,53 @@ describe('OpenAIChatCompletionsModel', () => {
171177
]);
172178
});
173179

180+
it('handles reasoning messages from third-party providers', async () => {
181+
const client = new FakeClient();
182+
const response = {
183+
id: 'r',
184+
choices: [
185+
{
186+
message: { reasoning: 'because', content: 'hi' },
187+
},
188+
],
189+
usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
190+
} as any;
191+
client.chat.completions.create.mockResolvedValue(response);
192+
193+
const model = new OpenAIChatCompletionsModel(client as any, 'gpt');
194+
const req: any = {
195+
input: 'u',
196+
modelSettings: {},
197+
tools: [],
198+
outputType: 'text',
199+
handoffs: [],
200+
tracing: false,
201+
};
202+
203+
const result = await withTrace('t', () => model.getResponse(req));
204+
205+
expect(result.output).toEqual([
206+
{
207+
type: 'reasoning',
208+
content: [],
209+
rawContent: [{ type: 'reasoning_text', text: 'because' }],
210+
},
211+
{
212+
id: 'r',
213+
type: 'message',
214+
role: 'assistant',
215+
status: 'completed',
216+
content: [
217+
{
218+
type: 'output_text',
219+
text: 'hi',
220+
providerData: { reasoning: 'because' },
221+
},
222+
],
223+
},
224+
]);
225+
});
226+
174227
it('handles function tool calls', async () => {
175228
const client = new FakeClient();
176229
const response = {

packages/agents-openai/test/openaiChatCompletionsStreaming.test.ts

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -233,4 +233,36 @@ describe('convertChatCompletionsStreamToResponses', () => {
233233
expect(deltas).toHaveLength(1);
234234
expect(deltas[0].delta).toBe('hi');
235235
});
236+
237+
it('accumulates reasoning deltas into a reasoning item', async () => {
238+
const resp: ChatCompletion = {
239+
id: 'r1',
240+
created: 0,
241+
model: 'gpt-test',
242+
object: 'chat.completion',
243+
choices: [],
244+
usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
245+
} as any;
246+
247+
async function* stream() {
248+
yield makeChunk({ reasoning: 'foo' });
249+
yield makeChunk({ reasoning: 'bar' });
250+
}
251+
252+
const events: any[] = [];
253+
for await (const e of convertChatCompletionsStreamToResponses(
254+
resp,
255+
stream() as any,
256+
)) {
257+
events.push(e);
258+
}
259+
260+
const final = events[events.length - 1];
261+
expect(final.type).toBe('response_done');
262+
expect(final.response.output[0]).toEqual({
263+
type: 'reasoning',
264+
content: [],
265+
rawContent: [{ type: 'reasoning_text', text: 'foobar' }],
266+
});
267+
});
236268
});

0 commit comments

Comments (0)