Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions .changeset/three-moles-strive.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
---
'@openai/agents-openai': patch
'@openai/agents-core': patch
---

feat: add reasoning handling in chat completions
15 changes: 15 additions & 0 deletions packages/agents-core/src/types/protocol.ts
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,16 @@ export const InputText = SharedBase.extend({

export type InputText = z.infer<typeof InputText>;

export const ReasoningText = SharedBase.extend({
  type: z.literal('reasoning_text'),
  /**
   * The raw reasoning text emitted by the model.
   */
  text: z.string(),
});

export type ReasoningText = z.infer<typeof ReasoningText>;

export const InputImage = SharedBase.extend({
type: z.literal('input_image'),

Expand Down Expand Up @@ -452,6 +462,11 @@ export const ReasoningItem = SharedBase.extend({
* The user facing representation of the reasoning. Additional information might be in the `providerData` field.
*/
content: z.array(InputText),

/**
* The raw reasoning text from the model.
*/
rawContent: z.array(ReasoningText).optional(),
});

export type ReasoningItem = z.infer<typeof ReasoningItem>;
Expand Down
9 changes: 5 additions & 4 deletions packages/agents-openai/src/openaiChatCompletionsConverter.ts
Original file line number Diff line number Diff line change
Expand Up @@ -182,10 +182,11 @@ export function itemsToMessages(
});
}
} else if (item.type === 'reasoning') {
throw new UserError(
'Reasoning is not supported for chat completions. Got item: ' +
JSON.stringify(item),
);
const asst = ensureAssistantMessage();
// @ts-expect-error - reasoning is not supported in the official Chat Completion API spec
// this is handling third party providers that support reasoning
asst.reasoning = item.rawContent?.[0]?.text;
continue;
} else if (item.type === 'hosted_tool_call') {
if (item.name === 'file_search_call') {
const asst = ensureAssistantMessage();
Expand Down
31 changes: 30 additions & 1 deletion packages/agents-openai/src/openaiChatCompletionsModel.ts
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,23 @@ import { protocol } from '@openai/agents-core';

export const FAKE_ID = 'FAKE_ID';

// Certain Chat Completions compatible providers attach a non-standard
// `reasoning` string to the assistant message; model it explicitly so we
// can surface it as a reasoning output item.
type OpenAIMessageWithReasoning =
  OpenAI.Chat.Completions.ChatCompletionMessage & {
    reasoning: string;
  };

// Type guard: true only when the message carries a non-empty string
// `reasoning` property.
function hasReasoningContent(
  message: OpenAI.Chat.Completions.ChatCompletionMessage,
): message is OpenAIMessageWithReasoning {
  if (!('reasoning' in message)) {
    return false;
  }
  const { reasoning } = message;
  return typeof reasoning === 'string' && reasoning.length > 0;
}

/**
* A model that uses (or is compatible with) OpenAI's Chat Completions API.
*/
Expand Down Expand Up @@ -67,7 +84,19 @@ export class OpenAIChatCompletionsModel implements Model {
const output: protocol.OutputModelItem[] = [];
if (response.choices && response.choices[0]) {
const message = response.choices[0].message;


if (hasReasoningContent(message)) {
output.push({
type: 'reasoning',
content: [],
rawContent: [
{
type: 'reasoning_text',
text: message.reasoning,
},
],
});
}
if (
message.content !== undefined &&
message.content !== null &&
Expand Down
19 changes: 19 additions & 0 deletions packages/agents-openai/src/openaiChatCompletionsStreaming.ts
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ type StreamingState = {
text_content_index_and_output: [number, protocol.OutputText] | null;
refusal_content_index_and_output: [number, protocol.Refusal] | null;
function_calls: Record<number, protocol.FunctionCallItem>;
reasoning: string;
};

export async function* convertChatCompletionsStreamToResponses(
Expand All @@ -21,6 +22,7 @@ export async function* convertChatCompletionsStreamToResponses(
text_content_index_and_output: null,
refusal_content_index_and_output: null,
function_calls: {},
reasoning: '',
};

for await (const chunk of stream) {
Expand Down Expand Up @@ -64,6 +66,14 @@ export async function* convertChatCompletionsStreamToResponses(
state.text_content_index_and_output[1].text += delta.content;
}

if (
'reasoning' in delta &&
delta.reasoning &&
typeof delta.reasoning === 'string'
) {
state.reasoning += delta.reasoning;
}

// Handle refusals
if ('refusal' in delta && delta.refusal) {
if (!state.refusal_content_index_and_output) {
Expand Down Expand Up @@ -98,6 +108,15 @@ export async function* convertChatCompletionsStreamToResponses(

// Final output message
const outputs: protocol.OutputModelItem[] = [];

if (state.reasoning) {
outputs.push({
type: 'reasoning',
content: [],
rawContent: [{ type: 'reasoning_text', text: state.reasoning }],
});
}

if (
state.text_content_index_and_output ||
state.refusal_content_index_and_output
Expand Down
17 changes: 17 additions & 0 deletions packages/agents-openai/test/openaiChatCompletionsConverter.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -204,6 +204,23 @@ describe('itemsToMessages', () => {
];
expect(() => itemsToMessages(bad)).toThrow(UserError);
});

test('converts reasoning items into assistant reasoning', () => {
  // A reasoning item whose raw text should surface as the assistant
  // message's non-standard `reasoning` property.
  const reasoningItem = {
    type: 'reasoning',
    content: [],
    rawContent: [{ type: 'reasoning_text', text: 'why' }],
  } as protocol.ReasoningItem;

  const messages = itemsToMessages([reasoningItem]);

  expect(messages).toEqual([{ role: 'assistant', reasoning: 'why' }]);
});
});

describe('tool helpers', () => {
Expand Down
55 changes: 54 additions & 1 deletion packages/agents-openai/test/openaiChatCompletionsModel.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,13 @@ describe('OpenAIChatCompletionsModel', () => {
type: 'message',
role: 'assistant',
status: 'completed',
content: [{ type: 'output_text', text: 'hi', providerData: {} }],
content: [
{
type: 'output_text',
text: 'hi',
providerData: {},
},
],
},
]);
});
Expand Down Expand Up @@ -171,6 +177,53 @@ describe('OpenAIChatCompletionsModel', () => {
]);
});

it('handles reasoning messages from third-party providers', async () => {
  const client = new FakeClient();
  // Non-official `reasoning` field alongside normal content, as returned by
  // some Chat Completions compatible providers.
  const response = {
    id: 'r',
    choices: [
      {
        message: { reasoning: 'because', content: 'hi' },
      },
    ],
    usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
  } as any;
  client.chat.completions.create.mockResolvedValue(response);

  const model = new OpenAIChatCompletionsModel(client as any, 'gpt');
  const req: any = {
    input: 'u',
    modelSettings: {},
    tools: [],
    outputType: 'text',
    handoffs: [],
    tracing: false,
  };

  const result = await withTrace('t', () => model.getResponse(req));

  // Expect a reasoning item first (raw text preserved verbatim), followed by
  // the normal assistant message; the provider's extra field is also kept in
  // the output text's providerData.
  expect(result.output).toEqual([
    {
      type: 'reasoning',
      content: [],
      rawContent: [{ type: 'reasoning_text', text: 'because' }],
    },
    {
      id: 'r',
      type: 'message',
      role: 'assistant',
      status: 'completed',
      content: [
        {
          type: 'output_text',
          text: 'hi',
          providerData: { reasoning: 'because' },
        },
      ],
    },
  ]);
});

it('handles function tool calls', async () => {
const client = new FakeClient();
const response = {
Expand Down
32 changes: 32 additions & 0 deletions packages/agents-openai/test/openaiChatCompletionsStreaming.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -233,4 +233,36 @@ describe('convertChatCompletionsStreamToResponses', () => {
expect(deltas).toHaveLength(1);
expect(deltas[0].delta).toBe('hi');
});

it('accumulates reasoning deltas into a reasoning item', async () => {
  // Minimal completed-response shell; only the stream chunks matter here.
  const resp: ChatCompletion = {
    id: 'r1',
    created: 0,
    model: 'gpt-test',
    object: 'chat.completion',
    choices: [],
    usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
  } as any;

  // Two reasoning deltas that should be concatenated in arrival order.
  async function* stream() {
    yield makeChunk({ reasoning: 'foo' });
    yield makeChunk({ reasoning: 'bar' });
  }

  const events: any[] = [];
  for await (const e of convertChatCompletionsStreamToResponses(
    resp,
    stream() as any,
  )) {
    events.push(e);
  }

  // The final response_done event should surface the accumulated reasoning
  // ('foo' + 'bar') as the first output item, in rawContent.
  const final = events[events.length - 1];
  expect(final.type).toBe('response_done');
  expect(final.response.output[0]).toEqual({
    type: 'reasoning',
    content: [],
    rawContent: [{ type: 'reasoning_text', text: 'foobar' }],
  });
});
});