Skip to content

Commit 5820f19

Browse files
committed
feat(agents-extensions): #628 Add support for Anthropic extended thinking
Implements support for Anthropic's reasoning and interleaved thinking capabilities, achieving parity with the Python SDK. - Modifies `aiSdk.ts` to handle both streaming and non-streaming cases. - `getResponse` (non-streaming) passes the `reasoning` field from the provider result. - `getStreamedResponse` (streaming) captures `reasoning-delta` chunks and accumulates them into the `reasoning` field of the final `response_done` event. - Updates the core `protocol.ts` Zod schema. - Adds new unit tests to `aiSdk.test.ts` Closes #628
1 parent 14016fd commit 5820f19

File tree

6 files changed

+259
-1
lines changed

6 files changed

+259
-1
lines changed

.changeset/happy-shrimps-sell.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
---
2+
'@openai/agents-extensions': patch
3+
'@openai/agents-core': patch
4+
---
5+
6+
feat: Add support for Anthropic extended thinking

package.json

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -71,5 +71,9 @@
7171
"verdaccio": "^6.2.1",
7272
"vitest": "^3.2.4"
7373
},
74-
"packageManager": "pnpm@10.20.0"
74+
"packageManager": "pnpm@10.20.0",
75+
"dependencies": {
76+
"@ai-sdk/openai": "^2.0.62",
77+
"@openai/agents-extensions": "^0.2.1"
78+
}
7579
}

packages/agents-core/src/types/protocol.ts

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -694,6 +694,11 @@ export const StreamEventResponseCompleted = SharedBase.extend({
694694
* The output from the model.
695695
*/
696696
output: z.array(OutputModelItem),
697+
698+
/**
699+
* The reasoning/thinking text from the model.
700+
*/
701+
reasoning: z.string().optional(),
697702
}),
698703
});
699704

packages/agents-extensions/src/aiSdk.ts

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -741,6 +741,7 @@ export class AiSdkModel implements Model {
741741
: ((result as any).usage?.outputTokens ?? 0)) || 0,
742742
}),
743743
output,
744+
reasoning: (result as any).reasoning ?? undefined,
744745
providerData: result,
745746
} as const;
746747

@@ -874,6 +875,7 @@ export class AiSdkModel implements Model {
874875
let usageCompletionTokens = 0;
875876
const functionCalls: Record<string, protocol.FunctionCallItem> = {};
876877
let textOutput: protocol.OutputText | undefined;
878+
let reasoningText: string | undefined;
877879

878880
for await (const part of stream) {
879881
if (!started) {
@@ -922,6 +924,16 @@ export class AiSdkModel implements Model {
922924
: ((part as any).usage?.outputTokens ?? 0);
923925
break;
924926
}
927+
case 'reasoning-delta': {
928+
const reasoningDelta = (part as any).reasoningDelta;
929+
if (reasoningDelta) {
930+
if (!reasoningText) {
931+
reasoningText = '';
932+
}
933+
reasoningText += reasoningDelta;
934+
}
935+
break;
936+
}
925937
case 'error': {
926938
throw part.error;
927939
}
@@ -953,6 +965,7 @@ export class AiSdkModel implements Model {
953965
totalTokens: usagePromptTokens + usageCompletionTokens,
954966
},
955967
output: outputs,
968+
reasoning: reasoningText,
956969
},
957970
};
958971

packages/agents-extensions/test/aiSdk.test.ts

Lines changed: 109 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -820,6 +820,115 @@ describe('AiSdkModel.getResponse', () => {
820820
outputTokensDetails: [],
821821
});
822822
});
823+
824+
test('should store reasoning in response for non-streaming text output', async () => {
825+
const mockProviderResult = {
826+
content: [{ type: 'text', text: 'This is the final answer.' }],
827+
usage: { inputTokens: 10, outputTokens: 20, totalTokens: 30 },
828+
providerMetadata: { p: 1 },
829+
response: { id: 'fake-id-123' },
830+
finishReason: 'stop',
831+
warnings: [],
832+
reasoning: '<thinking>I am thinking about the answer.</thinking>',
833+
};
834+
835+
const model = new AiSdkModel(
836+
stubModel({
837+
async doGenerate() {
838+
return mockProviderResult as any;
839+
},
840+
}),
841+
);
842+
843+
const res = await withTrace('t', () =>
844+
model.getResponse({
845+
input: 'hi',
846+
tools: [],
847+
handoffs: [],
848+
modelSettings: {},
849+
outputType: 'text',
850+
tracing: false,
851+
} as any),
852+
);
853+
854+
expect(res.reasoning).toBeDefined();
855+
expect(res.reasoning).toBe(
856+
'<thinking>I am thinking about the answer.</thinking>',
857+
);
858+
expect(res.responseId).toBe('fake-id-123');
859+
});
860+
861+
test('should store reasoning in final response_done event for streaming', async () => {
862+
async function* mockProviderStream() {
863+
yield {
864+
type: 'response-metadata',
865+
id: 'fake-stream-id-456',
866+
};
867+
868+
yield {
869+
type: 'reasoning-delta',
870+
reasoningDelta: '<thinking>Step 1: I am thinking.',
871+
};
872+
873+
yield {
874+
type: 'text-delta',
875+
delta: 'Here is the answer.',
876+
};
877+
878+
yield {
879+
type: 'reasoning-delta',
880+
reasoningDelta: ' Step 2: More thinking.</thinking>',
881+
};
882+
883+
yield {
884+
type: 'finish',
885+
usage: { inputTokens: 5, outputTokens: 10, totalTokens: 15 },
886+
};
887+
}
888+
889+
const model = new AiSdkModel(
890+
stubModel({
891+
async doStream() {
892+
return {
893+
stream: mockProviderStream(),
894+
} as any;
895+
},
896+
}),
897+
);
898+
899+
const stream = model.getStreamedResponse({
900+
input: 'hi',
901+
tools: [],
902+
handoffs: [],
903+
modelSettings: {},
904+
outputType: 'text',
905+
tracing: false,
906+
} as any);
907+
908+
const events = [];
909+
for await (const event of stream) {
910+
events.push(event);
911+
}
912+
913+
const finalEvent = events.find((e) => e.type === 'response_done') as
914+
| protocol.StreamEventResponseCompleted
915+
| undefined;
916+
917+
expect(finalEvent).toBeDefined();
918+
919+
expect(finalEvent!.response.reasoning).toBeDefined();
920+
expect(finalEvent!.response.reasoning).toBe(
921+
'<thinking>Step 1: I am thinking. Step 2: More thinking.</thinking>',
922+
);
923+
924+
expect(finalEvent!.response.id).toBe('fake-stream-id-456');
925+
expect(finalEvent!.response.usage.totalTokens).toBe(15);
926+
927+
const textOutput = finalEvent!.response.output.find(
928+
(o) => o.type === 'message' && o.content[0].type === 'output_text',
929+
) as any;
930+
expect(textOutput.content[0].text).toBe('Here is the answer.');
931+
});
823932
});
824933

825934
describe('AiSdkModel.getStreamedResponse', () => {

pnpm-lock.yaml

Lines changed: 121 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments (0)