Skip to content

Commit 28363da

Browse files
authored
feat(openai-compatible): add supportsStructuredOutputs to provider settings (#8546)
## Background Alternative implementation to #5262 which doesn't introduce a 2nd argument for a model constructor, which would break our provider registry feature related: - #8475 (comment) - #8428 - #8427 ## Summary Introduce a new `supportsStructuredOutputs` option to `createOpenAICompatible()` that gets passed to `provider.languageModel` and `provider.chatModel` ## Manual Verification ```ts import 'dotenv/config'; import { createOpenAICompatible } from '@ai-sdk/openai-compatible'; import { generateObject } from 'ai'; import { z } from 'zod/v4'; async function main() { const togetherai = createOpenAICompatible({ baseURL: 'https://api.together.xyz/v1', name: 'togetherai', headers: { Authorization: `Bearer ${process.env.TOGETHER_AI_API_KEY}`, }, supportsStructuredOutputs: true, }); const model = togetherai.chatModel('mistralai/Mistral-7B-Instruct-v0.1'); const result = await generateObject({ model, schema: z.object({ recipe: z.object({ name: z.string(), ingredients: z.array( z.object({ name: z.string(), amount: z.string(), }), ), steps: z.array(z.string()), }), }), prompt: 'Generate a lasagna recipe.', }); console.log(JSON.stringify(result.object.recipe, null, 2)); console.log(); console.log('Token usage:', result.usage); console.log('Finish reason:', result.finishReason); } main().catch(console.error); ``` ## Tasks <!-- This task list is intended to help you keep track of what you need to do. Feel free to add tasks and remove unnecessary tasks or this section as needed. 
Please check if the PR fulfills the following requirements: --> - [x] Tests have been added / updated (for bug fixes / features) - [x] Documentation has been added / updated (for bug fixes / features) - [x] A _patch_ changeset for relevant packages has been added (for bug fixes / features - run `pnpm changeset` in the project root) - [x] Formatting issues have been fixed (run `pnpm prettier-fix` in the project root) - [x] I have reviewed this pull request (self-review) ## Future work Since the `createOpenAICompatible` option is only relevant to some of the model types, we might consider adding sub configuration for each model type if more of these type of options get added ## Related Issues closes #5262 closes #8427 closes #8428
1 parent 6bfd007 commit 28363da

File tree

5 files changed

+84
-2
lines changed

5 files changed

+84
-2
lines changed

.changeset/yellow-jeans-know.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
'@ai-sdk/openai-compatible': patch
3+
---
4+
5+
feat(openai-compatible): add `supportsStructuredOutputs` to provider settings

content/providers/02-openai-compatible-providers/index.mdx

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,10 @@ You can use the following optional settings to customize the provider instance:
8585

8686
Include usage information in streaming responses. When enabled, usage data will be included in the response metadata for streaming requests. Defaults to `undefined` (`false`).
8787

88+
- **supportsStructuredOutputs** _boolean_
89+
90+
Set to true if the provider supports structured outputs. Only relevant for `provider()`, `provider.chatModel()`, and `provider.languageModel()`.
91+
8892
## Language Models
8993

9094
You can create provider models using a provider instance.

examples/ai-core/src/generate-object/openai-compatible-togetherai.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@ async function main() {
1010
headers: {
1111
Authorization: `Bearer ${process.env.TOGETHER_AI_API_KEY}`,
1212
},
13+
supportsStructuredOutputs: true,
1314
});
1415
const model = togetherai.chatModel('mistralai/Mistral-7B-Instruct-v0.1');
1516
const result = await generateObject({

packages/openai-compatible/src/openai-compatible-provider.test.ts

Lines changed: 60 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ import { createOpenAICompatible } from './openai-compatible-provider';
22
import { OpenAICompatibleChatLanguageModel } from './chat/openai-compatible-chat-language-model';
33
import { OpenAICompatibleCompletionLanguageModel } from './completion/openai-compatible-completion-language-model';
44
import { OpenAICompatibleEmbeddingModel } from './embedding/openai-compatible-embedding-model';
5+
import { OpenAICompatibleImageModel } from './image/openai-compatible-image-model';
56
import { vi, describe, beforeEach, it, expect } from 'vitest';
67

78
// Mock version
@@ -19,10 +20,16 @@ const OpenAICompatibleEmbeddingModelMock = vi.mocked(
1920
OpenAICompatibleEmbeddingModel,
2021
);
2122

23+
const OpenAICompatibleImageModelMock = vi.mocked(OpenAICompatibleImageModel);
24+
2225
vi.mock('./chat/openai-compatible-chat-language-model', () => ({
2326
OpenAICompatibleChatLanguageModel: vi.fn(),
2427
}));
2528

29+
vi.mock('./image/openai-compatible-image-model', () => ({
30+
OpenAICompatibleImageModel: vi.fn(),
31+
}));
32+
2633
vi.mock('./completion/openai-compatible-completion-language-model', () => ({
2734
OpenAICompatibleCompletionLanguageModel: vi.fn(),
2835
}));
@@ -266,4 +273,57 @@ describe('OpenAICompatibleProvider', () => {
266273
).toBeUndefined();
267274
});
268275
});
276+
277+
describe('supportsStructuredOutputs setting', () => {
278+
    it('should pass supportsStructuredOutputs to .chatModel() and .languageModel() only', () => {
279+
const options = {
280+
baseURL: 'https://api.example.com',
281+
name: 'test-provider',
282+
supportsStructuredOutputs: true,
283+
};
284+
const provider = createOpenAICompatible(options);
285+
286+
provider('model-id');
287+
expect(
288+
OpenAICompatibleChatLanguageModelMock.mock.calls[0][1]
289+
.supportsStructuredOutputs,
290+
).toBe(true);
291+
292+
provider.chatModel('chat-model');
293+
expect(
294+
OpenAICompatibleChatLanguageModelMock.mock.calls[1][1]
295+
.supportsStructuredOutputs,
296+
).toBe(true);
297+
298+
provider.languageModel('completion-model');
299+
expect(
300+
OpenAICompatibleChatLanguageModelMock.mock.calls[2][1]
301+
.supportsStructuredOutputs,
302+
).toBe(true);
303+
304+
provider.completionModel('completion-model');
305+
const completionModelConfigArg =
306+
OpenAICompatibleCompletionLanguageModelMock.mock.calls[0][1];
307+
expect(
308+
// @ts-expect-error - testing
309+
completionModelConfigArg.supportsStructuredOutputs,
310+
).toBe(undefined);
311+
312+
provider.textEmbeddingModel('embedding-model');
313+
const embeddingModelConfigArg =
314+
OpenAICompatibleEmbeddingModelMock.mock.calls[0][1];
315+
expect(
316+
// @ts-expect-error - testing
317+
embeddingModelConfigArg.supportsStructuredOutputs,
318+
).toBe(undefined);
319+
320+
provider.imageModel('image-model');
321+
const imageModelConfigArg =
322+
OpenAICompatibleImageModelMock.mock.calls[0][1];
323+
expect(
324+
// @ts-expect-error - testing
325+
imageModelConfigArg.supportsStructuredOutputs,
326+
).toBe(undefined);
327+
});
328+
});
269329
});

packages/openai-compatible/src/openai-compatible-provider.ts

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,10 @@ import {
1010
withUserAgentSuffix,
1111
getRuntimeEnvironmentUserAgent,
1212
} from '@ai-sdk/provider-utils';
13-
import { OpenAICompatibleChatLanguageModel } from './chat/openai-compatible-chat-language-model';
13+
import {
14+
OpenAICompatibleChatConfig,
15+
OpenAICompatibleChatLanguageModel,
16+
} from './chat/openai-compatible-chat-language-model';
1417
import { OpenAICompatibleCompletionLanguageModel } from './completion/openai-compatible-completion-language-model';
1518
import { OpenAICompatibleEmbeddingModel } from './embedding/openai-compatible-embedding-model';
1619
import { OpenAICompatibleImageModel } from './image/openai-compatible-image-model';
@@ -24,7 +27,10 @@ export interface OpenAICompatibleProvider<
2427
> extends Omit<ProviderV2, 'imageModel'> {
2528
(modelId: CHAT_MODEL_IDS): LanguageModelV2;
2629

27-
languageModel(modelId: CHAT_MODEL_IDS): LanguageModelV2;
30+
languageModel(
31+
modelId: CHAT_MODEL_IDS,
32+
config?: Partial<OpenAICompatibleChatConfig>,
33+
): LanguageModelV2;
2834

2935
chatModel(modelId: CHAT_MODEL_IDS): LanguageModelV2;
3036

@@ -74,6 +80,11 @@ or to provide a custom fetch implementation for e.g. testing.
7480
Include usage information in streaming responses.
7581
*/
7682
includeUsage?: boolean;
83+
84+
/**
85+
* Whether the provider supports structured outputs in chat models.
86+
*/
87+
supportsStructuredOutputs?: boolean;
7788
}
7889

7990
/**
@@ -130,6 +141,7 @@ export function createOpenAICompatible<
130141
new OpenAICompatibleChatLanguageModel(modelId, {
131142
...getCommonModelConfig('chat'),
132143
includeUsage: options.includeUsage,
144+
supportsStructuredOutputs: options.supportsStructuredOutputs,
133145
});
134146

135147
const createCompletionModel = (modelId: COMPLETION_MODEL_IDS) =>

0 commit comments

Comments
 (0)