@@ -267,6 +267,56 @@ describe("ProviderTransform.maxOutputTokens", () => {
267267 expect ( result ) . toBe ( OUTPUT_TOKEN_MAX )
268268 } )
269269 } )
270+
describe("openai-compatible with thinking options (snake_case)", () => {
  // Shared runner: invoke maxOutputTokens for the openai-compatible SDK with a
  // snake_case `thinking` option built from the given type and token budget.
  const run = (modelLimit, type, budgetTokens) =>
    ProviderTransform.maxOutputTokens(
      "@ai-sdk/openai-compatible",
      { thinking: { type, budget_tokens: budgetTokens } },
      modelLimit,
      OUTPUT_TOKEN_MAX,
    )

  test("returns 32k when budget_tokens + 32k <= modelLimit", () => {
    // Budget plus the default cap fits under the model limit, so the cap wins.
    expect(run(100000, "enabled", 10000)).toBe(OUTPUT_TOKEN_MAX)
  })

  test("returns modelLimit - budget_tokens when budget_tokens + 32k > modelLimit", () => {
    // Budget squeezes the output window: 50000 - 30000 = 20000.
    expect(run(50000, "enabled", 30000)).toBe(20000)
  })

  test("returns 32k when thinking type is not enabled", () => {
    // A disabled thinking block must not reduce the output budget.
    expect(run(100000, "disabled", 10000)).toBe(OUTPUT_TOKEN_MAX)
  })

  test("returns 32k when budget_tokens is 0", () => {
    // A zero budget is a no-op; the default cap applies unchanged.
    expect(run(100000, "enabled", 0)).toBe(OUTPUT_TOKEN_MAX)
  })
})
270320} )
271321
272322describe ( "ProviderTransform.schema - gemini array items" , ( ) => {
@@ -1494,6 +1544,67 @@ describe("ProviderTransform.variants", () => {
14941544 expect ( result . low ) . toEqual ( { reasoningEffort : "low" } )
14951545 expect ( result . high ) . toEqual ( { reasoningEffort : "high" } )
14961546 } )
1547+
test("Claude via LiteLLM returns thinking with snake_case budget_tokens", () => {
  // Claude proxied through a LiteLLM endpoint via the openai-compatible npm
  // package: variants are asserted to carry snake_case `budget_tokens`.
  const model = createMockModel({
    id: "anthropic/claude-sonnet-4-5",
    providerID: "anthropic",
    api: {
      id: "claude-sonnet-4-5-20250929",
      url: "http://localhost:4000",
      npm: "@ai-sdk/openai-compatible",
    },
  })

  const variants = ProviderTransform.variants(model)
  expect(Object.keys(variants)).toEqual(["high", "max"])

  // Each variant keeps the enabled/budget pair; "max" uses the 31999 ceiling.
  const expected = {
    high: { thinking: { type: "enabled", budget_tokens: 16000 } },
    max: { thinking: { type: "enabled", budget_tokens: 31999 } },
  }
  for (const [name, value] of Object.entries(expected)) {
    expect(variants[name]).toEqual(value)
  }
})
1573+
1574+ test ( "Claude model (by model.id) via openai-compatible uses snake_case" , ( ) => {
1575+ const model = createMockModel ( {
1576+ id : "litellm/claude-3-opus" ,
1577+ providerID : "litellm" ,
1578+ api : {
1579+ id : "claude-3-opus-20240229" ,
1580+ url : "http://localhost:4000" ,
1581+ npm : "@ai-sdk/openai-compatible" ,
1582+ } ,
1583+ } )
1584+ const result = ProviderTransform . variants ( model )
1585+ expect ( Object . keys ( result ) ) . toEqual ( [ "high" , "max" ] )
1586+ expect ( result . high ) . toEqual ( {
1587+ thinking : {
1588+ type : "enabled" ,
1589+ budget_tokens : 16000 ,
1590+ } ,
1591+ } )
1592+ } )
1593+
test("Anthropic model (by model.api.id) via openai-compatible uses snake_case", () => {
  // Neither model.id nor providerID hints at Anthropic here — only api.id
  // does, so this exercises detection via the upstream API model id.
  const mock = createMockModel({
    id: "custom/my-model",
    providerID: "custom",
    api: {
      id: "anthropic.claude-sonnet",
      url: "http://localhost:4000",
      npm: "@ai-sdk/openai-compatible",
    },
  })

  const variants = ProviderTransform.variants(mock)
  expect(Object.keys(variants)).toEqual(["high", "max"])
  expect(variants.high.thinking.budget_tokens).toBe(16000)
})
14971608 } )
14981609
14991610 describe ( "@ai-sdk/azure" , ( ) => {
0 commit comments