@@ -19,11 +19,35 @@ public class OpenAIProvider : ILLMProvider
 
     public IReadOnlyList<LLMModel> Models { get; } = new[]
     {
+        new LLMModel
+        {
+            Id = "gpt-4.1",
+            Name = "GPT-4.1",
+            Description = "Latest flagship GPT-4 generation model",
+            MaxTokens = 4096,
+            ContextWindow = 128000,
+            CostPerInputToken = 0m,
+            CostPerOutputToken = 0m,
+            SupportsTools = true,
+            SupportsStreaming = true
+        },
+        new LLMModel
+        {
+            Id = "gpt-4.1-mini",
+            Name = "GPT-4.1 Mini",
+            Description = "Smaller, efficient GPT-4.1 family model",
+            MaxTokens = 4096,
+            ContextWindow = 128000,
+            CostPerInputToken = 0m,
+            CostPerOutputToken = 0m,
+            SupportsTools = true,
+            SupportsStreaming = true
+        },
         new LLMModel
         {
             Id = "gpt-4o",
             Name = "GPT-4o",
-            Description = "Most capable GPT-4 model",
+            Description = "Multimodal GPT-4o model",
             MaxTokens = 4096,
             ContextWindow = 128000,
             CostPerInputToken = 0.005m / 1000,
@@ -35,7 +59,7 @@ public class OpenAIProvider : ILLMProvider
         {
             Id = "gpt-4o-mini",
             Name = "GPT-4o Mini",
-            Description = "Faster, cheaper GPT-4 model",
+            Description = "Fast, low-cost GPT-4o variant",
             MaxTokens = 4096,
             ContextWindow = 128000,
             CostPerInputToken = 0.00015m / 1000,
         new LLMModel
         {
             Id = "gpt-3.5-turbo",
-            Name = "GPT-3.5 Turbo",
-            Description = "Legacy GPT-3.5 model",
+            Name = "GPT-3.5 Turbo (Legacy)",
+            Description = "Legacy model (kept for backwards compatibility)",
             MaxTokens = 4096,
             ContextWindow = 16385,
             CostPerInputToken = 0.0015m / 1000,
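Since each cost field stores the published per-1K price divided by 1000, estimating what a call costs is a single multiply-and-add per direction. A minimal sketch, assuming only the decimal cost properties shown above; the EstimateCost helper and the token counts are illustrative, not part of this change:

// Hypothetical helper: dollar cost of one call from token counts.
static decimal EstimateCost(LLMModel model, int inputTokens, int outputTokens) =>
    inputTokens * model.CostPerInputToken + outputTokens * model.CostPerOutputToken;

// e.g. 2,000 prompt tokens on gpt-4o: 2000 * (0.005m / 1000) = 0.01m on the
// input side; the output side works the same way with CostPerOutputToken.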
@@ -86,6 +110,37 @@ public OpenAIProvider(HttpClient httpClient, LLMProviderConfig config)
         };
     }
 
+    /// <summary>
+    /// Attempts to fetch the available models from the OpenAI API.
+    /// Falls back to an empty list on any error.
+    /// </summary>
+    public async Task<IReadOnlyList<LLMModel>> FetchModelsAsync(CancellationToken cancellationToken = default)
+    {
+        try
+        {
+            var resp = await _httpClient.GetFromJsonAsync<OpenAIModelList>("models", _jsonOptions, cancellationToken);
+            if (resp?.Data == null) return Array.Empty<LLMModel>();
+
+            return resp.Data.Select(d => new LLMModel
+            {
+                Id = d.Id,
+                Name = d.Id,
+                // The models endpoint returns no description; fall back to the owning org.
+                Description = d.OwnedBy ?? string.Empty,
+                // The endpoint reports no limits or pricing, so use conservative defaults.
+                MaxTokens = 4096,
+                ContextWindow = 4096,
+                CostPerInputToken = 0m,
+                CostPerOutputToken = 0m,
+                SupportsTools = true,
+                SupportsStreaming = true
+            }).ToList();
+        }
+        catch
+        {
+            return Array.Empty<LLMModel>();
+        }
+    }
+
     public async Task<LLMResponse> SendAsync(LLMRequest request, CancellationToken cancellationToken = default)
     {
         var openAIRequest = ConvertToOpenAIRequest(request, stream: false);
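A quick sketch of how a caller might prefer the live list and keep the static catalog as a fallback. The HttpClient setup and the config and ct names here are assumptions for illustration, not part of this diff:

// Assumed setup: the provider's HttpClient points at the OpenAI API root.
var http = new HttpClient { BaseAddress = new Uri("https://api.openai.com/v1/") };
var provider = new OpenAIProvider(http, config); // config: an LLMProviderConfig from elsewhere

// FetchModelsAsync never throws; an empty list means "use the built-in catalog".
IReadOnlyList<LLMModel> models = await provider.FetchModelsAsync(ct);
if (models.Count == 0)
    models = provider.Models;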
@@ -325,6 +380,17 @@ private record OpenAIChatStreamResponse
         public OpenAIChatChoice[]? Choices { get; init; }
     }
 
+    private record OpenAIModelList
+    {
+        public OpenAIModel[]? Data { get; init; }
+    }
+
+    private record OpenAIModel
+    {
+        public string Id { get; init; } = string.Empty;
+
+        // The /v1/models endpoint has no "purpose" field; "owned_by" is the only
+        // descriptive text it returns.
+        [JsonPropertyName("owned_by")]
+        public string? OwnedBy { get; init; }
+    }
+
     private record OpenAIChatChoice
     {
         public int Index { get; init; }
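The two records bind only the fields that GET /v1/models actually returns. A self-contained sketch of the mapping, with standalone mirror records and hand-written sample JSON standing in for the private types and the live response:

using System.Text.Json;
using System.Text.Json.Serialization;

// Trimmed sample of the live response; the real payload also carries
// "object" and "created" fields, which the records simply ignore.
const string payload = """{ "data": [ { "id": "gpt-4o", "owned_by": "openai" } ] }""";

var parsed = JsonSerializer.Deserialize<ModelList>(
    payload, new JsonSerializerOptions { PropertyNameCaseInsensitive = true });
Console.WriteLine(parsed?.Data?[0].Id);      // gpt-4o
Console.WriteLine(parsed?.Data?[0].OwnedBy); // openai

// Standalone mirrors of the private records above, for this demo only.
record ModelList { public Model[]? Data { get; init; } }
record Model
{
    public string Id { get; init; } = string.Empty;
    [JsonPropertyName("owned_by")]
    public string? OwnedBy { get; init; }
}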