@@ -59,7 +59,7 @@ def test_gemini_only_fallback_selection(self):
         balanced = ModelProviderRegistry.get_preferred_fallback_model(ToolModelCategory.BALANCED)

         # Should select appropriate Gemini models
-        assert extended_reasoning in ["gemini-2.5-pro", "pro"]
+        assert extended_reasoning in ["gemini-3-pro-preview", "gemini-2.5-pro", "pro"]
         assert fast_response in ["gemini-2.5-flash", "flash"]
         assert balanced in ["gemini-2.5-flash", "flash"]

@@ -139,7 +139,7 @@ def test_both_gemini_and_openai_priority(self):
         fast_response = ModelProviderRegistry.get_preferred_fallback_model(ToolModelCategory.FAST_RESPONSE)

         # Should prefer Gemini now (based on new provider priority: Gemini before OpenAI)
-        assert extended_reasoning == "gemini-2.5-pro"  # Gemini has higher priority now
+        assert extended_reasoning == "gemini-3-pro-preview"  # Gemini 3 Pro Preview has higher priority now

         # Should prefer Gemini for fast response
         assert fast_response == "gemini-2.5-flash"  # Gemini has higher priority now
@@ -317,7 +317,7 @@ def test_alias_resolution_before_api_calls(self):
         # Test that providers resolve aliases correctly
         test_cases = [
             ("flash", ProviderType.GOOGLE, "gemini-2.5-flash"),
-            ("pro", ProviderType.GOOGLE, "gemini-2.5-pro"),
+            ("pro", ProviderType.GOOGLE, "gemini-3-pro-preview"),  # "pro" now resolves to gemini-3-pro-preview
             ("mini", ProviderType.OPENAI, "gpt-5-mini"),  # "mini" now resolves to gpt-5-mini
             ("o3mini", ProviderType.OPENAI, "o3-mini"),
             ("grok", ProviderType.XAI, "grok-4"),
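For context, the third hunk pins down how aliases should resolve after this change. A minimal standalone sketch of that mapping is below; `MODEL_ALIASES` and `resolve_alias` are illustrative names only, not the registry's actual API, which the tests exercise through the real provider classes.

```python
# Standalone illustrative sketch, not the project's implementation: it only
# mirrors what the updated assertions expect after the gemini-3-pro-preview change.
MODEL_ALIASES = {
    "pro": "gemini-3-pro-preview",  # previously resolved to gemini-2.5-pro
    "flash": "gemini-2.5-flash",
    "mini": "gpt-5-mini",
    "o3mini": "o3-mini",
    "grok": "grok-4",
}


def resolve_alias(name: str) -> str:
    """Return the canonical model name for an alias, or the name unchanged."""
    return MODEL_ALIASES.get(name, name)


assert resolve_alias("pro") == "gemini-3-pro-preview"
assert resolve_alias("gemini-2.5-flash") == "gemini-2.5-flash"  # canonical names pass through
```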