@@ -5,11 +5,11 @@
 import pytest
 from unittest.mock import Mock, patch
 
-from stream_agents.core.llm import FunctionRegistry, function_registry
-from stream_agents.core.llm.llm import LLM
-from stream_agents.plugins.openai import LLM as OpenAILLM
-from stream_agents.plugins.anthropic import LLM as ClaudeLLM
-from stream_agents.plugins.gemini import LLM as GeminiLLM
+from vision_agents.core.llm import FunctionRegistry, function_registry
+from vision_agents.core.llm.llm import LLM
+from vision_agents.plugins.openai import LLM as OpenAILLM
+from vision_agents.plugins.anthropic import LLM as ClaudeLLM
+from vision_agents.plugins.gemini import LLM as GeminiLLM
 
 
 class TestFunctionRegistry:
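This first hunk is the substance of the commit: the package root is renamed from `stream_agents` to `vision_agents`, with the import structure otherwise unchanged. For orientation, here is a minimal sketch of how the imported names fit together; the `register` decorator is a hypothetical stand-in, since the diff shows only that registered functions are later invoked through `llm.call_function`.

```python
# Sketch only: FunctionRegistry.register is a hypothetical API. The diff
# confirms just the import path and the call_function() usage further down.
from vision_agents.core.llm import FunctionRegistry

registry = FunctionRegistry()

@registry.register  # hypothetical registration decorator
def get_weather(location: str) -> str:
    """Canned weather lookup used throughout these tests."""
    return f"Weather in {location}: Sunny, 72°F"
```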
@@ -166,7 +166,7 @@ class TestOpenAIFunctionCalling:
     """Test OpenAI function calling functionality."""
 
     @pytest.mark.asyncio
-    @patch('stream_agents.plugins.openai.openai_llm.AsyncOpenAI')
+    @patch('vision_agents.plugins.openai.openai_llm.AsyncOpenAI')
     async def test_openai_function_calling_response(self, mock_openai):
         """Test OpenAI function calling response."""
         # Mock the OpenAI client and response
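The `@patch` targets move with the rename; the tests themselves are untouched. All three provider test classes below follow the same mocking shape, sketched here for the OpenAI case. The `chat.completions.create` chain matches the real `AsyncOpenAI` client API, but the response payload is a placeholder, since the diff truncates each test body.

```python
# Sketch of the shared mocking pattern, assuming vision_agents is installed.
# The real tests also build a structured fake response; that part is omitted
# here because it does not appear in the diff.
from unittest.mock import AsyncMock, Mock, patch

with patch('vision_agents.plugins.openai.openai_llm.AsyncOpenAI') as mock_openai:
    mock_client = Mock()
    mock_openai.return_value = mock_client
    # The plugin presumably awaits create(), so an AsyncMock is needed here.
    mock_client.chat.completions.create = AsyncMock(return_value=Mock())
```

One observation from the visible context: `test_openai_conversational_response` and `test_claude_conversational_response` lack the `@pytest.mark.asyncio` marker that their function-calling siblings carry; if the suite runs pytest-asyncio in strict mode, those async tests would be skipped or error out.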
@@ -197,7 +197,7 @@ def get_weather(location: str) -> str:
         result = llm.call_function("get_weather", {"location": "New York"})
         assert result == "Weather in New York: Sunny, 72°F"
 
-    @patch('stream_agents.plugins.openai.openai_llm.AsyncOpenAI')
+    @patch('vision_agents.plugins.openai.openai_llm.AsyncOpenAI')
     async def test_openai_conversational_response(self, mock_openai):
         """Test OpenAI conversational response generation."""
         mock_client = Mock()
@@ -228,7 +228,7 @@ class TestClaudeFunctionCalling:
     """Test Claude function calling functionality."""
 
     @pytest.mark.asyncio
-    @patch('stream_agents.plugins.anthropic.anthropic_llm.AsyncAnthropic')
+    @patch('vision_agents.plugins.anthropic.anthropic_llm.AsyncAnthropic')
     async def test_claude_function_calling_response(self, mock_anthropic):
         """Test Claude function calling response."""
         # Mock the Anthropic client and response
@@ -259,7 +259,7 @@ def get_weather(location: str) -> str:
         result = llm.call_function("get_weather", {"location": "New York"})
         assert result == "Weather in New York: Sunny, 72°F"
 
-    @patch('stream_agents.plugins.anthropic.anthropic_llm.AsyncAnthropic')
+    @patch('vision_agents.plugins.anthropic.anthropic_llm.AsyncAnthropic')
     async def test_claude_conversational_response(self, mock_anthropic):
         """Test Claude conversational response generation."""
         mock_client = Mock()
@@ -290,7 +290,7 @@ class TestGeminiFunctionCalling:
     """Test Gemini function calling functionality."""
 
     @pytest.mark.asyncio
-    @patch('stream_agents.plugins.gemini.gemini_llm.genai')
+    @patch('vision_agents.plugins.gemini.gemini_llm.genai')
     async def test_gemini_function_calling_response(self, mock_genai):
         """Test Gemini function calling response."""
         # Mock the Gemini client and response
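Note that the Gemini tests patch the `genai` module that `gemini_llm` imports, rather than a specific client class as the OpenAI and Anthropic tests do. A short sketch of the difference; the `genai.Client()` construction is an assumption, as the diff shows only the patch target and the first `Mock()` line of the test body.

```python
# Patching the whole genai module turns every attribute on it into a Mock.
from unittest.mock import Mock, patch

with patch('vision_agents.plugins.gemini.gemini_llm.genai') as mock_genai:
    mock_client = Mock()
    # Assumption: the plugin builds its client via genai.Client().
    mock_genai.Client.return_value = mock_client
```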
@@ -325,7 +325,7 @@ def get_weather(location: str) -> str:
         assert result == "Weather in New York: Sunny, 72°F"
 
     @pytest.mark.asyncio
-    @patch('stream_agents.plugins.gemini.gemini_llm.genai')
+    @patch('vision_agents.plugins.gemini.gemini_llm.genai')
     async def test_gemini_conversational_response(self, mock_genai):
         """Test Gemini conversational response generation."""
         mock_client = Mock()
@@ -478,7 +478,7 @@ def test_func(x: int) -> int:
     @pytest.mark.asyncio
     async def test_tool_lifecycle_events(self):
         """Test that tool lifecycle events are emitted."""
-        from stream_agents.core.llm.events import ToolStartEvent, ToolEndEvent
+        from vision_agents.core.llm.events import ToolStartEvent, ToolEndEvent
 
         llm = LLM()
 
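The last hunk applies the same rename to a local import inside `test_tool_lifecycle_events`. As a sketch of what a lifecycle assertion around these events could look like, with the subscription hook invented purely for illustration (the diff confirms only the import path and the bare `LLM()` construction):

```python
# Sketch only: llm.events.subscribe is a hypothetical hook, not a confirmed
# vision_agents API. ToolStartEvent/ToolEndEvent come straight from the diff.
from vision_agents.core.llm.events import ToolStartEvent, ToolEndEvent
from vision_agents.core.llm.llm import LLM

llm = LLM()
emitted: list[object] = []

llm.events.subscribe(ToolStartEvent, emitted.append)  # hypothetical
llm.events.subscribe(ToolEndEvent, emitted.append)    # hypothetical
```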