3 changes: 3 additions & 0 deletions .gitignore
@@ -172,3 +172,6 @@ cython_debug/

# PyPI configuration file
.pypirc

# Ignore JetBrains IDE configuration folder
.idea/
2 changes: 1 addition & 1 deletion README.md
@@ -1,4 +1,4 @@
# ai-server
# markus-ai-server

## Developers

File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
8 changes: 4 additions & 4 deletions pyproject.toml
@@ -4,8 +4,8 @@ build-backend = "hatchling.build"


[project]
name = "ai_server"
version = "0.0.1"
name = "markus_ai_server"
version = "0.0.2"
authors = [
{ name="David Liu", email="david@cs.toronto.edu" },
]
@@ -33,8 +33,8 @@ dev = [
]

[project.urls]
Homepage = "https://github.com/MarkUsProject/ai-server"
Issues = "https://github.com/MarkUsProject/ai-server/issues"
Homepage = "https://github.com/MarkUsProject/markus-ai-server"
Issues = "https://github.com/MarkUsProject/markus-ai-server/issues"

[tool.black]
line-length = 120
20 changes: 10 additions & 10 deletions test/test_cli_mode.py
@@ -6,7 +6,7 @@

os.environ.setdefault('REDIS_URL', 'redis://localhost:6379')

from ai_server.server import chat_with_llamacpp, chat_with_model
from markus_ai_server.server import chat_with_llamacpp, chat_with_model

# Test models
TEST_LLAMACPP_MODEL = 'DeepSeek-V3-0324-UD-IQ2_XXS'
@@ -16,28 +16,28 @@
@pytest.fixture
def mock_subprocess():
"""Mock subprocess.run for CLI tests."""
with patch('ai_server.server.subprocess.run') as mock:
with patch('markus_ai_server.server.subprocess.run') as mock:
yield mock


@pytest.fixture
def mock_resolve_model_path():
"""Mock resolve_model_path for CLI tests."""
with patch('ai_server.server.resolve_model_path') as mock:
with patch('markus_ai_server.server.resolve_model_path') as mock:
yield mock


@pytest.fixture
def mock_glob():
"""Mock glob.glob for model discovery tests."""
with patch('ai_server.server.glob.glob') as mock:
with patch('markus_ai_server.server.glob.glob') as mock:
yield mock


@pytest.fixture
def mock_ollama():
"""Mock ollama.chat for fallback tests."""
with patch('ai_server.server.ollama.chat') as mock:
with patch('markus_ai_server.server.ollama.chat') as mock:
yield mock


@@ -91,9 +91,9 @@ class TestCLIModeRouting:
@pytest.fixture(autouse=True)
def setup_routing_mocks(self):
"""Set up common mocks for routing tests."""
with patch('ai_server.server.chat_with_llamacpp') as mock_chat_llamacpp, patch(
'ai_server.server.is_llamacpp_available'
) as mock_available, patch('ai_server.server.chat_with_ollama') as mock_chat_ollama:
with patch('markus_ai_server.server.chat_with_llamacpp') as mock_chat_llamacpp, patch(
'markus_ai_server.server.is_llamacpp_available'
) as mock_available, patch('markus_ai_server.server.chat_with_ollama') as mock_chat_ollama:
self.mock_chat_llamacpp = mock_chat_llamacpp
self.mock_available = mock_available
self.mock_chat_ollama = mock_chat_ollama
@@ -215,8 +215,8 @@ def test_cli_mode_passes_json_schema_to_ollama(self, tmp_path):
test_schema = {"schema": {"type": "object", "properties": {"answer": {"type": "string"}}}}

# Prepare mocks
with patch('ai_server.server.is_llamacpp_available', return_value=False), patch(
'ai_server.server.chat_with_ollama'
with patch('markus_ai_server.server.is_llamacpp_available', return_value=False), patch(
'markus_ai_server.server.chat_with_ollama'
) as mock_ollama:
mock_ollama.return_value = "schema-aware response"

14 changes: 9 additions & 5 deletions test/test_core.py
@@ -5,7 +5,11 @@

os.environ.setdefault('REDIS_URL', 'redis://localhost:6379')

from ai_server.server import chat_with_ollama, is_llamacpp_available, resolve_model_path
from markus_ai_server.server import (
chat_with_ollama,
is_llamacpp_available,
resolve_model_path,
)

# Test models
TEST_LLAMACPP_MODEL = 'DeepSeek-V3-0324-UD-IQ2_XXS'
@@ -15,14 +19,14 @@
@pytest.fixture
def mock_glob():
"""Mock glob.glob for model discovery tests."""
with patch('ai_server.server.glob.glob') as mock:
with patch('markus_ai_server.server.glob.glob') as mock:
yield mock


@pytest.fixture
def mock_ollama():
"""Mock ollama.chat for ollama tests."""
with patch('ai_server.server.ollama.chat') as mock:
with patch('markus_ai_server.server.ollama.chat') as mock:
yield mock


@@ -49,7 +53,7 @@ def test_resolve_model_path_not_found(self, mock_glob):

def test_is_llamacpp_available_true(self):
"""Test model availability check when model exists."""
with patch('ai_server.server.resolve_model_path') as mock_resolve:
with patch('markus_ai_server.server.resolve_model_path') as mock_resolve:
mock_resolve.return_value = f'/data1/GGUF/{TEST_LLAMACPP_MODEL}/{TEST_LLAMACPP_MODEL}.gguf'

result = is_llamacpp_available(TEST_LLAMACPP_MODEL)
@@ -59,7 +63,7 @@ def test_is_llamacpp_available_true(self):

def test_is_llamacpp_available_false(self):
"""Test model availability check when model doesn't exist."""
with patch('ai_server.server.resolve_model_path') as mock_resolve:
with patch('markus_ai_server.server.resolve_model_path') as mock_resolve:
mock_resolve.return_value = None

result = is_llamacpp_available('nonexistent-model')
24 changes: 12 additions & 12 deletions test/test_server_mode.py
@@ -5,7 +5,7 @@

os.environ.setdefault('REDIS_URL', 'redis://localhost:6379')

from ai_server.server import chat_with_llama_server_http, chat_with_model
from markus_ai_server.server import chat_with_llama_server_http, chat_with_model

# Test models
TEST_LLAMACPP_MODEL = 'DeepSeek-V3-0324-UD-IQ2_XXS'
@@ -15,28 +15,28 @@
@pytest.fixture
def mock_requests_post():
"""Mock requests.post for HTTP tests."""
with patch('ai_server.server.requests.post') as mock:
with patch('markus_ai_server.server.requests.post') as mock:
yield mock


@pytest.fixture
def mock_llama_server_url():
"""Mock LLAMA_SERVER_URL for server tests."""
with patch('ai_server.server.LLAMA_SERVER_URL', 'http://localhost:8080'):
with patch('markus_ai_server.server.LLAMA_SERVER_URL', 'http://localhost:8080'):
yield


@pytest.fixture
def mock_glob():
"""Mock glob.glob for model discovery tests."""
with patch('ai_server.server.glob.glob') as mock:
with patch('markus_ai_server.server.glob.glob') as mock:
yield mock


@pytest.fixture
def mock_ollama():
"""Mock ollama.chat for fallback tests."""
with patch('ai_server.server.ollama.chat') as mock:
with patch('markus_ai_server.server.ollama.chat') as mock:
yield mock


@@ -62,7 +62,7 @@ def test_chat_with_llama_server_http_success(self, mock_requests_post, mock_llam

def test_chat_with_llama_server_http_no_url(self):
"""Test HTTP chat when LLAMA_SERVER_URL is not set."""
with patch('ai_server.server.LLAMA_SERVER_URL', None):
with patch('markus_ai_server.server.LLAMA_SERVER_URL', None):
with pytest.raises(Exception, match="LLAMA_SERVER_URL environment variable not set"):
chat_with_llama_server_http(TEST_LLAMACPP_MODEL, 'Hello')

@@ -92,10 +92,10 @@ class TestServerModeRouting:
@pytest.fixture(autouse=True)
def setup_routing_mocks(self):
"""Set up common mocks for routing tests."""
with patch('ai_server.server.chat_with_llama_server_http') as mock_chat_server, patch(
'ai_server.server.is_llamacpp_available'
) as mock_available, patch('ai_server.server.chat_with_ollama') as mock_chat_ollama, patch(
'ai_server.server.LLAMA_SERVER_URL', 'http://localhost:8080'
with patch('markus_ai_server.server.chat_with_llama_server_http') as mock_chat_server, patch(
'markus_ai_server.server.is_llamacpp_available'
) as mock_available, patch('markus_ai_server.server.chat_with_ollama') as mock_chat_ollama, patch(
'markus_ai_server.server.LLAMA_SERVER_URL', 'http://localhost:8080'
):
self.mock_chat_server = mock_chat_server
self.mock_available = mock_available
@@ -158,7 +158,7 @@ def test_server_mode_fallback_to_ollama_with_model_options(self):

def test_server_mode_requires_server_url(self):
"""Test server mode requires LLAMA_SERVER_URL to be set."""
with patch('ai_server.server.LLAMA_SERVER_URL', None):
with patch('markus_ai_server.server.LLAMA_SERVER_URL', None):
self.mock_available.return_value = True

with pytest.raises(Exception, match="LLAMA_SERVER_URL environment variable not set"):
@@ -218,7 +218,7 @@ def test_server_mode_passes_json_schema_to_llama_server(self, tmp_path, mock_req
"""
test_schema = {"schema": {"type": "object", "properties": {"answer": {"type": "string"}}}}

with patch('ai_server.server.is_llamacpp_available', return_value=True):
with patch('markus_ai_server.server.is_llamacpp_available', return_value=True):
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = {"choices": [{"message": {"content": "Schema-aware server reply"}}]}
22 changes: 11 additions & 11 deletions test/test_system_prompt.py
@@ -15,11 +15,11 @@ def setup_env(self, monkeypatch):
"""Set up environment variables for each test."""
monkeypatch.setenv('REDIS_URL', 'redis://localhost:6379')

@patch('ai_server.server.subprocess.run')
@patch('ai_server.server.resolve_model_path')
@patch('markus_ai_server.server.subprocess.run')
@patch('markus_ai_server.server.resolve_model_path')
def test_llamacpp_cli_with_system_prompt(self, mock_resolve, mock_subprocess):
"""Test system_prompt passed to llama.cpp CLI."""
from ai_server.server import chat_with_llamacpp
from markus_ai_server.server import chat_with_llamacpp

mock_resolve.return_value = f'/data1/GGUF/{TEST_MODEL}/{TEST_MODEL}.gguf'
mock_result = MagicMock()
Expand All @@ -33,11 +33,11 @@ def test_llamacpp_cli_with_system_prompt(self, mock_resolve, mock_subprocess):
assert '--system-prompt' in cmd
assert TEST_SYSTEM_PROMPT in cmd

@patch('ai_server.server.requests.post')
@patch('ai_server.server.LLAMA_SERVER_URL', 'http://localhost:8080')
@patch('markus_ai_server.server.requests.post')
@patch('markus_ai_server.server.LLAMA_SERVER_URL', 'http://localhost:8080')
def test_llama_server_http_with_system_prompt(self, mock_post):
"""Test system_prompt passed to llama-server HTTP."""
from ai_server.server import chat_with_llama_server_http
from markus_ai_server.server import chat_with_llama_server_http

mock_response = MagicMock()
mock_response.status_code = 200
Expand All @@ -51,10 +51,10 @@ def test_llama_server_http_with_system_prompt(self, mock_post):
assert messages[0]['role'] == 'system'
assert messages[0]['content'] == TEST_SYSTEM_PROMPT

@patch('ai_server.server.ollama.chat')
@patch('markus_ai_server.server.ollama.chat')
def test_ollama_with_system_prompt(self, mock_ollama):
"""Test system_prompt passed to ollama."""
from ai_server.server import chat_with_ollama
from markus_ai_server.server import chat_with_ollama

mock_response = MagicMock()
mock_response.message.content = "result"
Expand All @@ -67,11 +67,11 @@ def test_ollama_with_system_prompt(self, mock_ollama):
assert messages[0]['role'] == 'system'
assert messages[0]['content'] == TEST_SYSTEM_PROMPT

@patch('ai_server.server.chat_with_llamacpp')
@patch('ai_server.server.is_llamacpp_available')
@patch('markus_ai_server.server.chat_with_llamacpp')
@patch('markus_ai_server.server.is_llamacpp_available')
def test_chat_with_model_routing(self, mock_available, mock_chat):
"""Test system_prompt passed through chat_with_model routing."""
from ai_server.server import chat_with_model
from markus_ai_server.server import chat_with_model

mock_available.return_value = True
mock_chat.return_value = "result"
12 changes: 6 additions & 6 deletions test/test_system_prompt_api.py
@@ -19,14 +19,14 @@ def setup_env(self, monkeypatch):
@pytest.fixture
def client(self):
"""Create test client for Flask app."""
from ai_server.server import app
from markus_ai_server.server import app

app.config['TESTING'] = True
with app.test_client() as client:
yield client

@patch('ai_server.server.REDIS_CONNECTION')
@patch('ai_server.server.chat_with_model')
@patch('markus_ai_server.server.REDIS_CONNECTION')
@patch('markus_ai_server.server.chat_with_model')
def test_api_with_system_prompt(self, mock_chat, mock_redis, client):
"""Test /chat endpoint receives and passes system_prompt."""
mock_redis.get.return_value = b'test_user'
Expand All @@ -44,8 +44,8 @@ def test_api_with_system_prompt(self, mock_chat, mock_redis, client):
TEST_MODEL, TEST_USER_CONTENT, 'cli', TEST_SYSTEM_PROMPT, [], json_schema=None, model_options=None
)

@patch('ai_server.server.REDIS_CONNECTION')
@patch('ai_server.server.chat_with_model')
@patch('markus_ai_server.server.REDIS_CONNECTION')
@patch('markus_ai_server.server.chat_with_model')
def test_api_without_system_prompt(self, mock_chat, mock_redis, client):
"""Test /chat endpoint works without system_prompt."""
mock_redis.get.return_value = b'test_user'
Expand All @@ -61,7 +61,7 @@ def test_api_without_system_prompt(self, mock_chat, mock_redis, client):
TEST_MODEL, TEST_USER_CONTENT, 'cli', None, [], model_options=None, json_schema=None
)

@patch('ai_server.server.REDIS_CONNECTION')
@patch('markus_ai_server.server.REDIS_CONNECTION')
def test_api_authentication_still_required(self, mock_redis, client):
"""Test that authentication is still required with system_prompt."""
mock_redis.get.return_value = None