[Misc] Enable vLLM to Dynamically Load LoRA from a Remote Server #10546
Merged

Commits (19):
397ed64  Dynamic LoRA plugin
1320607  Check lora_resolver output before adding to lora_requests
f489204  Merge remote-tracking branch 'origin/main' into dynamic_lora_resolver
6088b36  add base_model_name to LoRaResolver.resolve_lora and make resolve_lor…
91ab230  Fix precommit check
e4886bc  update lora resolver test
a152aa9  comment out engine_client.add_lora
9d1c422  use atomic counter as lora_int_id
e0a2cef  Re add engine_client.add_lora
eeb7772  Merge remote-tracking branch 'origin/main' into dynamic_lora_resolver
9ca98c1  Test for LoRA Resolver
02c33da  Rename lora/test_lora_resolver to lora/test_resolver
0c5c109  Add VLLM_ALLOW_RUNTIME_LORA_UPDATING flag check
c5c172d  Add documentation for LoRAResolver plugin
19af34c  Fix doc new line
c72da0e  Add engine.generate lora_request assertion for successful completion …
b577d2b  Rename generate_lora_request to called_lora_request
82f8e40  Add example of LoRAResolver
3b47f94  Minor doc update, remove redundant word
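
Taken together, these commits add a LoRAResolver plugin hook: when a completion request names an adapter the server does not yet know, the serving layer asks a registered resolver to map the name to a LoRARequest, loads it through engine_client.add_lora, and only then generates; the whole path is gated behind the VLLM_ALLOW_RUNTIME_LORA_UPDATING flag. As a minimal sketch of a concrete resolver, assuming only the interfaces exercised by the tests below (LoRAResolver.resolve_lora and LoRAResolverRegistry.register_resolver), the snippet implements a hypothetical directory-backed resolver; the "local_dir" name and the /mnt/loras root are illustrative, not part of this PR.

# A hypothetical directory-backed resolver. Everything vLLM-specific here
# (LoRAResolver, LoRAResolverRegistry, LoRARequest) appears in the tests
# below; the directory layout and resolver name are assumptions.
import os
from typing import Optional

from vllm.lora.request import LoRARequest
from vllm.lora.resolver import LoRAResolver, LoRAResolverRegistry


class LocalDirResolver(LoRAResolver):
    """Resolve adapter names against subdirectories of a local root."""

    def __init__(self, adapter_root: str):
        self.adapter_root = adapter_root

    async def resolve_lora(self, base_model_name: str,
                           lora_name: str) -> Optional[LoRARequest]:
        adapter_path = os.path.join(self.adapter_root, lora_name)
        if not os.path.isdir(adapter_path):
            return None  # unknown adapter; the server answers 404
        return LoRARequest(lora_name=lora_name,
                           lora_int_id=abs(hash(lora_name)),
                           lora_path=adapter_path)


# Registration mirrors the test fixtures in this PR.
LoRAResolverRegistry.register_resolver("local_dir",
                                       LocalDirResolver("/mnt/loras"))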
Files changed:
@@ -0,0 +1,209 @@
# SPDX-License-Identifier: Apache-2.0

from contextlib import suppress
from dataclasses import dataclass, field
from http import HTTPStatus
from typing import Optional
from unittest.mock import MagicMock

import pytest

from vllm.config import MultiModalConfig
from vllm.engine.multiprocessing.client import MQLLMEngineClient
from vllm.entrypoints.openai.protocol import CompletionRequest, ErrorResponse
from vllm.entrypoints.openai.serving_completion import OpenAIServingCompletion
from vllm.entrypoints.openai.serving_models import (BaseModelPath,
                                                    OpenAIServingModels)
from vllm.lora.request import LoRARequest
from vllm.lora.resolver import LoRAResolver, LoRAResolverRegistry
from vllm.transformers_utils.tokenizer import get_tokenizer

MODEL_NAME = "openai-community/gpt2"
BASE_MODEL_PATHS = [BaseModelPath(name=MODEL_NAME, model_path=MODEL_NAME)]

MOCK_RESOLVER_NAME = "mock_test_resolver"


@dataclass
class MockHFConfig:
    model_type: str = "any"


@dataclass
class MockModelConfig:
    """Minimal mock ModelConfig for testing."""
    model: str = MODEL_NAME
    tokenizer: str = MODEL_NAME
    trust_remote_code: bool = False
    tokenizer_mode: str = "auto"
    max_model_len: int = 100
    tokenizer_revision: Optional[str] = None
    multimodal_config: MultiModalConfig = field(
        default_factory=MultiModalConfig)
    hf_config: MockHFConfig = field(default_factory=MockHFConfig)
    logits_processor_pattern: Optional[str] = None
    diff_sampling_param: Optional[dict] = None
    allowed_local_media_path: str = ""
    encoder_config = None
    generation_config: str = "auto"

    def get_diff_sampling_param(self):
        return self.diff_sampling_param or {}


class MockLoRAResolver(LoRAResolver):

    async def resolve_lora(self, base_model_name: str,
                           lora_name: str) -> Optional[LoRARequest]:
        if lora_name == "test-lora":
            return LoRARequest(lora_name="test-lora",
                               lora_int_id=1,
                               lora_local_path="/fake/path/test-lora")
        elif lora_name == "invalid-lora":
            return LoRARequest(lora_name="invalid-lora",
                               lora_int_id=2,
                               lora_local_path="/fake/path/invalid-lora")
        return None


@pytest.fixture(autouse=True)
def register_mock_resolver():
    """Fixture to register and unregister the mock LoRA resolver."""
    resolver = MockLoRAResolver()
    LoRAResolverRegistry.register_resolver(MOCK_RESOLVER_NAME, resolver)
    yield
    # Cleanup: remove the resolver after the test runs
    if MOCK_RESOLVER_NAME in LoRAResolverRegistry.resolvers:
        del LoRAResolverRegistry.resolvers[MOCK_RESOLVER_NAME]


@pytest.fixture
def mock_serving_setup():
    """Provides a mocked engine and serving completion instance."""
    mock_engine = MagicMock(spec=MQLLMEngineClient)
    mock_engine.get_tokenizer.return_value = get_tokenizer(MODEL_NAME)
    mock_engine.errored = False

    def mock_add_lora_side_effect(lora_request: LoRARequest):
        """Simulate engine behavior when adding LoRAs."""
        if lora_request.lora_name == "test-lora":
            # Simulate successful addition
            return
        elif lora_request.lora_name == "invalid-lora":
            # Simulate failure during addition (e.g. invalid format)
            raise ValueError(f"Simulated failure adding LoRA: "
                             f"{lora_request.lora_name}")

    mock_engine.add_lora.side_effect = mock_add_lora_side_effect
    mock_engine.generate.reset_mock()
    mock_engine.add_lora.reset_mock()

    mock_model_config = MockModelConfig()
    models = OpenAIServingModels(engine_client=mock_engine,
                                 base_model_paths=BASE_MODEL_PATHS,
                                 model_config=mock_model_config)

    serving_completion = OpenAIServingCompletion(mock_engine,
                                                 mock_model_config,
                                                 models,
                                                 request_logger=None)

    return mock_engine, serving_completion


@pytest.mark.asyncio
async def test_serving_completion_with_lora_resolver(mock_serving_setup,
                                                     monkeypatch):
    monkeypatch.setenv("VLLM_ALLOW_RUNTIME_LORA_UPDATING", "true")

    mock_engine, serving_completion = mock_serving_setup

    lora_model_name = "test-lora"
    req_found = CompletionRequest(
        model=lora_model_name,
        prompt="Generate with LoRA",
    )

    # Suppress potential errors during the mocked generate call,
    # as we are primarily checking for add_lora and generate calls
    with suppress(Exception):
        await serving_completion.create_completion(req_found)

    mock_engine.add_lora.assert_called_once()
    called_lora_request = mock_engine.add_lora.call_args[0][0]
    assert isinstance(called_lora_request, LoRARequest)
    assert called_lora_request.lora_name == lora_model_name

    mock_engine.generate.assert_called_once()
    called_lora_request = mock_engine.generate.call_args[1]['lora_request']
    assert isinstance(called_lora_request, LoRARequest)
    assert called_lora_request.lora_name == lora_model_name


@pytest.mark.asyncio
async def test_serving_completion_resolver_not_found(mock_serving_setup,
                                                     monkeypatch):
    monkeypatch.setenv("VLLM_ALLOW_RUNTIME_LORA_UPDATING", "true")

    mock_engine, serving_completion = mock_serving_setup

    non_existent_model = "non-existent-lora-adapter"
    req = CompletionRequest(
        model=non_existent_model,
        prompt="what is 1+1?",
    )

    response = await serving_completion.create_completion(req)

    mock_engine.add_lora.assert_not_called()
    mock_engine.generate.assert_not_called()

    assert isinstance(response, ErrorResponse)
    assert response.code == HTTPStatus.NOT_FOUND.value
    assert non_existent_model in response.message


@pytest.mark.asyncio
async def test_serving_completion_resolver_add_lora_fails(
        mock_serving_setup, monkeypatch):
    monkeypatch.setenv("VLLM_ALLOW_RUNTIME_LORA_UPDATING", "true")

    mock_engine, serving_completion = mock_serving_setup

    invalid_model = "invalid-lora"
    req = CompletionRequest(
        model=invalid_model,
        prompt="what is 1+1?",
    )

    response = await serving_completion.create_completion(req)

    # Assert add_lora was called before the failure
    mock_engine.add_lora.assert_called_once()
    called_lora_request = mock_engine.add_lora.call_args[0][0]
    assert isinstance(called_lora_request, LoRARequest)
    assert called_lora_request.lora_name == invalid_model

    # Assert generate was *not* called due to the failure
    mock_engine.generate.assert_not_called()

    # Assert the correct error response
    assert isinstance(response, ErrorResponse)
    assert response.code == HTTPStatus.BAD_REQUEST.value
    assert invalid_model in response.message


@pytest.mark.asyncio
async def test_serving_completion_flag_not_set(mock_serving_setup):
    mock_engine, serving_completion = mock_serving_setup

    lora_model_name = "test-lora"
    req_found = CompletionRequest(
        model=lora_model_name,
        prompt="Generate with LoRA",
    )

    await serving_completion.create_completion(req_found)

    mock_engine.add_lora.assert_not_called()
    mock_engine.generate.assert_not_called()
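
The tests above drive OpenAIServingCompletion directly against a mocked engine. From a client's perspective, the behavior under test corresponds roughly to the sketch below; the server address and adapter name are placeholders, and the server must be started with VLLM_ALLOW_RUNTIME_LORA_UPDATING=true (and a resolver registered) for the dynamic-loading path to activate.

# Hypothetical client-side view of the flow mocked above: naming an
# adapter the server does not yet know triggers resolve_lora and
# add_lora server-side before generation runs.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

completion = client.completions.create(
    model="test-lora",  # adapter name, resolved on first use
    prompt="Generate with LoRA",
)
print(completion.choices[0].text)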
@@ -0,0 +1,74 @@
# SPDX-License-Identifier: Apache-2.0

from typing import Optional

import pytest

from vllm.lora.request import LoRARequest
from vllm.lora.resolver import LoRAResolver, LoRAResolverRegistry


class DummyLoRAResolver(LoRAResolver):
    """A dummy LoRA resolver for testing."""

    async def resolve_lora(self, base_model_name: str,
                           lora_name: str) -> Optional[LoRARequest]:
        if lora_name == "test_lora":
            return LoRARequest(
                lora_name=lora_name,
                lora_path=f"/dummy/path/{base_model_name}/{lora_name}",
                lora_int_id=abs(hash(lora_name)))
        return None


def test_resolver_registry_registration():
    """Test basic resolver registration functionality."""
    registry = LoRAResolverRegistry
    resolver = DummyLoRAResolver()

    # Register a new resolver
    registry.register_resolver("dummy", resolver)
    assert "dummy" in registry.get_supported_resolvers()

    # Get registered resolver
    retrieved_resolver = registry.get_resolver("dummy")
    assert retrieved_resolver is resolver


def test_resolver_registry_duplicate_registration():
    """Test registering a resolver with an existing name."""
    registry = LoRAResolverRegistry
    resolver1 = DummyLoRAResolver()
    resolver2 = DummyLoRAResolver()

    registry.register_resolver("dummy", resolver1)
    registry.register_resolver("dummy", resolver2)

    assert registry.get_resolver("dummy") is resolver2


def test_resolver_registry_unknown_resolver():
    """Test getting a non-existent resolver."""
    registry = LoRAResolverRegistry

    with pytest.raises(KeyError, match="not found"):
        registry.get_resolver("unknown_resolver")


@pytest.mark.asyncio
async def test_dummy_resolver_resolve():
    """Test the dummy resolver's resolve functionality."""
    dummy_resolver = DummyLoRAResolver()
    base_model_name = "base_model_test"
    lora_name = "test_lora"

    # Test successful resolution
    result = await dummy_resolver.resolve_lora(base_model_name, lora_name)
    assert isinstance(result, LoRARequest)
    assert result.lora_name == lora_name
    assert result.lora_path == f"/dummy/path/{base_model_name}/{lora_name}"

    # Test failed resolution
    result = await dummy_resolver.resolve_lora(base_model_name,
                                               "nonexistent_lora")
    assert result is None
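
Outside pytest, a minimal driver for the same registry interface might look like the following sketch; it assumes a resolver such as DummyLoRAResolver above has already been registered under the name "dummy", as in test_resolver_registry_registration.

# Minimal registry walkthrough under the stated assumption that a
# resolver is registered as "dummy".
import asyncio

from vllm.lora.resolver import LoRAResolverRegistry


async def main():
    resolver = LoRAResolverRegistry.get_resolver("dummy")
    found = await resolver.resolve_lora("base_model_test", "test_lora")
    print(found)    # a LoRARequest for a known adapter
    missing = await resolver.resolve_lora("base_model_test", "other_lora")
    print(missing)  # None tells the caller the adapter was not found


asyncio.run(main())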