Skip to content

[Bugfix] Allow CUDA_VISIBLE_DEVICES='' in Platform.device_id_to_physical_device_id #18979

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 22 commits into from
Jun 26, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
22 commits
Select commit Hold shift + click to select a range
22fdba1
Fix FlashMLA detection in ray environment
eicherseiji May 30, 2025
979129b
Move FlashMLA capability check to GPU worker
eicherseiji Jun 4, 2025
5055e5b
Unit test
eicherseiji Jun 6, 2025
9e943ea
Create helper function _resolve_hardware_dependent_config
eicherseiji Jun 6, 2025
b812cd1
Add V0 support under env flag
eicherseiji Jun 6, 2025
4e4e092
Parameterize _get_and_verify_dtype by defer_to_worker
eicherseiji Jun 6, 2025
baadec8
Change parameter name to 'defer_auto_to_worker'
eicherseiji Jun 9, 2025
8df7253
Add _resolve_hardware_dependent_config for V0 Worker
eicherseiji Jun 9, 2025
b2fea3c
Resolve lora dtype
eicherseiji Jun 9, 2025
6500644
Move V1 supported type check into auto resolution's if block
eicherseiji Jun 9, 2025
8c30841
Move config resolution to WorkerBase
eicherseiji Jun 10, 2025
7ff5dae
Fix kwargs list
eicherseiji Jun 10, 2025
ceccc0d
Inline init_config with init_worker
eicherseiji Jun 12, 2025
c191299
Testing without V0-specific flag
eicherseiji Jun 12, 2025
190f9f7
Remove whitespace changes
eicherseiji Jun 12, 2025
ab58431
Avoid modifying _get_and_verify_dtype signature for the sake of testing
eicherseiji Jun 12, 2025
5f87e53
Only check V1 supported dtypes in V1
eicherseiji Jun 13, 2025
e77592c
Move config fixup logic to VllmConfig.resolve_config_with_hardware, f…
eicherseiji Jun 16, 2025
8e0f77b
Support inplace model weights loading
eicherseiji Jun 16, 2025
d5f88bb
Try resolving dtype on worker in ray
eicherseiji Jun 18, 2025
afe0e01
Treat empty device control env var as unset
eicherseiji Jun 23, 2025
66bd1fb
Allow empty string CUDA_VISIBLE_DEVICES
eicherseiji Jun 24, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
38 changes: 38 additions & 0 deletions tests/config/test_config_generation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest

from vllm.engine.arg_utils import EngineArgs
from vllm.model_executor.layers.quantization.quark.utils import deep_compare


def test_cuda_empty_vs_unset_configs(monkeypatch: pytest.MonkeyPatch):
    """Test that configs created with normal (untouched) CUDA_VISIBLE_DEVICES
    and CUDA_VISIBLE_DEVICES="" are equivalent. This ensures consistent
    behavior regardless of whether GPU visibility is disabled via empty string
    or left in its normal state.
    """

    def create_config():
        # Build a fresh engine config from identical args each time so the
        # only varying input between the two runs is the environment.
        engine_args = EngineArgs(model="deepseek-ai/DeepSeek-V2-Lite",
                                 trust_remote_code=True)
        return engine_args.create_engine_config()

    # Create config with CUDA_VISIBLE_DEVICES set normally
    normal_config = create_config()

    # Create config with CUDA_VISIBLE_DEVICES=""
    with monkeypatch.context() as m:
        m.setenv("CUDA_VISIBLE_DEVICES", "")
        empty_config = create_config()

    # Copy the attribute dicts before comparing: vars() returns the live
    # __dict__ of each config, so popping from it directly would mutate the
    # config objects themselves.
    normal_config_dict = dict(vars(normal_config))
    empty_config_dict = dict(vars(empty_config))

    # Remove instance_id before comparison as it's expected to be different
    normal_config_dict.pop("instance_id", None)
    empty_config_dict.pop("instance_id", None)

    assert deep_compare(normal_config_dict, empty_config_dict), (
        "Configs with normal CUDA_VISIBLE_DEVICES and CUDA_VISIBLE_DEVICES=\"\""
        " should be equivalent")
71 changes: 71 additions & 0 deletions tests/v1/engine/test_engine_core_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,10 @@
import uuid
from threading import Thread
from typing import Optional
from unittest.mock import MagicMock

import pytest
import torch
from transformers import AutoTokenizer

from tests.utils import multi_gpu_test
Expand Down Expand Up @@ -517,3 +519,72 @@ def kill_first_child():
)

assert "Engine core initialization failed" in str(e_info.value)


@create_new_process_for_each_test()
def test_engine_core_proc_instantiation_cuda_empty(
        monkeypatch: pytest.MonkeyPatch):
    """
    Test that EngineCoreProc can be instantiated when CUDA_VISIBLE_DEVICES
    is empty. This ensures the engine frontend does not need access to GPUs.
    """

    from vllm.v1.engine.core import EngineCoreProc
    from vllm.v1.executor.abstract import Executor

    # Stand-in executor class: a MagicMock constrained to the Executor API.
    executor_cls = MagicMock(spec=Executor)

    def build_executor(vllm_config):
        # Stub only the pieces EngineCoreProc touches during initialization.
        executor = MagicMock()

        from vllm.v1.kv_cache_interface import FullAttentionSpec
        kv_spec = FullAttentionSpec(block_size=16,
                                    num_kv_heads=1,
                                    head_size=64,
                                    dtype=torch.float16,
                                    use_mla=False)

        executor.get_kv_cache_specs.return_value = [{"default": kv_spec}]
        executor.determine_available_memory.return_value = [
            1024 * 1024 * 1024
        ]
        executor.initialize_from_config.return_value = None
        executor.max_concurrent_batches = 1

        return executor

    executor_cls.side_effect = build_executor

    with monkeypatch.context() as m:
        m.setenv("VLLM_USE_V1", "1")
        m.setenv("CUDA_VISIBLE_DEVICES", "")  # No CUDA devices

        from vllm.v1.utils import EngineZmqAddresses

        def fake_startup_handshake(self, handshake_socket, on_head_node,
                                   parallel_config):
            return EngineZmqAddresses(inputs=["tcp://127.0.0.1:5555"],
                                      outputs=["tcp://127.0.0.1:5556"],
                                      coordinator_input=None,
                                      coordinator_output=None)

        # Background processes are not important here
        m.setattr(EngineCoreProc, "startup_handshake", fake_startup_handshake)

        vllm_config = EngineArgs(
            model="deepseek-ai/DeepSeek-V2-Lite",
            trust_remote_code=True).create_engine_config()
        proc = EngineCoreProc(
            vllm_config=vllm_config,
            on_head_node=True,
            handshake_address="tcp://127.0.0.1:12345",
            executor_class=executor_cls,
            log_stats=False,
            engine_index=0,
        )

        proc.shutdown()
15 changes: 5 additions & 10 deletions vllm/platforms/interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -173,17 +173,12 @@ def is_sleep_mode_available(self) -> bool:

@classmethod
def device_id_to_physical_device_id(cls, device_id: int):
if cls.device_control_env_var in os.environ:
# Treat empty device control env var as unset. This is a valid
# configuration in Ray setups where the engine is launched in
# a CPU-only placement group located on a GPU node.
if cls.device_control_env_var in os.environ and os.environ[
cls.device_control_env_var] != "":
device_ids = os.environ[cls.device_control_env_var].split(",")
if device_ids == [""]:
msg = (f"{cls.device_control_env_var} is set to empty string, "
"which means current platform support is disabled. If "
"you are using ray, please unset the environment "
f"variable `{cls.device_control_env_var}` inside the "
"worker/actor. Check "
"https://github.com/vllm-project/vllm/issues/8402 for "
"more information.")
raise RuntimeError(msg)
physical_device_id = device_ids[device_id]
return int(physical_device_id)
else:
Expand Down