Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 13 additions & 0 deletions docker/run-vllm-qwen.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
#!/bin/sh
# Launch a vLLM OpenAI-compatible server for Qwen2.5-VL-32B-Instruct in Docker.
#
# Environment (override before running):
#   HUGGING_FACE_HUB_TOKEN  Hugging Face access token for model downloads.
#   CUDA_VISIBLE_DEVICES    GPUs exposed to the server (default: 0,1,2,3).
#
# The model cache is persisted on the host at /opt/vllm.
set -eu

# Prefer a token from the caller's environment; fall back to the redacted
# placeholder so existing invocations keep working. Never commit a real
# token to this file -- pass it via the environment instead.
export HUGGING_FACE_HUB_TOKEN="${HUGGING_FACE_HUB_TOKEN:-hf_XXX-XXX-XXX}"
export CUDA_VISIBLE_DEVICES="${CUDA_VISIBLE_DEVICES:-0,1,2,3}"

# NOTE: exporting on the host does NOT make these variables visible inside
# the container -- they must be forwarded explicitly with `-e`. The original
# script exported them but never forwarded them, so the server ran with an
# anonymous Hugging Face session and saw every GPU regardless of the mask.
docker run \
  --name vllm-qwen-vl \
  --network vllm-qwen-vl \
  --gpus all \
  --runtime=nvidia \
  --ipc=host \
  --rm --init \
  -p 8000:8000 \
  -e HUGGING_FACE_HUB_TOKEN \
  -e CUDA_VISIBLE_DEVICES \
  -v /opt/vllm:/root/.cache/huggingface \
  vllm/vllm-openai:latest \
    --model Qwen/Qwen2.5-VL-32B-Instruct \
    --served-model-name "Qwen2.5-VL-32B-Instruct" \
    --tensor-parallel-size 4 \
    --max-model-len 32768 \
    --enable-auto-tool-choice \
    --tool-call-parser qwen
9 changes: 9 additions & 0 deletions packages/bytebot-llm-proxy/litellm-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -28,3 +28,12 @@ model_list:
litellm_params:
model: gemini/gemini-2.5-flash
api_key: os.environ/GEMINI_API_KEY

# Self-hosted vLLM Models
- model_name: VM426:Qwen2.5-VL-32B-Instruct
litellm_params:
model: openai/Qwen2.5-VL-32B-Instruct
api_base: https://XXX-XXX-XXX-XXX/v1
supports_function_calling: true
drop_params: true
temperature: 0.1