@@ -203,23 +203,26 @@ EOF
203203
204204
start_vllm () {
  # Print the launch configuration, start the vLLM server in the background
  # with its output tee'd to a log file, then block until wait_for_log_update
  # (defined elsewhere in this file) reports the server is up.
  #
  # Globals read: MODEL, DEFAULT_PORT, DEFAULT_GPU_UTIL, DEFAULT_TP_SIZE,
  #   DEFAULT_MODEL_LEN, DEFAULT_TRUST_REMOTE_CODE, DEFAULT_HOST,
  #   DEFUALT_SERVED_MODEL_NAME, USER_ARGS[], FINAL_ARGS[]
  # Outputs: banner to stdout; server output appended to vllm_server.log
  local log_file=vllm_server.log
  local served_name
  # NOTE(review): "DEFUALT" is misspelled, but the variable is presumably
  # assigned under this exact name elsewhere in the file — fix both sites
  # together rather than renaming only here.
  served_name=${DEFUALT_SERVED_MODEL_NAME:-$(basename "$MODEL")}

  echo "========================================"
  echo "Starting vLLM Service..."
  echo "Model path        : $MODEL"
  echo "Served Model Name : $served_name"
  echo "Port              : $DEFAULT_PORT"
  echo "GPU Utilization   : $DEFAULT_GPU_UTIL"
  echo "Tensor Parallel   : $DEFAULT_TP_SIZE"
  echo "Max Model Length  : $DEFAULT_MODEL_LEN"
  echo "Trust Remote Code : $DEFAULT_TRUST_REMOTE_CODE"
  echo "Extra Arguments   : ${USER_ARGS[*]}"
  echo "Full Command      : vllm serve ${FINAL_ARGS[*]}"
  echo "========================================"

  # Truncate any previous log so wait_for_log_update only sees this run's
  # output (tee -a below appends, so the truncation must happen here).
  : > "$log_file"

  # PYTHONUNBUFFERED=1 keeps the Python server's stdout unbuffered so the
  # readiness message reaches the log promptly through tee.
  PYTHONUNBUFFERED=1 vllm serve "${FINAL_ARGS[@]}" 2>&1 | tee -a "$log_file" &

  # NOTE(review): $! after a backgrounded pipeline is the PID of the last
  # stage (tee), not the vllm process — confirm wait_for_log_update only
  # needs a liveness handle, not the actual server PID.
  wait_for_log_update "$log_file" $! "$served_name" "$DEFAULT_HOST" "$DEFAULT_PORT"
}
224227
225228
0 commit comments