2 files changed: +1, -304 lines

@@ -287,7 +287,7 @@ steps:
  source_file_dependencies:
  - vllm/lora
  - tests/lora
- command: pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --ignore=lora/test_long_context.py --ignore=lora/test_chatglm3_tp.py --ignore=lora/test_llama_tp.py --ignore=lora/test_minicpmv_tp.py --ignore=lora/test_transfomers_model.py
+ command: pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --ignore=lora/test_chatglm3_tp.py --ignore=lora/test_llama_tp.py --ignore=lora/test_minicpmv_tp.py --ignore=lora/test_transfomers_model.py
  parallelism: 4

  - label: PyTorch Fullgraph Smoke Test # 9min
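Both the removed and the added command shard the LoRA suite across Buildkite's parallel jobs; since this PR deletes the long-context test file itself (see the second file below), the corresponding --ignore flag becomes dead and is dropped. Below is a minimal, hypothetical sketch of the sharding mechanism, assuming the --shard-id/--num-shards flags come from the pytest-shard plugin; the step label and test path are illustrative, not from the PR:

    # Hypothetical Buildkite step, not the vLLM pipeline itself.
    # parallelism: 4 launches four copies of this step; each copy gets
    # BUILDKITE_PARALLEL_JOB (0..3) and BUILDKITE_PARALLEL_JOB_COUNT (4).
    # The doubled $$ escapes interpolation at pipeline-upload time, so the
    # agent's shell expands the variables when the job actually runs.
    steps:
    - label: Sharded test example
      parallelism: 4
      command: pytest -v tests/ --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT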
@@ -592,8 +592,6 @@ steps:
  # FIXIT: find out which code initialize cuda before running the test
  # before the fix, we need to use spawn to test it
  - export VLLM_WORKER_MULTIPROC_METHOD=spawn
- # This test runs llama 13B, so it is required to run on 4 GPUs.
- - pytest -v -s -x lora/test_long_context.py
  # There is some Tensor Parallelism related processing logic in LoRA that
  # requires multi-GPU testing for validation.
  - pytest -v -s -x lora/test_chatglm3_tp.py
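The surviving export line is the usual workaround for CUDA's incompatibility with fork(): once the parent process has initialized CUDA, a forked worker cannot re-initialize it, so vLLM's worker processes must use the spawn start method. A hypothetical sketch of the pattern the remaining commands follow (the step label is illustrative; VLLM_WORKER_MULTIPROC_METHOD is vLLM's real environment knob for the multiprocessing start method):

    # Hypothetical step restating the spawn workaround from the hunk above.
    steps:
    - label: Multi-GPU LoRA TP tests
      commands:
      - export VLLM_WORKER_MULTIPROC_METHOD=spawn  # fork would inherit an initialized CUDA context and fail
      - pytest -v -s -x lora/test_chatglm3_tp.py   # TP handling in LoRA needs more than one GPU to validate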
The PR's second file was deleted outright (its diff is not shown); given the pipeline edits above, this is presumably the long-context LoRA test.