Skip to content

Commit c61aa44

Browse files
authored
Revert "Parallel test with pytest-xdist" (#526)
This reverts commit cbaff6c.
1 parent: cbaff6c · commit: c61aa44

File tree

6 files changed

+4
-14
lines changed

6 files changed

+4
-14
lines changed

.github/workflows/regression_test.yml

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -68,5 +68,4 @@ jobs:
6868
pip install ${{ matrix.torch-spec }}
6969
pip install -r dev-requirements.txt
7070
pip install .
71-
pytest test --verbose -s -m "not multi_gpu" --dist load --tx popen//env:CUDA_VISIBLE_DEVICES=0 --tx popen//env:CUDA_VISIBLE_DEVICES=1 --tx popen//env:CUDA_VISIBLE_DEVICES=2 --tx popen//env:CUDA_VISIBLE_DEVICES=3
72-
pytest test --verbose -s -m "multi_gpu"
71+
pytest test --verbose -s

dev-requirements.txt

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@ transformers
77
hypothesis # Avoid test derandomization warning
88
sentencepiece # for gpt-fast tokenizer
99
expecttest
10-
pytest-xdist
1110

1211
# For prototype features and benchmarks
1312
bitsandbytes #needed for testing triton quant / dequant ops for 8-bit optimizers

pytest.ini

Lines changed: 0 additions & 3 deletions
This file was deleted.

test/dtypes/test_nf4.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -486,7 +486,6 @@ class TestQLoRA(FSDPTest):
486486
def world_size(self) -> int:
487487
return 2
488488

489-
@pytest.mark.multi_gpu
490489
@pytest.mark.skipif(
491490
version.parse(torch.__version__).base_version < "2.4.0",
492491
reason="torch >= 2.4 required",

test/integration/test_integration.py

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -985,10 +985,7 @@ def forward(self, x):
985985
# save quantized state_dict
986986
api(model)
987987

988-
# unique filename to avoid collision in parallel tests
989-
ckpt_name = f"{api.__name__}_{test_device}_{test_dtype}_test.pth"
990-
991-
torch.save(model.state_dict(), ckpt_name)
988+
torch.save(model.state_dict(), "test.pth")
992989
# get quantized reference
993990
model_qc = torch.compile(model, mode="max-autotune")
994991
ref_q = model_qc(x).detach()
@@ -1001,8 +998,8 @@ def forward(self, x):
1001998
api(model)
1002999

10031000
# load quantized state_dict
1004-
state_dict = torch.load(ckpt_name, mmap=True)
1005-
os.remove(ckpt_name)
1001+
state_dict = torch.load("test.pth", mmap=True)
1002+
os.remove("test.pth")
10061003

10071004
model.load_state_dict(state_dict, assign=True)
10081005
model = model.to(device=test_device, dtype=test_dtype).eval()

test/prototype/test_low_bit_optim.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -163,7 +163,6 @@ class TestFSDP2(FSDPTest):
163163
def world_size(self) -> int:
164164
return 2
165165

166-
@pytest.mark.multi_gpu
167166
@pytest.mark.skipif(not TORCH_VERSION_AFTER_2_4, reason="torch >= 2.4 required")
168167
@skip_if_lt_x_gpu(2)
169168
def test_fsdp2(self):

0 commit comments

Comments (0)