@@ -6,6 +6,7 @@
 
 from tests.kernels.utils import DEFAULT_OPCHECK_TEST_UTILS, opcheck
 from vllm import _custom_ops as ops
+from vllm.utils import seed_everything
 
 COPYING_DIRECTION = [('cuda', 'cpu'), ('cuda', 'cuda'), ('cpu', 'cuda')]
 DTYPES = [torch.half, torch.bfloat16, torch.float]
@@ -55,10 +56,7 @@ def test_copy_blocks(
 ) -> None:
     if kv_cache_dtype == "fp8" and head_size % 16:
         pytest.skip()
-    random.seed(seed)
-    torch.random.manual_seed(seed)
-    if torch.cuda.is_available():
-        torch.cuda.manual_seed(seed)
+    seed_everything(seed)
     torch.set_default_device(device)
     # Generate random block mappings where each source block is mapped to two
     # destination blocks.
@@ -134,10 +132,7 @@ def test_reshape_and_cache(
 ) -> None:
     if kv_cache_dtype == "fp8" and head_size % 16:
         pytest.skip()
-    random.seed(seed)
-    torch.random.manual_seed(seed)
-    if torch.cuda.is_available():
-        torch.cuda.manual_seed(seed)
+    seed_everything(seed)
     torch.set_default_device(device)
     # Create a random slot mapping.
     num_slots = block_size * num_blocks
@@ -229,9 +224,7 @@ def test_reshape_and_cache_flash(
     device: str,
     kv_cache_dtype: str,
 ) -> None:
-    random.seed(seed)
-    torch.random.manual_seed(seed)
-    torch.cuda.manual_seed(seed)
+    seed_everything(seed)
     torch.set_default_device(device)
 
     # Create a random slot mapping.
@@ -345,10 +338,8 @@ def test_swap_blocks(
         pytest.skip()
     if kv_cache_dtype == "fp8" and head_size % 16:
         pytest.skip()
-    random.seed(seed)
-    torch.random.manual_seed(seed)
-    if torch.cuda.is_available():
-        torch.cuda.manual_seed(seed)
+
+    seed_everything(seed)
 
     src_device = device if direction[0] == "cuda" else 'cpu'
     dst_device = device if direction[1] == "cuda" else 'cpu'
@@ -417,9 +408,7 @@ def test_fp8_e4m3_conversion(
     seed: int,
     device: str,
 ) -> None:
-    random.seed(seed)
-    torch.random.manual_seed(seed)
-    torch.cuda.manual_seed(seed)
+    seed_everything(seed)
 
     low = -224.0
     high = 224.0
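The diff above collapses the per-test RNG setup (Python `random`, torch, and the CUDA-availability check plus CUDA seeding) into a single `seed_everything(seed)` call imported from `vllm.utils`. As an illustration only, here is a minimal sketch of what such a helper typically does; the real implementation lives in `vllm/utils.py` and may seed additional backends:

```python
# Minimal sketch of a seed_everything-style helper (illustrative assumption;
# see vllm/utils.py for the actual implementation).
import random

import numpy as np
import torch


def seed_everything(seed: int) -> None:
    """Seed every RNG the kernel tests rely on from one entry point."""
    random.seed(seed)        # Python's built-in RNG
    np.random.seed(seed)     # NumPy RNG
    torch.manual_seed(seed)  # torch CPU RNG
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)  # all visible CUDA devices
```

Centralizing the seeding keeps the tests deterministic without repeating the CUDA-availability check in every test body.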