test.py (forked from facebookresearch/dlrm)
# Earlier variant using the DLRM repo's extend_distributed helper, kept
# commented out for reference:
#
# import torch
# import torch.distributed as dist
# import extend_distributed as ext_dist
#
# ext_dist.init_distributed(local_rank=-1, use_gpu=True, backend="nccl")
# # ext_dist.print_all(
# #     "world size: %d, current rank: %d, local rank: %d"
# #     % (ext_dist.my_size, ext_dist.my_rank, ext_dist.my_local_rank)
# # )
# rank = ext_dist.my_rank
# tensor = torch.arange(4) + rank * 4
# tensor = tensor.to("cuda:{}".format(rank))
# print(tensor)
"""run.py:"""
#!/usr/bin/env python
import os
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
def run(rank, size):
""" Distributed function to be implemented later. """
input = (torch.arange(4) + rank * 4)
input = input.to("cuda:{}".format(rank))
print(input)
pass
def init_process(rank, size, fn, backend='nccl'):
""" Initialize the distributed environment. """
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend, rank=rank, world_size=size)
fn(rank, size)
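

# A minimal sketch (not part of the original test) of how the per-rank tensors
# could take part in a collective once the process group exists; it assumes one
# visible GPU per rank, the same setup run() uses above.
def all_reduce_demo(rank, size):
    """Sum the per-rank tensors across all ranks with an all-reduce."""
    tensor = (torch.arange(4) + rank * 4).to("cuda:{}".format(rank))
    # After all_reduce, every rank holds the element-wise sum over all ranks.
    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
    print(rank, tensor)
# Passing all_reduce_demo instead of run to init_process below would exercise
# the collective.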
if __name__ == "__main__":
    size = 4
    processes = []
    mp.set_start_method("spawn")
    for rank in range(size):
        p = mp.Process(target=init_process, args=(rank, size, run))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()