# ddp_tutorial_cpu.py (forked from olehb/pytorch_ddp_tutorial)

from typing import Tuple

import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from tqdm import tqdm

# Set to True to silence the tqdm progress bars.
DISABLE_TQDM = True


def create_data_loaders(batch_size: int) -> Tuple[DataLoader, DataLoader]:
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))  # MNIST mean/std
    ])
    dataset_loc = './mnist_data'
    train_dataset = datasets.MNIST(dataset_loc,
                                   download=True,
                                   train=True,
                                   transform=transform)
    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=4,
                              pin_memory=True)

    # It is not necessary to use a distributed sampler for the test or
    # validation sets; only the training set needs one (see the sketch
    # after this function).
    test_dataset = datasets.MNIST(dataset_loc,
                                  download=True,
                                  train=False,
                                  transform=transform)
    test_loader = DataLoader(test_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4,
                             pin_memory=True)
    return train_loader, test_loader
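

# A minimal sketch (not used by this CPU script) of the distributed variant of
# the training loader: the training set is sharded across processes with a
# DistributedSampler. It assumes torch.distributed has already been
# initialized; `rank`, `world_size`, and the function name are illustrative.
def create_distributed_train_loader(batch_size: int,
                                    rank: int,
                                    world_size: int) -> DataLoader:
    from torch.utils.data.distributed import DistributedSampler
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    train_dataset = datasets.MNIST('./mnist_data',
                                   download=True,
                                   train=True,
                                   transform=transform)
    sampler = DistributedSampler(train_dataset,
                                 num_replicas=world_size,
                                 rank=rank)
    # shuffle must stay False when a sampler is supplied; the sampler shuffles.
    return DataLoader(train_dataset,
                      batch_size=batch_size,
                      shuffle=False,
                      sampler=sampler,
                      num_workers=4,
                      pin_memory=True)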


def create_model():
    # create model architecture: a simple feed-forward classifier
    # over flattened MNIST images
    model = nn.Sequential(
        nn.Linear(28*28, 128),  # MNIST images are 28x28 pixels
        nn.ReLU(),
        nn.Dropout(0.2),
        nn.Linear(128, 128),
        nn.ReLU(),
        nn.Linear(128, 10, bias=False)  # 10 classes to predict
    )
    return model
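

# A minimal sketch (not exercised by this CPU script) of how the model would
# be wrapped for DistributedDataParallel training. It assumes the process
# group has already been initialized, e.g. torch.distributed.init_process_group
# with the 'gloo' backend for CPU; `create_ddp_model` is an illustrative name.
def create_ddp_model() -> nn.Module:
    from torch.nn.parallel import DistributedDataParallel
    # On CPU no device_ids are passed; DDP all-reduces gradients over the
    # process group during backward().
    return DistributedDataParallel(create_model())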


def main(epochs: int,
         model: nn.Module,
         train_loader: DataLoader,
         test_loader: DataLoader) -> nn.Module:
    # initialize optimizer and loss function
    optimizer = optim.SGD(model.parameters(), lr=0.01)
    loss_fn = nn.CrossEntropyLoss()

    # train the model
    for i in range(epochs):
        model.train()
        epoch_loss = 0

        # train the model for one epoch
        pbar = tqdm(train_loader, disable=DISABLE_TQDM)
        for x, y in pbar:
            x = x.view(x.shape[0], -1)  # flatten 28x28 images to 784-vectors
            optimizer.zero_grad()
            y_hat = model(x)
            batch_loss = loss_fn(y_hat, y)
            batch_loss.backward()
            optimizer.step()
            batch_loss_scalar = batch_loss.item()
            epoch_loss += batch_loss_scalar / x.shape[0]
            pbar.set_description(f'training batch_loss={batch_loss_scalar:.4f}')

        # calculate validation loss
        with torch.no_grad():
            model.eval()
            val_loss = 0
            pbar = tqdm(test_loader, disable=DISABLE_TQDM)
            for x, y in pbar:
                x = x.view(x.shape[0], -1)
                y_hat = model(x)
                batch_loss = loss_fn(y_hat, y)
                batch_loss_scalar = batch_loss.item()
                val_loss += batch_loss_scalar / x.shape[0]
                pbar.set_description(f'validation batch_loss={batch_loss_scalar:.4f}')

        print(f"Epoch={i}, train_loss={epoch_loss:.4f}, val_loss={val_loss:.4f}")

    return model


if __name__ == '__main__':
    batch_size = 128
    epochs = 1

    train_loader, test_loader = create_data_loaders(batch_size)
    model = main(epochs=epochs,
                 model=create_model(),
                 train_loader=train_loader,
                 test_loader=test_loader)
    torch.save(model.state_dict(), 'model.pt')
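
# To reuse the trained weights later, one standard pattern is to rebuild the
# architecture and load the saved state_dict into it, for example:
#
#     model = create_model()
#     model.load_state_dict(torch.load('model.pt'))
#     model.eval()  # switch off dropout for inference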