-
Notifications
You must be signed in to change notification settings - Fork 0
/
Main.py
58 lines (40 loc) · 1.7 KB
/
Main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
from torch.utils.data import random_split, DataLoader
from Model import Model
from DataSet import DataSet
import torch
# 0 Load Data
data_set = DataSet('Dataset/iris/iris.csv')
# 1 Split Data
# Derive the split sizes from the dataset length instead of hard-coding
# (110, 10, 30), which raises if the CSV does not have exactly 150 rows.
# Proportions match the original split: 20% test, ~6.7% validation, rest train
# (exactly 110/10/30 for the 150-row iris set).
n_total = len(data_set)
n_test = max(1, round(n_total * 0.2))       # 30 when n_total == 150
n_validation = max(1, round(n_total / 15))  # 10 when n_total == 150
n_train = n_total - n_validation - n_test   # 110 when n_total == 150
train_dataset, validation_dataset, test_dataset = random_split(
    data_set, (n_train, n_validation, n_test)
)
# 2 Set Data Loader
# Full-batch loaders: each subset is small enough to process in one batch,
# so shuffling is unnecessary.
train_loader = DataLoader(train_dataset, batch_size=len(train_dataset), shuffle=False)
validation_loader = DataLoader(validation_dataset, batch_size=len(validation_dataset), shuffle=False)
test_loader = DataLoader(test_dataset, batch_size=len(test_dataset), shuffle=False)
# 4 initial Model
model = Model()
# 5 Choice Loss & Optimizer Function
loss_function = torch.nn.CrossEntropyLoss()
# optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
# 6 Train Model
# 500 full-batch epochs; report validation accuracy every 50 epochs.
for epoch in range(500):
    # Ensure training-mode behavior for any dropout/batchnorm layers in Model
    # (re-asserted each epoch because evaluation below switches to eval mode).
    model.train()
    for data_batch, label_batch in train_loader:
        optimizer.zero_grad()
        out = model(data_batch)
        loss = loss_function(out, label_batch)
        loss.backward()
        optimizer.step()
    if (epoch + 1) % 50 == 0:
        model.eval()  # inference mode for validation
        with torch.no_grad():  # no autograd graph needed when evaluating
            for data_batch, label_batch in validation_loader:
                out = model(data_batch)
                # argmax over the class dimension replaces the deprecated
                # torch.max(out.data, 1)[1] pattern.
                predicted = torch.argmax(out, dim=1)
                # Integer percentage of correctly classified validation samples.
                result = int(100 * torch.sum(label_batch == predicted) / len(validation_dataset))
                print(f"validation {epoch + 1} : {result}")
# 7 Test Model
# Uncomment to evaluate previously saved weights instead of the just-trained model.
# model.load_state_dict(torch.load('model_weights.pth'))
model.eval()  # inference mode: disables dropout/batchnorm training behavior, if any
with torch.no_grad():  # evaluation needs no gradient bookkeeping
    for data_batch, label_batch in test_loader:
        out = model(data_batch)
        # Predicted class indices; replaces deprecated torch.max(out.data, 1)[1].
        predicted = torch.argmax(out, dim=1)
        # Integer percentage of correctly classified test samples.
        result = int(100 * torch.sum(label_batch == predicted) / len(test_dataset))
        print(f"test : {result}")
# Uncomment to persist the trained weights.
# torch.save(model.state_dict(), 'model_weights.pth')