
Commit 345b439

No.2 please

1 parent 1709887 commit 345b439

12 files changed, +620 -1 lines changed

.gitignore

Lines changed: 8 additions & 1 deletion
@@ -1,3 +1,10 @@
  Chapter03/cifar10/
  Chapter03/save/
- Chapter03/runs/
+ Chapter03/runs/
+
+ Chapter04/__pycache__/
+ Chapter04/dataset/
+ Chapter04/final_result/
+ Chapter04/runs/
+ Chapter04/save/
+ *.pyc

Chapter04/config/config.yaml

Lines changed: 54 additions & 0 deletions
@@ -0,0 +1,54 @@
# About the config file
# The config file holds everything:
# the deep learning model,
# the loss function,
# the optimizer,
# the hyperparameters,
# the number of epochs, and so on.
# To change the model or the number of epochs, just adjust the value
# for the corresponding key here.

# Note: for the model, loss function, and optimizer, whatever name you
# set here must already be defined in the corresponding model, loss, or
# optimizer file.

---

use_cuda: true
epoch: 100
train_batch_size: 64
test_batch_size: 64
learning_rate: 0.001
dataset_name: "CIFAR10"
is_trained: false

# train_dataset and test_dataset are handled only inside the CIFAR10
# DataLoader, so they are skipped here.

num_workers: 2
train_dataset_shuffle: True
test_dataset_shuffle: False
data_loader_name: 'data_load_only_normalizing'

model: "VGG11"
loss: "CrossEntropyLoss"
optimizer: "Adam"
scheduler: 'ExponentialLR'
momentum: 0.9
weight_decay: 0.01
metrics: "accuracy_score"


VGG_types: {
    'VGG11' : [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG13' : [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG16' : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'VGG19' : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M']
}



# TODO: define momentum and weight_decay
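
A minimal sketch of how this config is consumed (the factory functions come from the files added in this commit; the import path src.optimizers.optimizer for the optimizer factory is an assumption, since that file's name is not shown in this diff):

import yaml
import torch

import src.models.model as vgg
import src.optimizers.loss as loss_factory
import src.optimizers.optimizer as optim_factory  # assumed module name, not shown in this diff

with open("config/config.yaml", 'r', encoding='utf-8') as stream:
    config = yaml.safe_load(stream)  # plain dict

DEVICE = torch.device("cuda" if config['use_cuda'] and torch.cuda.is_available() else "cpu")

model = vgg.get_VGG(config).to(DEVICE)                               # VGG11/13/16/19 from config['model']
criterion = loss_factory.get_loss(config)                            # loss from config['loss']
optimizer = optim_factory.get_optimizer(model.parameters(), config)  # Adam or SGD from config['optimizer']

Changing a single key in the YAML (for example model: "VGG16") is enough to switch what these factories build, as long as the new name is handled in the corresponding file.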

Chapter04/predict_images.py

Lines changed: 66 additions & 0 deletions
@@ -0,0 +1,66 @@
import torch
import yaml
import src.models.model as vgg
import src.dataloader.dataloader as dataloader
import os
from torch.utils.tensorboard import SummaryWriter
import torchvision.utils


def predict(model, test_loader):
    model.eval()

    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # get classes
    train_dir = '/data/Github_Management/StartDeepLearningWithPytorch/Chapter04/dataset/train/'
    classes = os.listdir(train_dir)

    # get tensorboard
    writer = SummaryWriter('final_result/test_01')

    with torch.no_grad():  # do not compute gradients, i.e. the weights are not updated
        dataiter = iter(test_loader)
        images, labels = next(dataiter)  # next(dataiter); the old dataiter.next() was removed in newer PyTorch

        images, labels = images.to(DEVICE), labels.to(DEVICE)
        outputs = model(images)

        _, index = torch.max(outputs.data, 1)
        img_grid = torchvision.utils.make_grid(images)

        for i in range(len(index.tolist())):
            result = classes[index[i]]
            writer.add_image('The predicted result is ' + result, img_grid)


if __name__ == '__main__':

    # get config file
    print("Get config file...")
    with open("config/config.yaml", 'r', encoding='utf-8') as stream:
        try:
            config = yaml.safe_load(stream)  # returns a dict
        except yaml.YAMLError as exc:
            print(exc)

    # load data
    print("Loading data...")
    train_data, valid_data, test_data = dataloader.get_data()

    # use cuda
    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # create learned model
    print("Creating learned model...")
    model = vgg.get_VGG(config).to(DEVICE)

    # load weights >> load the saved weight file from the directory
    model.load_state_dict(torch.load('save/VGG11/2022-03-24_13:32:10/saved_weights_297'))

    predict(model, test_data)
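
The weight file loaded above must be produced by a training script that is not part of this commit. A minimal sketch of a compatible save step (the save/VGG11/<timestamp>/saved_weights_<epoch> layout is only the pattern implied by the load call above):

import os
import torch

save_dir = 'save/VGG11/2022-03-24_13:32:10'   # <model name>/<run timestamp>, assumed layout
os.makedirs(save_dir, exist_ok=True)
torch.save(model.state_dict(), os.path.join(save_dir, 'saved_weights_297'))

The images written by predict() can then be inspected by pointing TensorBoard at the final_result directory.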
Lines changed: 54 additions & 0 deletions
@@ -0,0 +1,54 @@
import os
from torchvision import datasets, transforms, models
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader

import numpy as np
from PIL import Image

# Print the total number of fruit classes
# file_list = os.listdir(path + 'train')
# print('total number of type of fruits:', len(file_list))


def get_data():
    train_dir = '/data/Github_Management/StartDeepLearningWithPytorch/Chapter04/dataset/train/'
    val_dir = '/data/Github_Management/StartDeepLearningWithPytorch/Chapter04/dataset/validation'
    test_dir = '/data/Github_Management/StartDeepLearningWithPytorch/Chapter04/dataset/test'

    # list of fruit class names
    classes = os.listdir(train_dir)
    # print(classes)

    train_transform = transforms.Compose([
        transforms.RandomRotation(10),       # +/- 10 degrees
        transforms.RandomHorizontalFlip(),   # flips 50% of the images horizontally; no vertical flip
        transforms.Resize(40),               # shorter side resized to 40
        transforms.CenterCrop(40),           # (40, 40)
        transforms.ToTensor(),               # convert to tensor
        transforms.Normalize(mean=[0.5, 0.5, 0.5],
                             std=[0.5, 0.5, 0.5])  # TODO: compute the real mean and std later
    ])

    train_set = ImageFolder(train_dir, transform=train_transform)
    valid_set = ImageFolder(val_dir, transform=train_transform)
    test_set = ImageFolder(test_dir, transform=train_transform)

    # Train, Valid, Test
    # num_data = [len(train_set), len(valid_set), len(test_set)]
    # print(num_data)
    print(type(train_set))
    print(type(valid_set))

    train_loader = DataLoader(train_set, batch_size=64, num_workers=2, shuffle=True)
    valid_loader = DataLoader(valid_set, batch_size=64, num_workers=2, shuffle=True)
    test_loader = DataLoader(test_set, batch_size=64, num_workers=2, shuffle=False)

    return train_loader, valid_loader, test_loader


if __name__ == '__main__':
    # Print the total number of fruit classes
    file_list = os.listdir('/data/Github_Management/StartDeepLearningWithPytorch/Chapter04/dataset/train/')
    print('total number of type of fruits:', len(file_list))

    a = get_data()
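
A minimal usage sketch for the loaders returned by get_data() (shapes assume the 40x40 transforms above and batch_size = 64):

train_loader, valid_loader, test_loader = get_data()

# ImageFolder keeps a sorted list of class names and their indices;
# this is more reliable than os.listdir(), whose order is not guaranteed
print(train_loader.dataset.classes[:5])
print(train_loader.dataset.class_to_idx)

images, labels = next(iter(train_loader))
print(images.shape, labels.shape)   # torch.Size([64, 3, 40, 40]) torch.Size([64])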
Lines changed: 31 additions & 0 deletions
@@ -0,0 +1,31 @@
import os
from torchvision.datasets import ImageFolder
from torchvision import transforms

train_dir = '/data/Github_Management/StartDeepLearningWithPytorch/Chapter04/dataset/train/'
val_dir = '/data/Github_Management/StartDeepLearningWithPytorch/Chapter04/dataset/validation'
test_dir = '/data/Github_Management/StartDeepLearningWithPytorch/Chapter04/dataset/test'

# list of fruit class names
classes = os.listdir(train_dir)
# print(classes)

train_transform = transforms.Compose([
    transforms.RandomRotation(10),       # +/- 10 degrees
    transforms.RandomHorizontalFlip(),   # flips 50% of the images horizontally; no vertical flip
    transforms.Resize(40),               # shorter side resized to 40
    transforms.CenterCrop(40),           # (40, 40)
    transforms.ToTensor(),               # convert to tensor
    transforms.Normalize(mean=[0.5, 0.5, 0.5],
                         std=[0.5, 0.5, 0.5])  # TODO: compute the real mean and std later
])

train_set = ImageFolder(train_dir, transform=train_transform)
valid_set = ImageFolder(val_dir, transform=train_transform)
test_set = ImageFolder(test_dir, transform=train_transform)

# Train, Valid, Test
num_data = [len(train_set), len(valid_set), len(test_set)]
print(num_data)
print(type(train_set))
print(type(valid_set))

Chapter04/src/metrics/metrics.py

Lines changed: 26 additions & 0 deletions
@@ -0,0 +1,26 @@
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score, confusion_matrix


def get_metrics(targets, predicted, config):
    name = config['metrics']
    if name == "accuracy_score":
        return accuracy_score(targets, predicted)
    elif name == "recall_score":
        # note: for multiclass targets, sklearn's recall/precision/f1 need an
        # `average` argument (e.g. average='macro'); the default 'binary' only
        # works for two classes
        return recall_score(targets, predicted)
    elif name == "precision_score":
        return precision_score(targets, predicted)
    elif name == "f1_score":
        return f1_score(targets, predicted)
    else:
        print("There is no metrics in metrics_name")


def get_confusion_metric(targets, predicted):
    return confusion_matrix(targets, predicted)


def get_recall_score(targets, predicted):
    return recall_score(targets, predicted)


def get_precision_score(targets, predicted):
    return precision_score(targets, predicted)
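
A minimal sketch of computing a metric for one batch (model, images, labels and config are assumed to exist as in predict_images.py, with config['metrics'] set to "accuracy_score"):

import torch
from src.metrics.metrics import get_metrics

outputs = model(images)                    # (batch_size, num_classes) scores
_, predicted = torch.max(outputs.data, 1)  # predicted class index per image

acc = get_metrics(labels.cpu().numpy(), predicted.cpu().numpy(), config)
print('batch accuracy:', acc)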

Chapter04/src/models/model.py

Lines changed: 106 additions & 0 deletions
@@ -0,0 +1,106 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import yaml


def get_VGG(config):
    name = config['model']
    model_list = config['VGG_types']

    if name == 'VGG11':
        return VGGnet(model_list[name])
    elif name == 'VGG13':
        return VGGnet(model_list[name])
    elif name == 'VGG16':
        return VGGnet(model_list[name])
    elif name == 'VGG19':
        return VGGnet(model_list[name])
    else:
        print("There is no name in models")


class VGGnet(nn.Module):
    def __init__(self, model, in_channels=3, num_classes=36, init_weights=True):
        super(VGGnet, self).__init__()
        self.in_channels = in_channels

        self.conv_layers = self.create_conv_layers(model)

        self.fcs = nn.Sequential(
            nn.Linear(512, 4096),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(4096, num_classes)
        )

        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        x = self.conv_layers(x)
        x = x.view(-1, 512)
        x = self.fcs(x)
        return x

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)

    def create_conv_layers(self, architecture):
        layers = []
        in_channels = self.in_channels

        for x in architecture:
            if type(x) == int:
                out_channels = x
                layers += [nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                                     kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
                           nn.BatchNorm2d(x),
                           nn.ReLU()]
                in_channels = x
            elif x == 'M':
                layers += [nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))]

        return nn.Sequential(*layers)


# Open config file -> quick test
def open_config_file():
    with open("/data/Github_Management/StartDeepLearningWithPytorch/Chapter04/config/config.yaml", 'r', encoding='utf-8') as stream:
        try:
            config = yaml.safe_load(stream)  # returns a dict
        except yaml.YAMLError as exc:
            print(exc)
    return config['VGG_types']


if __name__ == '__main__':
    print('Quick Test...')

    models = open_config_file()
    model = VGGnet(models['VGG19'])
    print(model)

    input = torch.zeros([1, 3, 32, 32], dtype=torch.float32)
    # model = VGG_19(32, 3)
    output = model(input)

    print('input_shape: {}, output_size: {}'
          .format(input.shape, output.shape))
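
Each integer in a VGG_types list becomes a Conv2d-BatchNorm2d-ReLU block and each 'M' a 2x2 max pooling, so with the 40x40 crops produced by the Chapter04 dataloader the spatial size shrinks 40 -> 20 -> 10 -> 5 -> 2 -> 1 across the five poolings and the flattened feature size is 512 * 1 * 1 = 512, which is exactly what nn.Linear(512, 4096) expects. A minimal sketch of the same quick test with that input size (assuming the absolute config path above is available):

model = VGGnet(open_config_file()['VGG11'])
x = torch.zeros([1, 3, 40, 40], dtype=torch.float32)
print(model(x).shape)   # torch.Size([1, 36]), one score per fruit class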

Chapter04/src/optimizers/loss.py

Lines changed: 12 additions & 0 deletions
@@ -0,0 +1,12 @@
import torch.nn as nn


def get_loss(config, params=None):
    name = config['loss']
    if name == 'MSELoss':
        return nn.MSELoss()
    elif name == 'CrossEntropyLoss':
        return nn.CrossEntropyLoss()
    elif name == 'Softmax':
        # note: nn.Softmax is an activation, not a loss function
        return nn.Softmax()
    else:
        print("There is no name in loss")
Lines changed: 19 additions & 0 deletions
@@ -0,0 +1,19 @@
import torch.optim as optimizer


def get_optimizer(model_paramter, config):
    name = config['optimizer']

    if name == 'Adam':
        return optimizer.Adam(params=model_paramter,
                              lr=config['learning_rate'],
                              weight_decay=config['weight_decay'])

    elif name == 'SGD':
        return optimizer.SGD(params=model_paramter,
                             lr=config['learning_rate'],
                             momentum=config['momentum'],
                             weight_decay=config['weight_decay'])

    else:
        print("There is no name in optimizer")
