@@ -20,12 +20,11 @@ def to_np(x):
# Download the MNIST handwritten-digit training set
-train_dataset = datasets.MNIST(root='./data', train=True,
-                               transform=transforms.ToTensor(),
-                               download=True)
+train_dataset = datasets.MNIST(
+    root='./data', train=True, transform=transforms.ToTensor(), download=True)

-test_dataset = datasets.MNIST(root='./data', train=False,
-                              transform=transforms.ToTensor())
+test_dataset = datasets.MNIST(
+    root='./data', train=False, transform=transforms.ToTensor())

train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
@@ -41,14 +40,10 @@ def __init__(self, in_dim, n_class):
            nn.MaxPool2d(2, 2),
            nn.Conv2d(6, 16, 5, stride=1, padding=0),
            nn.ReLU(True),
-            nn.MaxPool2d(2, 2),
-        )
+            nn.MaxPool2d(2, 2), )

        self.fc = nn.Sequential(
-            nn.Linear(400, 120),
-            nn.Linear(120, 84),
-            nn.Linear(84, n_class)
-        )
+            nn.Linear(400, 120), nn.Linear(120, 84), nn.Linear(84, n_class))

    def forward(self, x):
        out = self.conv(x)
@@ -67,18 +62,17 @@ def forward(self, x):
logger = Logger('./logs')
# Start training
for epoch in range(num_epoches):
-    print('epoch {}'.format(epoch+1))
-    print('*'*10)
+    print('epoch {}'.format(epoch + 1))
+    print('*' * 10)
    running_loss = 0.0
    running_acc = 0.0
    for i, data in enumerate(train_loader, 1):
        img, label = data
        if use_gpu:
-            img = Variable(img).cuda()
-            label = Variable(label).cuda()
-        else:
-            img = Variable(img)
-            label = Variable(label)
+            img = img.cuda()
+            label = label.cuda()
+        img = Variable(img)
+        label = Variable(label)
        # Forward pass
        out = model(img)
        loss = criterion(out, label)
@@ -94,10 +88,7 @@ def forward(self, x):
        # ========================= Log ======================
        step = epoch * len(train_loader) + i
        # (1) Log the scalar values
-        info = {
-            'loss': loss.data[0],
-            'accuracy': accuracy.data[0]
-        }
+        info = {'loss': loss.data[0], 'accuracy': accuracy.data[0]}

        for tag, value in info.items():
            logger.scalar_summary(tag, value, step)
@@ -106,26 +97,20 @@ def forward(self, x):
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            logger.histo_summary(tag, to_np(value), step)
-            logger.histo_summary(tag+'/grad', to_np(value.grad), step)
+            logger.histo_summary(tag + '/grad', to_np(value.grad), step)

        # (3) Log the images
-        info = {
-            'images': to_np(img.view(-1, 28, 28)[:10])
-        }
+        info = {'images': to_np(img.view(-1, 28, 28)[:10])}

        for tag, images in info.items():
            logger.image_summary(tag, images, step)
        if i % 300 == 0:
            print('[{}/{}] Loss: {:.6f}, Acc: {:.6f}'.format(
-                epoch + 1, num_epoches,
-                running_loss / (batch_size * i),
-                running_acc / (batch_size * i)
-            ))
+                epoch + 1, num_epoches, running_loss / (batch_size * i),
+                running_acc / (batch_size * i)))
    print('Finish {} epoch, Loss: {:.6f}, Acc: {:.6f}'.format(
-        epoch + 1,
-        running_loss / (len(train_dataset)),
-        running_acc / (len(train_dataset))
-    ))
+        epoch + 1, running_loss / (len(train_dataset)), running_acc / (len(
+            train_dataset))))
    model.eval()
    eval_loss = 0
    eval_acc = 0
@@ -135,18 +120,16 @@ def forward(self, x):
            img = Variable(img, volatile=True).cuda()
            label = Variable(label, volatile=True).cuda()
        else:
-            img = Variabel(img, volatile=True)
+            img = Variable(img, volatile=True)
            label = Variable(label, volatile=True)
        out = model(img)
        loss = criterion(out, label)
-        eval_loss += loss.data[0]*label.size(0)
+        eval_loss += loss.data[0] * label.size(0)
        _, pred = torch.max(out, 1)
        num_correct = (pred == label).sum()
        eval_acc += num_correct.data[0]
-    print('Test Loss: {:.6f}, Acc: {:.6f}'.format(
-        eval_loss / (len(test_dataset)),
-        eval_acc / (len(test_dataset))
-    ))
+    print('Test Loss: {:.6f}, Acc: {:.6f}'.format(eval_loss / (len(
+        test_dataset)), eval_acc / (len(test_dataset))))
    print()

# Save the model