num_epoches = 20

# Download the MNIST handwritten digit training set
- train_dataset = datasets.MNIST(root='./data', train=True,
-                                transform=transforms.ToTensor(),
-                                download=True)
+ train_dataset = datasets.MNIST(
+     root='./data', train=True, transform=transforms.ToTensor(), download=True)

- test_dataset = datasets.MNIST(root='./data', train=False,
-                               transform=transforms.ToTensor())
+ test_dataset = datasets.MNIST(
+     root='./data', train=False, transform=transforms.ToTensor())

train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
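These loaders yield image tensors of shape (batch_size, 1, 28, 28). With `batch_first=True` on the LSTM defined in the next hunk, the model expects (batch, seq_len, input_size), so each 28x28 image is read as a 28-step sequence of 28-dimensional row vectors. A minimal shape sketch under that assumption (the actual reshape happens in loop code outside this diff):

    # Sketch only: how a batch is framed as a sequence for the LSTM.
    img, label = next(iter(train_loader))  # img: (batch_size, 1, 28, 28)
    seq = img.squeeze(1)                   # (batch_size, 28, 28): 28 steps of 28 features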
@@ -30,15 +29,14 @@ def __init__(self, in_dim, hidden_dim, n_layer, n_class):
        super(Rnn, self).__init__()
        self.n_layer = n_layer
        self.hidden_dim = hidden_dim
-         self.lstm = nn.LSTM(in_dim, hidden_dim, n_layer,
-                             batch_first=True)
+         self.lstm = nn.LSTM(in_dim, hidden_dim, n_layer, batch_first=True)
        self.classifier = nn.Linear(hidden_dim, n_class)

    def forward(self, x):
        # h0 = Variable(torch.zeros(self.n_layer, x.size(1),
-         #                           self.hidden_dim)).cuda()
+         #     self.hidden_dim)).cuda()
        # c0 = Variable(torch.zeros(self.n_layer, x.size(1),
-         #                           self.hidden_dim)).cuda()
+         #     self.hidden_dim)).cuda()
        out, _ = self.lstm(x)
        out = out[:, -1, :]
        out = self.classifier(out)
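`out[:, -1, :]` keeps only the hidden state of the last time step, which the linear layer maps to class scores. The hyperparameters are not part of this diff; a plausible instantiation for MNIST-as-sequence (the values below are assumptions, not the repo's) would be:

    # Hypothetical values: 28 features per row, 128 hidden units,
    # 2 stacked LSTM layers, 10 digit classes.
    model = Rnn(in_dim=28, hidden_dim=128, n_layer=2, n_class=10)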
@@ -55,8 +53,8 @@ def forward(self, x):

# Start training
for epoch in range(num_epoches):
-     print('epoch {}'.format(epoch+1))
-     print('*'*10)
+     print('epoch {}'.format(epoch + 1))
+     print('*' * 10)
    running_loss = 0.0
    running_acc = 0.0
    for i, data in enumerate(train_loader, 1):
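`enumerate(train_loader, 1)` starts `i` at 1, so the running averages in the next hunk divide by the number of samples seen so far. The body of this inner loop is elided between hunks; a generic sketch of the usual PyTorch <=0.3 training step, assumed rather than taken from this commit:

    # Generic sketch of the omitted training step -- not this repo's exact lines.
    img, label = data
    img = Variable(img.squeeze(1))  # assumed reshape to (batch, 28, 28)
    label = Variable(label)
    out = model(img)
    loss = criterion(out, label)
    running_loss += loss.data[0] * label.size(0)
    _, pred = torch.max(out, 1)
    running_acc += (pred == label).sum().data[0]
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()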
@@ -87,15 +85,11 @@ def forward(self, x):

        if i % 300 == 0:
            print('[{}/{}] Loss: {:.6f}, Acc: {:.6f}'.format(
-                 epoch + 1, num_epoches,
-                 running_loss / (batch_size * i),
-                 running_acc / (batch_size * i)
-             ))
+                 epoch + 1, num_epoches, running_loss / (batch_size * i),
+                 running_acc / (batch_size * i)))
    print('Finish {} epoch, Loss: {:.6f}, Acc: {:.6f}'.format(
-         epoch + 1,
-         running_loss / (len(train_dataset)),
-         running_acc / (len(train_dataset))
-     ))
+         epoch + 1, running_loss / (len(train_dataset)), running_acc / (len(
+             train_dataset))))
    model.eval()
    eval_loss = 0
    eval_acc = 0
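`loss.data[0]` and `Variable(..., volatile=True)` (used in the evaluation hunk below) are PyTorch <=0.3 idioms. For reference only, not part of this commit, the post-0.4 equivalents would be:

    # Post-0.4 equivalents; a reference sketch, not code from this commit.
    running_loss += loss.item() * label.size(0)  # replaces loss.data[0]
    with torch.no_grad():                        # replaces volatile=True
        out = model(img)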
@@ -111,18 +105,16 @@ def forward(self, x):
            img = Variable(img, volatile=True).cuda()
            label = Variable(label, volatile=True).cuda()
        else:
-             img = Variabel(img, volatile=True)
+             img = Variable(img, volatile=True)
            label = Variable(label, volatile=True)
        out = model(img)
        loss = criterion(out, label)
-         eval_loss += loss.data[0]*label.size(0)
+         eval_loss += loss.data[0] * label.size(0)
        _, pred = torch.max(out, 1)
        num_correct = (pred == label).sum()
        eval_acc += num_correct.data[0]
-     print('Test Loss: {:.6f}, Acc: {:.6f}'.format(
-         eval_loss / (len(test_dataset)),
-         eval_acc / (len(test_dataset))
-     ))
+     print('Test Loss: {:.6f}, Acc: {:.6f}'.format(eval_loss / (len(
+         test_dataset)), eval_acc / (len(test_dataset))))
    print()

# Save the model
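The save call itself is cut off by the diff. The standard pattern, with a hypothetical filename, would be:

    # Hypothetical path; the actual save line falls outside this diff.
    torch.save(model.state_dict(), './rnn.pth')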