@@ -161,20 +161,10 @@ def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
 
         # The linear layer that maps from hidden state space to tag space
         self.hidden2tag = nn.Linear(hidden_dim, tagset_size)
-        self.hidden = self.init_hidden()
-
-    def init_hidden(self):
-        # Before we've done anything, we dont have any hidden state.
-        # Refer to the Pytorch documentation to see exactly
-        # why they have this dimensionality.
-        # The axes semantics are (num_layers, minibatch_size, hidden_dim)
-        return (torch.zeros(1, 1, self.hidden_dim),
-                torch.zeros(1, 1, self.hidden_dim))
 
     def forward(self, sentence):
         embeds = self.word_embeddings(sentence)
-        lstm_out, self.hidden = self.lstm(
-            embeds.view(len(sentence), 1, -1), self.hidden)
+        lstm_out, _ = self.lstm(embeds.view(len(sentence), 1, -1))
         tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
         tag_scores = F.log_softmax(tag_space, dim=1)
         return tag_scores
@@ -201,10 +191,6 @@ def forward(self, sentence):
         # We need to clear them out before each instance
         model.zero_grad()
 
-        # Also, we need to clear out the hidden state of the LSTM,
-        # detaching it from its history on the last instance.
-        model.hidden = model.init_hidden()
-
        # Step 2. Get our inputs ready for the network, that is, turn them into
        # Tensors of word indices.
        sentence_in = prepare_sequence(sentence, word_to_ix)
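The removed init_hidden bookkeeping is redundant because nn.LSTM falls back to zero-initialized hidden and cell states whenever no (h_0, c_0) tuple is passed, so there is no per-instance state to reset. A minimal standalone sketch of that behavior (not part of the commit; the sizes below are illustrative, not taken from the tutorial):

import torch
import torch.nn as nn

# When no (h_0, c_0) is given, nn.LSTM uses zero tensors for both states
# on every call, so nothing carries over between training instances.
lstm = nn.LSTM(input_size=6, hidden_size=6)           # sizes chosen arbitrarily
inputs = torch.randn(5, 1, 6)                         # (seq_len, batch, input_size)

out_default, _ = lstm(inputs)
zeros = (torch.zeros(1, 1, 6), torch.zeros(1, 1, 6))  # (num_layers, batch, hidden_size)
out_explicit, _ = lstm(inputs, zeros)

print(torch.allclose(out_default, out_explicit))      # prints True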