Commit 88577d7

darabos authored and soumith committed
Remove unused hidden state. (pytorch#435)
1 parent 068d57a commit 88577d7

File tree

1 file changed: +1 −15 lines changed

beginner_source/nlp/sequence_models_tutorial.py

@@ -161,20 +161,10 @@ def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
 
         # The linear layer that maps from hidden state space to tag space
         self.hidden2tag = nn.Linear(hidden_dim, tagset_size)
-        self.hidden = self.init_hidden()
-
-    def init_hidden(self):
-        # Before we've done anything, we dont have any hidden state.
-        # Refer to the Pytorch documentation to see exactly
-        # why they have this dimensionality.
-        # The axes semantics are (num_layers, minibatch_size, hidden_dim)
-        return (torch.zeros(1, 1, self.hidden_dim),
-                torch.zeros(1, 1, self.hidden_dim))
 
     def forward(self, sentence):
         embeds = self.word_embeddings(sentence)
-        lstm_out, self.hidden = self.lstm(
-            embeds.view(len(sentence), 1, -1), self.hidden)
+        lstm_out, _ = self.lstm(embeds.view(len(sentence), 1, -1))
        tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
        tag_scores = F.log_softmax(tag_space, dim=1)
        return tag_scores

@@ -201,10 +191,6 @@ def forward(self, sentence):
         # We need to clear them out before each instance
         model.zero_grad()
 
-        # Also, we need to clear out the hidden state of the LSTM,
-        # detaching it from its history on the last instance.
-        model.hidden = model.init_hidden()
-
         # Step 2. Get our inputs ready for the network, that is, turn them into
         # Tensors of word indices.
         sentence_in = prepare_sequence(sentence, word_to_ix)
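
For context on why this change is safe: nn.LSTM falls back to a zero-initialized (h_0, c_0) when no hidden state is passed, so the init_hidden() plumbing removed above never changed the result. A minimal sketch verifying this equivalence (not part of the commit; the layer and tensor sizes are arbitrary):

import torch
import torch.nn as nn

torch.manual_seed(0)
# Arbitrary sizes for illustration: input_size=4, hidden_size=3, one layer.
lstm = nn.LSTM(input_size=4, hidden_size=3)
inputs = torch.randn(5, 1, 4)  # (seq_len, batch, input_size)

# Default path: no hidden state passed, so (h_0, c_0) start as zeros.
out_default, _ = lstm(inputs)

# Explicit zeros, shaped (num_layers, batch, hidden_dim) as in the removed code.
zeros = (torch.zeros(1, 1, 3), torch.zeros(1, 1, 3))
out_explicit, _ = lstm(inputs, zeros)

assert torch.allclose(out_default, out_explicit)  # same outputs either way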
