
Commit 705a883

hollichsasank authored and committed
removed references to layers as they were not working correctly (pytorch#195)
1 parent 7b35173 commit 705a883

File tree

1 file changed: +10 -16 lines changed


intermediate_source/seq2seq_translation_tutorial.py

Lines changed: 10 additions & 16 deletions
@@ -336,9 +336,8 @@ def prepareData(lang1, lang2, reverse=False):
 #
 
 class EncoderRNN(nn.Module):
-    def __init__(self, input_size, hidden_size, n_layers=1):
+    def __init__(self, input_size, hidden_size):
         super(EncoderRNN, self).__init__()
-        self.n_layers = n_layers
         self.hidden_size = hidden_size
 
         self.embedding = nn.Embedding(input_size, hidden_size)
@@ -347,8 +346,7 @@ def __init__(self, input_size, hidden_size, n_layers=1):
     def forward(self, input, hidden):
         embedded = self.embedding(input).view(1, 1, -1)
         output = embedded
-        for i in range(self.n_layers):
-            output, hidden = self.gru(output, hidden)
+        output, hidden = self.gru(output, hidden)
         return output, hidden
 
     def initHidden(self):
@@ -387,9 +385,8 @@ def initHidden(self):
 #
 
 class DecoderRNN(nn.Module):
-    def __init__(self, hidden_size, output_size, n_layers=1):
+    def __init__(self, hidden_size, output_size):
         super(DecoderRNN, self).__init__()
-        self.n_layers = n_layers
         self.hidden_size = hidden_size
 
         self.embedding = nn.Embedding(output_size, hidden_size)
@@ -399,9 +396,8 @@ def __init__(self, hidden_size, output_size, n_layers=1):
 
     def forward(self, input, hidden):
         output = self.embedding(input).view(1, 1, -1)
-        for i in range(self.n_layers):
-            output = F.relu(output)
-            output, hidden = self.gru(output, hidden)
+        output = F.relu(output)
+        output, hidden = self.gru(output, hidden)
         output = self.softmax(self.out(output[0]))
         return output, hidden
 
@@ -451,11 +447,10 @@ def initHidden(self):
 #
 
 class AttnDecoderRNN(nn.Module):
-    def __init__(self, hidden_size, output_size, n_layers=1, dropout_p=0.1, max_length=MAX_LENGTH):
+    def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
         super(AttnDecoderRNN, self).__init__()
         self.hidden_size = hidden_size
         self.output_size = output_size
-        self.n_layers = n_layers
         self.dropout_p = dropout_p
         self.max_length = max_length
 
@@ -478,9 +473,8 @@ def forward(self, input, hidden, encoder_outputs):
         output = torch.cat((embedded[0], attn_applied[0]), 1)
         output = self.attn_combine(output).unsqueeze(0)
 
-        for i in range(self.n_layers):
-            output = F.relu(output)
-            output, hidden = self.gru(output, hidden)
+        output = F.relu(output)
+        output, hidden = self.gru(output, hidden)
 
         output = F.log_softmax(self.out(output[0]), dim=1)
         return output, hidden, attn_weights
@@ -798,8 +792,8 @@ def evaluateRandomly(encoder, decoder, n=10):
 
 hidden_size = 256
 encoder1 = EncoderRNN(input_lang.n_words, hidden_size)
-attn_decoder1 = AttnDecoderRNN(hidden_size, output_lang.n_words,
-                               1, dropout_p=0.1)
+attn_decoder1 = AttnDecoderRNN(hidden_size, output_lang.n_words, dropout_p=0.1)
+
 
 if use_cuda:
     encoder1 = encoder1.cuda()
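For context, and not part of this commit: the removed loop re-applied the same single-layer self.gru n_layers times, so every pass reused one set of weights rather than building genuinely stacked layers. If stacked layers were actually wanted, nn.GRU can manage them internally through its num_layers argument, with the initial hidden state sized to match. The sketch below is only an illustration of that alternative under those assumptions; the class name MultiLayerEncoderRNN is hypothetical and appears nowhere in the tutorial or this commit, and it uses plain tensors where the tutorial of this era wrapped hidden states in Variable.

    import torch
    import torch.nn as nn

    class MultiLayerEncoderRNN(nn.Module):
        # Hypothetical sketch only -- not part of the tutorial or this commit.
        def __init__(self, input_size, hidden_size, n_layers=2):
            super(MultiLayerEncoderRNN, self).__init__()
            self.hidden_size = hidden_size
            self.n_layers = n_layers
            self.embedding = nn.Embedding(input_size, hidden_size)
            # num_layers stacks GRU layers with independent weights, unlike the
            # removed Python loop that reused one layer's weights each iteration.
            self.gru = nn.GRU(hidden_size, hidden_size, num_layers=n_layers)

        def forward(self, input, hidden):
            embedded = self.embedding(input).view(1, 1, -1)
            output, hidden = self.gru(embedded, hidden)
            return output, hidden

        def initHidden(self):
            # One hidden slice per stacked layer: (num_layers, batch, hidden_size).
            return torch.zeros(self.n_layers, 1, self.hidden_size)

Used in place of EncoderRNN, it would be constructed as MultiLayerEncoderRNN(input_lang.n_words, hidden_size, n_layers=2), with the initial hidden state still coming from initHidden().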
