@@ -336,9 +336,8 @@ def prepareData(lang1, lang2, reverse=False):
 #
 
 class EncoderRNN(nn.Module):
-    def __init__(self, input_size, hidden_size, n_layers=1):
+    def __init__(self, input_size, hidden_size):
         super(EncoderRNN, self).__init__()
-        self.n_layers = n_layers
         self.hidden_size = hidden_size
 
         self.embedding = nn.Embedding(input_size, hidden_size)
@@ -347,8 +346,7 @@ def __init__(self, input_size, hidden_size, n_layers=1):
     def forward(self, input, hidden):
         embedded = self.embedding(input).view(1, 1, -1)
         output = embedded
-        for i in range(self.n_layers):
-            output, hidden = self.gru(output, hidden)
+        output, hidden = self.gru(output, hidden)
         return output, hidden
 
     def initHidden(self):
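Side note, not part of the diff: looping over the same single-layer nn.GRU, as the removed code did, does not stack layers; it only re-applies one layer to its own output. If a deeper encoder were actually wanted, a rough sketch with current PyTorch (the value of n_layers here is illustrative) would pass num_layers to nn.GRU instead and size the initial hidden state to match:

import torch
import torch.nn as nn

hidden_size = 256    # same value the tutorial uses later
n_layers = 2         # illustrative only, not taken from the diff
gru = nn.GRU(hidden_size, hidden_size, num_layers=n_layers)
hidden = torch.zeros(n_layers, 1, hidden_size)           # initHidden would need this shape
output, hidden = gru(torch.zeros(1, 1, hidden_size), hidden)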
@@ -387,9 +385,8 @@ def initHidden(self):
 #
 
 class DecoderRNN(nn.Module):
-    def __init__(self, hidden_size, output_size, n_layers=1):
+    def __init__(self, hidden_size, output_size):
         super(DecoderRNN, self).__init__()
-        self.n_layers = n_layers
         self.hidden_size = hidden_size
 
         self.embedding = nn.Embedding(output_size, hidden_size)
@@ -399,9 +396,8 @@ def __init__(self, hidden_size, output_size, n_layers=1):
 
     def forward(self, input, hidden):
         output = self.embedding(input).view(1, 1, -1)
-        for i in range(self.n_layers):
-            output = F.relu(output)
-            output, hidden = self.gru(output, hidden)
+        output = F.relu(output)
+        output, hidden = self.gru(output, hidden)
         output = self.softmax(self.out(output[0]))
         return output, hidden
 
@@ -451,11 +447,10 @@ def initHidden(self):
 #
 
 class AttnDecoderRNN(nn.Module):
-    def __init__(self, hidden_size, output_size, n_layers=1, dropout_p=0.1, max_length=MAX_LENGTH):
+    def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
         super(AttnDecoderRNN, self).__init__()
         self.hidden_size = hidden_size
         self.output_size = output_size
-        self.n_layers = n_layers
         self.dropout_p = dropout_p
         self.max_length = max_length
 
@@ -478,9 +473,8 @@ def forward(self, input, hidden, encoder_outputs):
         output = torch.cat((embedded[0], attn_applied[0]), 1)
         output = self.attn_combine(output).unsqueeze(0)
 
-        for i in range(self.n_layers):
-            output = F.relu(output)
-            output, hidden = self.gru(output, hidden)
+        output = F.relu(output)
+        output, hidden = self.gru(output, hidden)
 
         output = F.log_softmax(self.out(output[0]), dim=1)
         return output, hidden, attn_weights
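For reference, a rough shape sketch of the single ReLU + GRU step that AttnDecoderRNN.forward now performs once per decoded token, assuming batch size 1 as throughout the tutorial and hidden_size = 256 as set below (the standalone names here are illustrative):

import torch
import torch.nn as nn
import torch.nn.functional as F

hidden_size = 256
gru = nn.GRU(hidden_size, hidden_size)      # single-layer GRU, as in the tutorial classes
output = torch.zeros(1, 1, hidden_size)     # attn_combine(...).unsqueeze(0) -> (1, 1, hidden_size)
hidden = torch.zeros(1, 1, hidden_size)     # (num_layers=1, batch=1, hidden_size)
output = F.relu(output)
output, hidden = gru(output, hidden)        # both remain (1, 1, hidden_size)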
@@ -798,8 +792,8 @@ def evaluateRandomly(encoder, decoder, n=10):
 
 hidden_size = 256
 encoder1 = EncoderRNN(input_lang.n_words, hidden_size)
-attn_decoder1 = AttnDecoderRNN(hidden_size, output_lang.n_words,
-                               1, dropout_p=0.1)
+attn_decoder1 = AttnDecoderRNN(hidden_size, output_lang.n_words, dropout_p=0.1)
+
 
 if use_cuda:
     encoder1 = encoder1.cuda()