Commit c3ea8f3

make some variable names more accurate
epoch -> iter, remove comment about loss init
1 parent 7b148a6

3 files changed: +33 -34 lines changed


intermediate_source/char_rnn_classification_tutorial.py (+11 -11)

@@ -274,15 +274,15 @@ def categoryFromOutput(output):
 def randomChoice(l):
     return l[random.randint(0, len(l) - 1)]
 
-def randomTrainingPair():
+def randomTrainingExample():
     category = randomChoice(all_categories)
     line = randomChoice(category_lines[category])
     category_tensor = Variable(torch.LongTensor([all_categories.index(category)]))
     line_tensor = Variable(lineToTensor(line))
     return category, line, category_tensor, line_tensor
 
 for i in range(10):
-    category, line, category_tensor, line_tensor = randomTrainingPair()
+    category, line, category_tensor, line_tensor = randomTrainingExample()
     print('category =', category, '/ line =', line)

@@ -338,14 +338,14 @@ def train(category_tensor, line_tensor):
 # Now we just have to run that with a bunch of examples. Since the
 # ``train`` function returns both the output and loss we can print its
 # guesses and also keep track of loss for plotting. Since there are 1000s
-# of examples we print only every ``print_every`` time steps, and take an
+# of examples we print only every ``print_every`` examples, and take an
 # average of the loss.
 #
 
 import time
 import math
 
-n_epochs = 100000
+n_iters = 100000
 print_every = 5000
 plot_every = 1000

@@ -364,19 +364,19 @@ def timeSince(since):
 
 start = time.time()
 
-for epoch in range(1, n_epochs + 1):
-    category, line, category_tensor, line_tensor = randomTrainingPair()
+for iter in range(1, n_iters + 1):
+    category, line, category_tensor, line_tensor = randomTrainingExample()
     output, loss = train(category_tensor, line_tensor)
     current_loss += loss
 
-    # Print epoch number, loss, name and guess
-    if epoch % print_every == 0:
+    # Print iter number, loss, name and guess
+    if iter % print_every == 0:
         guess, guess_i = categoryFromOutput(output)
         correct = '✓' if guess == category else '✗ (%s)' % category
-        print('%d %d%% (%s) %.4f %s / %s %s' % (epoch, epoch / n_epochs * 100, timeSince(start), loss, line, guess, correct))
+        print('%d %d%% (%s) %.4f %s / %s %s' % (iter, iter / n_iters * 100, timeSince(start), loss, line, guess, correct))
 
     # Add current loss avg to list of losses
-    if epoch % plot_every == 0:
+    if iter % plot_every == 0:
         all_losses.append(current_loss / plot_every)
         current_loss = 0

@@ -422,7 +422,7 @@ def evaluate(line_tensor):
 
 # Go through a bunch of examples and record which are correctly guessed
 for i in range(n_confusion):
-    category, line, category_tensor, line_tensor = randomTrainingPair()
+    category, line, category_tensor, line_tensor = randomTrainingExample()
     output = evaluate(line_tensor)
     guess, guess_i = categoryFromOutput(output)
     category_i = all_categories.index(category)
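
Note on the rename: each pass through this loop trains on one randomly sampled name, not a full sweep of the dataset, so "epoch" was a misnomer and "iter" is the accurate term. A runnable sketch of the distinction, where train(), randomTrainingExample() and dataset are trivial stubs for illustration, not the tutorial's versions:

    # Stubs so the contrast runs standalone; all names here are illustrative.
    def train(category_tensor, line_tensor):
        return None, 0.0                       # stand-in returning (output, loss)

    def randomTrainingExample():
        return 'English', 'Smith', None, None  # (category, line, tensors...)

    dataset = [(None, None)] * 4               # hypothetical full training set
    n_iters, n_epochs = 10, 2

    # An *iteration*: one randomly sampled example (what this tutorial does).
    for iter in range(1, n_iters + 1):
        category, line, category_tensor, line_tensor = randomTrainingExample()
        output, loss = train(category_tensor, line_tensor)

    # An *epoch*: one full pass over the entire dataset, which the old name
    # wrongly suggested.
    for epoch in range(1, n_epochs + 1):
        for category_tensor, line_tensor in dataset:
            output, loss = train(category_tensor, line_tensor)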

intermediate_source/char_rnn_generation_tutorial.py (+11 -12)

@@ -236,13 +236,13 @@ def targetTensor(line):
 
 
 ######################################################################
-# For convenience during training we'll make a ``randomTrainingSet``
+# For convenience during training we'll make a ``randomTrainingExample``
 # function that fetches a random (category, line) pair and turns them into
 # the required (category, input, target) tensors.
 #
 
 # Make category, input, and target tensors from a random category, line pair
-def randomTrainingSet():
+def randomTrainingExample():
     category, line = randomTrainingPair()
     category_tensor = Variable(categoryTensor(category))
     input_line_tensor = Variable(inputTensor(line))

@@ -259,8 +259,7 @@ def randomTrainingSet():
 # every step.
 #
 # The magic of autograd allows you to simply sum these losses at each step
-# and call backward at the end. But don't ask me why initializing loss
-# with 0 works.
+# and call backward at the end.
 #
 
 criterion = nn.NLLLoss()

@@ -305,28 +304,28 @@ def timeSince(since):
 ######################################################################
 # Training is business as usual - call train a bunch of times and wait a
 # few minutes, printing the current time and loss every ``print_every``
-# epochs, and keeping store of an average loss per ``plot_every`` epochs
+# examples, and keeping store of an average loss per ``plot_every`` examples
 # in ``all_losses`` for plotting later.
 #
 
 rnn = RNN(n_letters, 128, n_letters)
 
-n_epochs = 100000
+n_iters = 100000
 print_every = 5000
 plot_every = 500
 all_losses = []
-total_loss = 0 # Reset every plot_every epochs
+total_loss = 0 # Reset every plot_every iters
 
 start = time.time()
 
-for epoch in range(1, n_epochs + 1):
-    output, loss = train(*randomTrainingSet())
+for iter in range(1, n_iters + 1):
+    output, loss = train(*randomTrainingExample())
     total_loss += loss
 
-    if epoch % print_every == 0:
-        print('%s (%d %d%%) %.4f' % (timeSince(start), epoch, epoch / n_epochs * 100, loss))
+    if iter % print_every == 0:
+        print('%s (%d %d%%) %.4f' % (timeSince(start), iter, iter / n_iters * 100, loss))
 
-    if epoch % plot_every == 0:
+    if iter % plot_every == 0:
         all_losses.append(total_loss / plot_every)
         total_loss = 0
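
The comment trimmed in the second hunk describes a real autograd technique: per-step losses are summed into one graph, and a single backward() then propagates gradients through every time step. A minimal runnable sketch of that pattern, using the current PyTorch API rather than the Variable wrapper shown in the diff (sizes and names below are illustrative, not from the tutorial):

    import torch
    import torch.nn as nn

    torch.manual_seed(0)
    seq_len, n_in, n_hidden, n_out = 5, 8, 16, 4
    cell = nn.RNNCell(n_in, n_hidden)           # stand-in for the tutorial's RNN
    out_layer = nn.Linear(n_hidden, n_out)
    log_softmax = nn.LogSoftmax(dim=1)
    criterion = nn.NLLLoss()

    inputs = torch.randn(seq_len, 1, n_in)      # one sequence, batch size 1
    targets = torch.randint(n_out, (seq_len,))  # one target class per step
    hidden = torch.zeros(1, n_hidden)

    loss = 0  # per-step losses are summed into a single graph
    for t in range(seq_len):
        hidden = cell(inputs[t], hidden)
        output = log_softmax(out_layer(hidden))
        loss += criterion(output, targets[t:t + 1])

    loss.backward()  # one backward pass flows through every time step

Starting from the Python integer 0 works because the first += simply promotes loss to the step's loss tensor; from then on each addition extends the autograd graph as usual.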

intermediate_source/seq2seq_translation_tutorial.py (+11 -11)

@@ -650,10 +650,10 @@ def timeSince(since, percent):
 # - Start empty losses array for plotting
 #
 # Then we call ``train`` many times and occasionally print the progress (%
-# of epochs, time so far, estimated time) and average loss.
+# of examples, time so far, estimated time) and average loss.
 #
 
-def trainEpochs(encoder, decoder, n_epochs, print_every=1000, plot_every=100, learning_rate=0.01):
+def trainIters(encoder, decoder, n_iters, print_every=1000, plot_every=100, learning_rate=0.01):
     start = time.time()
     plot_losses = []
     print_loss_total = 0 # Reset every print_every

@@ -662,11 +662,11 @@ def trainEpochs(encoder, decoder, n_epochs, print_every=1000, plot_every=100, le
     encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
     decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
     training_pairs = [variablesFromPair(random.choice(pairs))
-                      for i in range(n_epochs)]
+                      for i in range(n_iters)]
     criterion = nn.NLLLoss()
 
-    for epoch in range(1, n_epochs + 1):
-        training_pair = training_pairs[epoch - 1]
+    for iter in range(1, n_iters + 1):
+        training_pair = training_pairs[iter - 1]
         input_variable = training_pair[0]
         target_variable = training_pair[1]

@@ -675,13 +675,13 @@ def trainEpochs(encoder, decoder, n_epochs, print_every=1000, plot_every=100, le
         print_loss_total += loss
         plot_loss_total += loss
 
-        if epoch % print_every == 0:
+        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
-            print('%s (%d %d%%) %.4f' % (timeSince(start, epoch / n_epochs),
-                                         epoch, epoch / n_epochs * 100, print_loss_avg))
+            print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
+                                         iter, iter / n_iters * 100, print_loss_avg))
 
-        if epoch % plot_every == 0:
+        if iter % plot_every == 0:
             plot_loss_avg = plot_loss_total / plot_every
             plot_losses.append(plot_loss_avg)
             plot_loss_total = 0

@@ -793,7 +793,7 @@ def evaluateRandomly(encoder, decoder, n=10):
 # .. Note::
 #    If you run this notebook you can train, interrupt the kernel,
 #    evaluate, and continue training later. Comment out the lines where the
-#    encoder and decoder are initialized and run ``trainEpochs`` again.
+#    encoder and decoder are initialized and run ``trainIters`` again.
 #
 
 hidden_size = 256

@@ -805,7 +805,7 @@ def evaluateRandomly(encoder, decoder, n=10):
     encoder1 = encoder1.cuda()
     attn_decoder1 = attn_decoder1.cuda()
 
-trainEpochs(encoder1, attn_decoder1, 75000, print_every=5000)
+trainIters(encoder1, attn_decoder1, 75000, print_every=5000)
 
 ######################################################################
 #
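
For reference, the "time so far, estimated time" figures printed above come from the timeSince(since, percent) helper this loop calls. A sketch of how such a helper can derive the estimate by linear extrapolation from the fraction of iterations completed; this is a reconstruction consistent with the call site, not a verbatim copy of the tutorial's helper:

    import math
    import time

    def asMinutes(s):
        m = math.floor(s / 60)
        return '%dm %ds' % (m, s - m * 60)

    def timeSince(since, percent):
        elapsed = time.time() - since
        estimated_total = elapsed / percent  # extrapolate from progress so far
        remaining = estimated_total - elapsed
        return '%s (- %s)' % (asMinutes(elapsed), asMinutes(remaining))

    # e.g. at iter 5000 of 75000: timeSince(start, 5000 / 75000)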
