Commit 13ac098

Author: Yunfeng Wang
Commit message: fix some typos
1 parent: bc8c77d


10 files changed: 16 lines added, 16 lines removed


examples/2_BasicModels/logistic_regression.py

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@
 
 import tensorflow as tf
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

examples/2_BasicModels/nearest_neighbor.py

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@
 import numpy as np
 import tensorflow as tf
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

examples/3_NeuralNetworks/autoencoder.py

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@
 import numpy as np
 import matplotlib.pyplot as plt
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

examples/3_NeuralNetworks/bidirectional_rnn.py

Lines changed: 4 additions & 4 deletions
@@ -1,5 +1,5 @@
 '''
-A Bidirectional Reccurent Neural Network (LSTM) implementation example using TensorFlow library.
+A Bidirectional Recurrent Neural Network (LSTM) implementation example using TensorFlow library.
 This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
 Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
@@ -13,12 +13,12 @@
 from tensorflow.python.ops import rnn, rnn_cell
 import numpy as np
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 
 '''
-To classify images using a bidirectional reccurent neural network, we consider
+To classify images using a bidirectional recurrent neural network, we consider
 every image row as a sequence of pixels. Because MNIST image shape is 28*28px,
 we will then handle 28 sequences of 28 steps for every sample.
 '''
@@ -41,7 +41,7 @@
 
 # Define weights
 weights = {
-    # Hidden layer weights => 2*n_hidden because of foward + backward cells
+    # Hidden layer weights => 2*n_hidden because of forward + backward cells
     'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))
 }
 biases = {
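
The docstring touched by this hunk describes the core trick of the example: each 28x28 MNIST image is treated as 28 time steps of 28 pixel features, and the output weights have shape [2*n_hidden, n_classes] because the forward and backward cell outputs are concatenated. A minimal NumPy sketch of that reshaping, with illustrative values (the script's other constants are not shown in this diff):

import numpy as np

n_steps, n_input, n_hidden = 28, 28, 128             # illustrative values, not from the diff
batch = np.random.rand(5, 784).astype(np.float32)    # 5 flattened 28*28 MNIST images

# Each 784-pixel image becomes 28 sequence steps of 28 features each.
sequences = batch.reshape(-1, n_steps, n_input)
print(sequences.shape)      # (5, 28, 28)

# A bidirectional LSTM concatenates forward and backward outputs at every step,
# so the per-step output size is 2*n_hidden, matching the [2*n_hidden, n_classes] weights.
print(2 * n_hidden)         # 256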

examples/3_NeuralNetworks/convolutional_network.py

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@
 
 import tensorflow as tf
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

examples/3_NeuralNetworks/dynamic_rnn.py

Lines changed: 3 additions & 3 deletions
@@ -1,5 +1,5 @@
 '''
-A Dynamic Reccurent Neural Network (LSTM) implementation example using
+A Dynamic Recurrent Neural Network (LSTM) implementation example using
 TensorFlow library. This example is using a toy dataset to classify linear
 sequences. The generated sequences have variable length.
@@ -26,7 +26,7 @@ class ToySequenceData(object):
 
     NOTICE:
     We have to pad each sequence to reach 'max_seq_len' for TensorFlow
-    consistency (we cannot feed a numpy array with unconsistent
+    consistency (we cannot feed a numpy array with inconsistent
     dimensions). The dynamic calculation will then be perform thanks to
     'seqlen' attribute that records every actual sequence length.
     """
@@ -130,7 +130,7 @@ def dynamicRNN(x, seqlen, weights, biases):
                                 sequence_length=seqlen)
 
     # When performing dynamic calculation, we must retrieve the last
-    # dynamically computed output, i.e, if a sequence length is 10, we need
+    # dynamically computed output, i.e., if a sequence length is 10, we need
     # to retrieve the 10th output.
     # However TensorFlow doesn't support advanced indexing yet, so we build
     # a custom op that for each sample in batch size, get its length and
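
The comment being fixed here describes the standard workaround for reading out, for each padded sequence, the RNN output at its last valid step: flatten the batch and time dimensions, compute one flat index per sample, and gather. The index arithmetic is easiest to see in NumPy (shapes below are invented for illustration; the script applies the same idea with TensorFlow ops):

import numpy as np

batch_size, max_seq_len, n_hidden = 3, 4, 2           # invented shapes for illustration
outputs = np.arange(batch_size * max_seq_len * n_hidden,
                    dtype=np.float32).reshape(batch_size, max_seq_len, n_hidden)
seqlen = np.array([4, 2, 3])                          # true length of each padded sequence

# Flatten (batch, time) into one axis, then pick, for every sample,
# the row of its last valid time step: row_offset + (length - 1).
flat = outputs.reshape(-1, n_hidden)                  # shape (batch*max_seq_len, n_hidden)
index = np.arange(batch_size) * max_seq_len + (seqlen - 1)
last_relevant = flat[index]                           # shape (batch, n_hidden)

print(last_relevant.shape)   # (3, 2)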

examples/3_NeuralNetworks/multilayer_perceptron.py

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@
 
 from __future__ import print_function
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

examples/4_Utils/save_restore_model.py

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@
 
 from __future__ import print_function
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

examples/4_Utils/tensorboard_advanced.py

Lines changed: 2 additions & 2 deletions
@@ -11,7 +11,7 @@
 
 import tensorflow as tf
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 
@@ -64,7 +64,7 @@ def multilayer_perceptron(x, weights, biases):
 }
 
 # Encapsulating all ops into scopes, making Tensorboard's Graph
-# visualization more convenient
+# Visualization more convenient
 with tf.name_scope('Model'):
     # Build model
     pred = multilayer_perceptron(x, weights, biases)
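
The comment in this hunk refers to grouping ops with tf.name_scope so that TensorBoard's Graph view renders each group as a single collapsible node. A minimal sketch of the pattern, assuming the TF 1.x graph-mode API these examples are written against (variable names are illustrative, not from the file):

import tensorflow as tf   # assumes the TF 1.x graph-mode API used by these examples

x = tf.placeholder(tf.float32, [None, 784], name='InputData')
y = tf.placeholder(tf.float32, [None, 10], name='LabelData')
W = tf.Variable(tf.zeros([784, 10]), name='Weights')
b = tf.Variable(tf.zeros([10]), name='Bias')

# Every op created inside a scope is collapsed into one named node in
# TensorBoard's Graph tab, which is what "encapsulating all ops into scopes" buys.
with tf.name_scope('Model'):
    pred = tf.nn.softmax(tf.matmul(x, W) + b)

with tf.name_scope('Loss'):
    loss = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), axis=1))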

examples/4_Utils/tensorboard_basic.py

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@
 
 import tensorflow as tf
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
