
Commit b039e07

Author: Yunfeng Wang (committed)
Merge branch 'patch-1'
2 parents 2ef1c6c + 13ac098, commit b039e07

14 files changed: +32 −32 lines

examples/1_Introduction/helloworld.py

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@
 
 import tensorflow as tf
 
-#Simple hello world using TensorFlow
+# Simple hello world using TensorFlow
 
 # Create a Constant op
 # The op is added as a node to the default graph.

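For context, the pattern these comments describe is the smallest possible TF 1.x program; a minimal sketch, assuming the graph-and-session API used throughout these examples:

    import tensorflow as tf

    # The constant is added as a node to the default graph; nothing runs yet.
    hello = tf.constant('Hello, TensorFlow!')

    # Launching a session executes the graph and fetches the op's value.
    with tf.Session() as sess:
        print(sess.run(hello))
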
examples/2_BasicModels/linear_regression.py

Lines changed: 2 additions & 2 deletions
@@ -52,7 +52,7 @@
         for (x, y) in zip(train_X, train_Y):
             sess.run(optimizer, feed_dict={X: x, Y: y})
 
-        #Display logs per epoch step
+        # Display logs per epoch step
         if (epoch+1) % display_step == 0:
             c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})
             print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
@@ -62,7 +62,7 @@
     training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
     print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
 
-    #Graphic display
+    # Graphic display
     plt.plot(train_X, train_Y, 'ro', label='Original data')
     plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
     plt.legend()

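The training loop in the first hunk feeds one (x, y) pair per optimizer step through placeholders. A minimal sketch of the graph it assumes (TF 1.x; names match the example, while the toy data, initial values, and the 0.01 learning rate are illustrative assumptions):

    import numpy as np
    import tensorflow as tf

    train_X = np.asarray([1.0, 2.0, 3.0, 4.0])  # toy data for illustration
    train_Y = np.asarray([2.1, 3.9, 6.2, 8.0])
    n_samples = train_X.shape[0]

    X = tf.placeholder("float")
    Y = tf.placeholder("float")
    W = tf.Variable(0.3, name="weight")  # arbitrary initial values
    b = tf.Variable(-0.3, name="bias")

    pred = tf.add(tf.multiply(X, W), b)                          # W*X + b
    cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)  # squared error
    optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

Each sess.run(optimizer, feed_dict={X: x, Y: y}) then applies one gradient step per sample, which is why the display step afterwards re-evaluates the cost over the whole training set.
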
examples/2_BasicModels/logistic_regression.py

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@
 
 import tensorflow as tf
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 

examples/2_BasicModels/nearest_neighbor.py

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@
 import numpy as np
 import tensorflow as tf
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

examples/3_NeuralNetworks/autoencoder.py

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@
 import numpy as np
 import matplotlib.pyplot as plt
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

examples/3_NeuralNetworks/bidirectional_rnn.py

Lines changed: 4 additions & 4 deletions
@@ -1,5 +1,5 @@
 '''
-A Bidirectional Reccurent Neural Network (LSTM) implementation example using TensorFlow library.
+A Bidirectional Recurrent Neural Network (LSTM) implementation example using TensorFlow library.
 This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
 Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
 
@@ -13,12 +13,12 @@
 from tensorflow.python.ops import rnn, rnn_cell
 import numpy as np
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 
 '''
-To classify images using a bidirectional reccurent neural network, we consider
+To classify images using a bidirectional recurrent neural network, we consider
 every image row as a sequence of pixels. Because MNIST image shape is 28*28px,
 we will then handle 28 sequences of 28 steps for every sample.
 '''
@@ -41,7 +41,7 @@
 
 # Define weights
 weights = {
-    # Hidden layer weights => 2*n_hidden because of foward + backward cells
+    # Hidden layer weights => 2*n_hidden because of forward + backward cells
     'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))
 }
 biases = {

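The forward and backward cells explain the 2*n_hidden in the weight shape: their outputs are concatenated along the feature axis before the output projection. A sketch using the rnn/rnn_cell modules this file imports (old TF 0.x-era API; later releases moved and renamed these functions):

    lstm_fw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)  # forward
    lstm_bw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)  # backward
    outputs, _, _ = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                          dtype=tf.float32)
    # Each step's output is [batch_size, 2*n_hidden]: forward and backward
    # activations concatenated, hence the [2*n_hidden, n_classes] projection.
    pred = tf.matmul(outputs[-1], weights['out']) + biases['out']
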
examples/3_NeuralNetworks/convolutional_network.py

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@
 
 import tensorflow as tf
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 

examples/3_NeuralNetworks/dynamic_rnn.py

Lines changed: 3 additions & 3 deletions
@@ -1,5 +1,5 @@
 '''
-A Dynamic Reccurent Neural Network (LSTM) implementation example using
+A Dynamic Recurrent Neural Network (LSTM) implementation example using
 TensorFlow library. This example is using a toy dataset to classify linear
 sequences. The generated sequences have variable length.
 
@@ -26,7 +26,7 @@ class ToySequenceData(object):
 
    NOTICE:
     We have to pad each sequence to reach 'max_seq_len' for TensorFlow
-    consistency (we cannot feed a numpy array with unconsistent
+    consistency (we cannot feed a numpy array with inconsistent
     dimensions). The dynamic calculation will then be perform thanks to
     'seqlen' attribute that records every actual sequence length.
     """
@@ -130,7 +130,7 @@ def dynamicRNN(x, seqlen, weights, biases):
                                 sequence_length=seqlen)
 
     # When performing dynamic calculation, we must retrieve the last
-    # dynamically computed output, i.e, if a sequence length is 10, we need
+    # dynamically computed output, i.e., if a sequence length is 10, we need
     # to retrieve the 10th output.
     # However TensorFlow doesn't support advanced indexing yet, so we build
     # a custom op that for each sample in batch size, get its length and

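The "custom op" the comment refers to is an indexing trick rather than a new kernel: flatten the padded outputs and gather row seqlen-1 for each batch element. A sketch of that step, assuming outputs of shape [batch_size, seq_max_len, n_hidden] as in this example:

    # Index of the last valid output for every sequence in the batch.
    batch_size = tf.shape(outputs)[0]
    index = tf.range(0, batch_size) * seq_max_len + (seqlen - 1)

    # Flatten to [batch_size*seq_max_len, n_hidden] and gather those rows.
    outputs = tf.gather(tf.reshape(outputs, [-1, n_hidden]), index)
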
examples/3_NeuralNetworks/multilayer_perceptron.py

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@
 
 from __future__ import print_function
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 

examples/3_NeuralNetworks/recurrent_network.py

Lines changed: 2 additions & 2 deletions
@@ -1,5 +1,5 @@
 '''
-A Reccurent Neural Network (LSTM) implementation example using TensorFlow library.
+A Recurrent Neural Network (LSTM) implementation example using TensorFlow library.
 This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
 Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
 
@@ -18,7 +18,7 @@
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 
 '''
-To classify images using a reccurent neural network, we consider every image
+To classify images using a recurrent neural network, we consider every image
 row as a sequence of pixels. Because MNIST image shape is 28*28px, we will then
 handle 28 sequences of 28 steps for every sample.
 '''

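"28 sequences of 28 steps" means each 28x28 image is fed row by row: 28 time steps, each a 28-pixel vector. A sketch of the reshaping this implies (tf.unstack in TF >= 1.0; earlier releases spelled it tf.unpack or used tf.split):

    n_steps, n_input = 28, 28
    x = tf.placeholder("float", [None, n_steps, n_input])

    # Turn [batch, 28, 28] into a length-28 list of [batch, 28] row tensors,
    # one per time step, as the static RNN API expects.
    x_seq = tf.unstack(x, n_steps, 1)
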
examples/4_Utils/save_restore_model.py

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@
 
 from __future__ import print_function
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 

examples/4_Utils/tensorboard_advanced.py

Lines changed: 2 additions & 2 deletions
@@ -11,7 +11,7 @@
 
 import tensorflow as tf
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 
@@ -64,7 +64,7 @@ def multilayer_perceptron(x, weights, biases):
 }
 
 # Encapsulating all ops into scopes, making Tensorboard's Graph
-# visualization more convenient
+# Visualization more convenient
 with tf.name_scope('Model'):
     # Build model
     pred = multilayer_perceptron(x, weights, biases)

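The scope encapsulation mentioned in the second hunk groups every op created inside it under one collapsible node in TensorBoard's graph view. A minimal sketch, assuming TF >= 1.0 summary names (earlier releases used tf.scalar_summary) and the pred/y tensors from the example:

    with tf.name_scope('Loss'):
        # Everything built here collapses into a single "Loss" box in the graph.
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
    tf.summary.scalar('loss', loss)  # also plotted on the Scalars tab
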
examples/4_Utils/tensorboard_basic.py

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@
 
 import tensorflow as tf
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 

examples/5_MultiGPU/multigpu_basics.py

Lines changed: 11 additions & 11 deletions
@@ -18,10 +18,10 @@
 import tensorflow as tf
 import datetime
 
-#Processing Units logs
+# Processing Units logs
 log_device_placement = True
 
-#num of multiplications to perform
+# Num of multiplications to perform
 n = 10
 
 '''
@@ -30,11 +30,11 @@
 * Single GPU computation time: 0:00:11.277449
 * Multi GPU computation time: 0:00:07.131701
 '''
-#Create random large matrix
+# Create random large matrix
 A = np.random.rand(1e4, 1e4).astype('float32')
 B = np.random.rand(1e4, 1e4).astype('float32')
 
-# Creates a graph to store results
+# Create a graph to store results
 c1 = []
 c2 = []
 
@@ -50,7 +50,7 @@ def matpow(M, n):
 with tf.device('/gpu:0'):
     a = tf.constant(A)
     b = tf.constant(B)
-    #compute A^n and B^n and store results in c1
+    # Compute A^n and B^n and store results in c1
     c1.append(matpow(a, n))
     c1.append(matpow(b, n))
 
@@ -59,23 +59,23 @@ def matpow(M, n):
 
 t1_1 = datetime.datetime.now()
 with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:
-    # Runs the op.
+    # Run the op.
     sess.run(sum)
 t2_1 = datetime.datetime.now()
 
 
 '''
 Multi GPU computing
 '''
-#GPU:0 computes A^n
+# GPU:0 computes A^n
 with tf.device('/gpu:0'):
-    #compute A^n and store result in c2
+    # Compute A^n and store result in c2
     a = tf.constant(A)
     c2.append(matpow(a, n))
 
-#GPU:1 computes B^n
+# GPU:1 computes B^n
 with tf.device('/gpu:1'):
-    #compute B^n and store result in c2
+    # Compute B^n and store result in c2
     b = tf.constant(B)
     c2.append(matpow(b, n))
 
@@ -84,7 +84,7 @@ def matpow(M, n):
 
 t1_2 = datetime.datetime.now()
 with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:
-    # Runs the op.
+    # Run the op.
     sess.run(sum)
 t2_2 = datetime.datetime.now()
 

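The matpow helper named in the hunk headers is not shown in this diff; a plausible definition (hypothetical reconstruction, not taken from the commit) chains matmul nodes so the enclosing tf.device block pins the whole power computation to one GPU:

    def matpow(M, n):
        # Hypothetical sketch: build M^n as n-1 chained tf.matmul nodes.
        if n <= 1:
            return M
        return tf.matmul(M, matpow(M, n - 1))

Because every tf.matmul here is created inside a with tf.device('/gpu:k') block, the single-GPU run places all multiplications on gpu:0, while the multi-GPU run splits A^n and B^n across gpu:0 and gpu:1.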