Fix format and some typos #63

Merged (4 commits) on Sep 2, 2016
2 changes: 1 addition & 1 deletion examples/1_Introduction/helloworld.py
@@ -9,7 +9,7 @@

import tensorflow as tf

-#Simple hello world using TensorFlow
+# Simple hello world using TensorFlow

# Create a Constant op
# The op is added as a node to the default graph.
4 changes: 2 additions & 2 deletions examples/2_BasicModels/linear_regression.py
@@ -52,7 +52,7 @@
for (x, y) in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})

-#Display logs per epoch step
+# Display logs per epoch step
if (epoch+1) % display_step == 0:
c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
@@ -62,7 +62,7 @@
training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')

-#Graphic display
+# Graphic display
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
plt.legend()
2 changes: 1 addition & 1 deletion examples/2_BasicModels/logistic_regression.py
@@ -11,7 +11,7 @@

import tensorflow as tf

-# Import MINST data
+# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

2 changes: 1 addition & 1 deletion examples/2_BasicModels/nearest_neighbor.py
@@ -12,7 +12,7 @@
import numpy as np
import tensorflow as tf

-# Import MINST data
+# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

2 changes: 1 addition & 1 deletion examples/3_NeuralNetworks/autoencoder.py
@@ -15,7 +15,7 @@
import numpy as np
import matplotlib.pyplot as plt

-# Import MINST data
+# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

8 changes: 4 additions & 4 deletions examples/3_NeuralNetworks/bidirectional_rnn.py
@@ -1,5 +1,5 @@
'''
-A Bidirectional Reccurent Neural Network (LSTM) implementation example using TensorFlow library.
+A Bidirectional Recurrent Neural Network (LSTM) implementation example using TensorFlow library.
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf

@@ -13,12 +13,12 @@
from tensorflow.python.ops import rnn, rnn_cell
import numpy as np

-# Import MINST data
+# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

'''
-To classify images using a bidirectional reccurent neural network, we consider
+To classify images using a bidirectional recurrent neural network, we consider
every image row as a sequence of pixels. Because MNIST image shape is 28*28px,
we will then handle 28 sequences of 28 steps for every sample.
'''
@@ -41,7 +41,7 @@

# Define weights
weights = {
-# Hidden layer weights => 2*n_hidden because of foward + backward cells
+# Hidden layer weights => 2*n_hidden because of forward + backward cells
'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))
}
biases = {
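For reference, the comment fixed above ("forward + backward cells") explains why the output weight matrix is sized 2*n_hidden: the forward and backward LSTM outputs are concatenated before the final projection. Below is a minimal sketch of that wiring, assuming the same contrib-era API the file imports (`rnn`, `rnn_cell`) and the example's usual names (`n_steps`, `n_hidden`, `n_classes`); the exact return signature of `bidirectional_rnn` varied across TensorFlow releases of this period.

```python
import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell

n_input, n_steps, n_hidden, n_classes = 28, 28, 128, 10

x = tf.placeholder(tf.float32, [None, n_steps, n_input])

# The forward and backward cells each emit n_hidden units per step and their
# outputs are concatenated, hence the [2*n_hidden, n_classes] output weights.
weights = tf.Variable(tf.random_normal([2 * n_hidden, n_classes]))
biases = tf.Variable(tf.random_normal([n_classes]))

# Convert [batch, n_steps, n_input] into a length-n_steps list of
# [batch, n_input] tensors, the input format these contrib-era rnn ops expect.
x_seq = tf.unpack(tf.transpose(x, [1, 0, 2]))

lstm_fw = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
lstm_bw = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
outputs, _, _ = rnn.bidirectional_rnn(lstm_fw, lstm_bw, x_seq, dtype=tf.float32)

# Each element of outputs has shape [batch, 2*n_hidden]; classify from the last step.
pred = tf.matmul(outputs[-1], weights) + biases
```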
2 changes: 1 addition & 1 deletion examples/3_NeuralNetworks/convolutional_network.py
@@ -11,7 +11,7 @@

import tensorflow as tf

-# Import MINST data
+# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

6 changes: 3 additions & 3 deletions examples/3_NeuralNetworks/dynamic_rnn.py
@@ -1,5 +1,5 @@
'''
-A Dynamic Reccurent Neural Network (LSTM) implementation example using
+A Dynamic Recurrent Neural Network (LSTM) implementation example using
TensorFlow library. This example is using a toy dataset to classify linear
sequences. The generated sequences have variable length.

@@ -26,7 +26,7 @@ class ToySequenceData(object):

NOTICE:
We have to pad each sequence to reach 'max_seq_len' for TensorFlow
-consistency (we cannot feed a numpy array with unconsistent
+consistency (we cannot feed a numpy array with inconsistent
dimensions). The dynamic calculation will then be perform thanks to
'seqlen' attribute that records every actual sequence length.
"""
@@ -130,7 +130,7 @@ def dynamicRNN(x, seqlen, weights, biases):
sequence_length=seqlen)

# When performing dynamic calculation, we must retrieve the last
-# dynamically computed output, i.e, if a sequence length is 10, we need
+# dynamically computed output, i.e., if a sequence length is 10, we need
# to retrieve the 10th output.
# However TensorFlow doesn't support advanced indexing yet, so we build
# a custom op that for each sample in batch size, get its length and
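The second hunk above touches the comment describing the workaround for variable-length sequences: the outputs are padded out to the maximum length, and since (at the time) TensorFlow had no advanced indexing, the example flattens the output tensor and gathers one row per sample. A small self-contained sketch of that gather trick follows; the placeholder standing in for the dynamic_rnn output and the sizes are assumptions for illustration.

```python
import tensorflow as tf

n_hidden, max_seq_len = 64, 20

# Stand-ins for the example's tensors: padded RNN outputs and true lengths.
outputs = tf.placeholder(tf.float32, [None, max_seq_len, n_hidden])
seqlen = tf.placeholder(tf.int32, [None])

batch_size = tf.shape(outputs)[0]

# After flattening to [batch*max_seq_len, n_hidden], the last valid step of
# sample i sits at row i*max_seq_len + (seqlen[i] - 1).
index = tf.range(0, batch_size) * max_seq_len + (seqlen - 1)
last_outputs = tf.gather(tf.reshape(outputs, [-1, n_hidden]), index)
```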
2 changes: 1 addition & 1 deletion examples/3_NeuralNetworks/multilayer_perceptron.py
@@ -9,7 +9,7 @@

from __future__ import print_function

-# Import MINST data
+# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

4 changes: 2 additions & 2 deletions examples/3_NeuralNetworks/recurrent_network.py
@@ -1,5 +1,5 @@
'''
-A Reccurent Neural Network (LSTM) implementation example using TensorFlow library.
+A Recurrent Neural Network (LSTM) implementation example using TensorFlow library.
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf

@@ -18,7 +18,7 @@
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

'''
-To classify images using a reccurent neural network, we consider every image
+To classify images using a recurrent neural network, we consider every image
row as a sequence of pixels. Because MNIST image shape is 28*28px, we will then
handle 28 sequences of 28 steps for every sample.
'''
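As the docstring above notes, each flat 784-pixel MNIST vector is reinterpreted as 28 time steps of 28 pixels before being fed to the LSTM. A tiny sketch of that reshape, with `batch_x` standing in for a batch from mnist.train.next_batch:

```python
import numpy as np

n_steps, n_input = 28, 28

# Stand-in for a batch of flat MNIST images, shape [batch, 784].
batch_x = np.zeros((128, 784), dtype=np.float32)

# Each image row becomes one step of a 28-step sequence of 28 pixel values.
batch_x = batch_x.reshape((-1, n_steps, n_input))
print(batch_x.shape)  # (128, 28, 28)
```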
2 changes: 1 addition & 1 deletion examples/4_Utils/save_restore_model.py
@@ -9,7 +9,7 @@

from __future__ import print_function

-# Import MINST data
+# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

4 changes: 2 additions & 2 deletions examples/4_Utils/tensorboard_advanced.py
@@ -11,7 +11,7 @@

import tensorflow as tf

-# Import MINST data
+# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

@@ -64,7 +64,7 @@ def multilayer_perceptron(x, weights, biases):
}

# Encapsulating all ops into scopes, making Tensorboard's Graph
-# visualization more convenient
+# Visualization more convenient
with tf.name_scope('Model'):
# Build model
pred = multilayer_perceptron(x, weights, biases)
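The second hunk above edits the comment about wrapping ops in scopes: tf.name_scope is what makes TensorBoard's Graph tab collapse a model into a few expandable nodes. A minimal sketch of the pattern; the writer class named here (tf.train.SummaryWriter in releases of that era, tf.summary.FileWriter later) and the log directory are assumptions about the installed version.

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784], name='InputData')
W = tf.Variable(tf.zeros([784, 10]), name='Weights')
b = tf.Variable(tf.zeros([10]), name='Bias')

# Ops created inside a name scope show up in TensorBoard as a single
# expandable "Model" node instead of dozens of loose ops.
with tf.name_scope('Model'):
    pred = tf.nn.softmax(tf.matmul(x, W) + b)

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    # Writing the graph is what makes it visible to
    # `tensorboard --logdir=/tmp/tensorflow_logs`.
    writer = tf.train.SummaryWriter('/tmp/tensorflow_logs', graph=sess.graph)
```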
2 changes: 1 addition & 1 deletion examples/4_Utils/tensorboard_basic.py
@@ -11,7 +11,7 @@

import tensorflow as tf

-# Import MINST data
+# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

22 changes: 11 additions & 11 deletions examples/5_MultiGPU/multigpu_basics.py
@@ -18,10 +18,10 @@
import tensorflow as tf
import datetime

-#Processing Units logs
+# Processing Units logs
log_device_placement = True

-#num of multiplications to perform
+# Num of multiplications to perform
n = 10

'''
@@ -30,11 +30,11 @@
* Single GPU computation time: 0:00:11.277449
* Multi GPU computation time: 0:00:07.131701
'''
-#Create random large matrix
+# Create random large matrix
A = np.random.rand(1e4, 1e4).astype('float32')
B = np.random.rand(1e4, 1e4).astype('float32')

-# Creates a graph to store results
+# Create a graph to store results
c1 = []
c2 = []

@@ -50,7 +50,7 @@ def matpow(M, n):
with tf.device('/gpu:0'):
a = tf.constant(A)
b = tf.constant(B)
-#compute A^n and B^n and store results in c1
+# Compute A^n and B^n and store results in c1
c1.append(matpow(a, n))
c1.append(matpow(b, n))

@@ -59,23 +59,23 @@ def matpow(M, n):

t1_1 = datetime.datetime.now()
with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:
-# Runs the op.
+# Run the op.
sess.run(sum)
t2_1 = datetime.datetime.now()


'''
Multi GPU computing
'''
-#GPU:0 computes A^n
+# GPU:0 computes A^n
with tf.device('/gpu:0'):
-#compute A^n and store result in c2
+# Compute A^n and store result in c2
a = tf.constant(A)
c2.append(matpow(a, n))

-#GPU:1 computes B^n
+# GPU:1 computes B^n
with tf.device('/gpu:1'):
-#compute B^n and store result in c2
+# Compute B^n and store result in c2
b = tf.constant(B)
c2.append(matpow(b, n))

@@ -84,7 +84,7 @@ def matpow(M, n):

t1_2 = datetime.datetime.now()
with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:
-# Runs the op.
+# Run the op.
sess.run(sum)
t2_2 = datetime.datetime.now()

Expand Down