
Commit e862283

added in some really basic comments
1 parent fafe958 commit e862283

File tree

5 files changed, +25 -25 lines changed


0_multiply.py

Lines changed: 5 additions & 5 deletions
@@ -1,11 +1,11 @@
 import tensorflow as tf

-a = tf.placeholder("float")
-b = tf.placeholder("float")
+a = tf.placeholder("float") # Create a symbolic variable 'a'
+b = tf.placeholder("float") # Create a symbolic variable 'b'

-y = tf.mul(a, b)
+y = tf.mul(a, b) # multiply the symbolic variables

-sess = tf.Session()
+sess = tf.Session() # create a session to evaluate the symbolic expressions

-print "%f should equal 2.0" % sess.run(y, feed_dict={a: 1, b: 2})
+print "%f should equal 2.0" % sess.run(y, feed_dict={a: 1, b: 2}) # eval expressions with parameters for a and b
 print "%f should equal 9.0" % sess.run(y, feed_dict={a: 3, b: 3})

1_linear_regression.py

Lines changed: 8 additions & 8 deletions
@@ -2,29 +2,29 @@
 import numpy as np

 trX = np.linspace(-1, 1, 101)
-trY = 2 * trX + np.random.randn(*trX.shape) * 0.33
+trY = 2 * trX + np.random.randn(*trX.shape) * 0.33 # create a y value which is approximately linear but with some random noise

-X = tf.placeholder("float")
+X = tf.placeholder("float") # create symbolic variables
 Y = tf.placeholder("float")


 def model(X, w):
-    return tf.mul(X, w)
+    return tf.mul(X, w) # lr is just X*w so this model line is pretty simple


-w = tf.Variable(0.0, name="weights")
+w = tf.Variable(0.0, name="weights") # create a shared variable (like theano.shared) for the weight matrix
 y_model = model(X, w)

-cost = (tf.pow(Y-y_model, 2))
+cost = (tf.pow(Y-y_model, 2)) # use sqr error for cost function

-train_op = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
+train_op = tf.train.GradientDescentOptimizer(0.01).minimize(cost) # construct an optimizer to minimize cost and fit line to my data

 sess = tf.Session()
-init = tf.initialize_all_variables()
+init = tf.initialize_all_variables() # you need to initialize variables (in this case just variable W)
 sess.run(init)

 for i in range(100):
-    for (x, y) in zip(trX, trY):
+    for (x, y) in zip(trX, trY):
         sess.run(train_op, feed_dict={X: x, Y: y})

 print(sess.run(w)) # something around 2
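As a cross-check on what GradientDescentOptimizer is doing for the single weight w, a plain NumPy sketch (illustrative only, not part of the commit) of the same per-sample gradient-descent fit of y ≈ w*x under squared error:

import numpy as np

trX = np.linspace(-1, 1, 101)
trY = 2 * trX + np.random.randn(*trX.shape) * 0.33

w = 0.0
lr = 0.01
for _ in range(100):
    for x, y in zip(trX, trY):
        grad = -2 * x * (y - x * w)   # d/dw of (y - x*w)^2
        w -= lr * grad                # one SGD step per sample, like train_op above

print(w)  # should also land somewhere around 2, matching the TF version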

2_logistic_regression.py

Lines changed: 6 additions & 6 deletions
@@ -8,22 +8,22 @@ def init_weights(shape):


 def model(X, w):
-    return tf.matmul(X, w)
+    return tf.matmul(X, w) # notice we use the same model as linear regression, this is because there is a baked in cost function which performs softmax and cross entropy


 mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
 trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels

-X = tf.placeholder("float", [None, 784])
+X = tf.placeholder("float", [None, 784]) # create symbolic variables
 Y = tf.placeholder("float", [None, 10])

-w = init_weights([784, 10])
+w = init_weights([784, 10]) # like in linear regression, we need a shared variable weight matrix for logistic regression

 py_x = model(X, w)

-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(py_x, Y))
-train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost)
-predict_op = tf.argmax(py_x, 1)
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(py_x, Y)) # compute mean cross entropy (softmax is applied internally)
+train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost) # construct optimizer
+predict_op = tf.argmax(py_x, 1) # at predict time, evaluate the argmax of the logistic regression

 sess = tf.Session()
 init = tf.initialize_all_variables()
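The "baked in" cost mentioned in the new comments can be made concrete with a small NumPy sketch (illustrative only, not from this repo) of softmax followed by mean cross entropy, which is the computation softmax_cross_entropy_with_logits applies to the logits py_x:

import numpy as np

def softmax_cross_entropy(logits, one_hot_labels):
    # softmax row by row, shifted by the row max for numerical stability
    shifted = logits - logits.max(axis=1, keepdims=True)
    probs = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    # cross entropy per row, then mean over the batch (like tf.reduce_mean above)
    return np.mean(-np.sum(one_hot_labels * np.log(probs), axis=1))

logits = np.array([[2.0, 0.5, -1.0], [0.1, 0.2, 3.0]])
labels = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
print(softmax_cross_entropy(logits, labels))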

3_net.py

Lines changed: 5 additions & 5 deletions
@@ -8,8 +8,8 @@ def init_weights(shape):


 def model(X, w_h, w_o):
-    h = tf.nn.sigmoid(tf.matmul(X, w_h))
-    return tf.matmul(h, w_o)
+    h = tf.nn.sigmoid(tf.matmul(X, w_h)) # this is a basic mlp, think 2 stacked logistic regressions
+    return tf.matmul(h, w_o) # note that we dont take the softmax at the end because our cost fn does that for us


 mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
@@ -18,13 +18,13 @@ def model(X, w_h, w_o):
 X = tf.placeholder("float", [None, 784])
 Y = tf.placeholder("float", [None, 10])

-w_h = init_weights([784, 625])
+w_h = init_weights([784, 625]) # create symbolic variables
 w_o = init_weights([625, 10])

 py_x = model(X, w_h, w_o)

-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(py_x, Y))
-train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost)
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(py_x, Y)) # compute costs
+train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost) # construct an optimizer
 predict_op = tf.argmax(py_x, 1)

 sess = tf.Session()
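To make the "2 stacked logistic regressions" comment concrete, a hedged NumPy sketch (not part of the commit) of the same forward pass with the same shapes, 784 -> 625 -> 10:

import numpy as np

def forward(X, w_h, w_o):
    h = 1.0 / (1.0 + np.exp(-X.dot(w_h)))  # sigmoid hidden layer
    return h.dot(w_o)                       # raw logits; the softmax happens inside the cost

X = np.random.randn(128, 784)               # a batch of 128 flattened MNIST images
w_h = np.random.randn(784, 625) * 0.01
w_o = np.random.randn(625, 10) * 0.01
print(forward(X, w_h, w_o).shape)            # (128, 10): one logit per class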

4_modern_net.py

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@ def init_weights(shape):
     return tf.Variable(tf.random_normal(shape, stddev=0.01))


-def model(X, w_h, w_h2, w_o, p_drop_input, p_drop_hidden):
+def model(X, w_h, w_h2, w_o, p_drop_input, p_drop_hidden): # this network is the same as the previous one except with an extra hidden layer + dropout
     X = tf.nn.dropout(X, p_drop_input)
     h = tf.nn.relu(tf.matmul(X, w_h))
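For reference, a hedged NumPy sketch (illustrative, not from this commit) of what a dropout call like the tf.nn.dropout above does for a given keep probability: zero out a random fraction of activations and rescale the survivors so the expected activation is unchanged (inverted dropout):

import numpy as np

def dropout(x, keep_prob, rng=np.random):
    mask = rng.binomial(1, keep_prob, size=x.shape)  # 1 = keep, 0 = drop
    return x * mask / keep_prob                      # rescale so E[output] == x

h = np.random.randn(4, 5)
print(dropout(h, keep_prob=0.8))  # roughly 20% of entries zeroed, the rest scaled by 1/0.8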
