
Commit 4c8c201 (1 parent: ef2da69)

add eager API examples

File tree

4 files changed: +383 -0 lines
Lines changed: 68 additions & 0 deletions
@@ -0,0 +1,68 @@
'''
Basic introduction to TensorFlow's Eager API.

Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/

What is Eager API?
" Eager execution is an imperative, define-by-run interface where operations are
executed immediately as they are called from Python. This makes it easier to
get started with TensorFlow, and can make research and development more
intuitive. A vast majority of the TensorFlow API remains the same whether eager
execution is enabled or not. As a result, the exact same code that constructs
TensorFlow graphs (e.g. using the layers API) can be executed imperatively
by using eager execution. Conversely, most models written with Eager enabled
can be converted to a graph that can be further optimized and/or extracted
for deployment in production without changing code. " - Rajat Monga

'''
from __future__ import absolute_import, division, print_function

import numpy as np
import tensorflow as tf
import tensorflow.contrib.eager as tfe

# Set Eager API
print("Setting Eager mode...")
tfe.enable_eager_execution()

# Define constant tensors
print("Define constant tensors")
a = tf.constant(2)
print("a = %i" % a)
b = tf.constant(3)
print("b = %i" % b)

# Run the operation without the need for tf.Session
print("Running operations, without tf.Session")
c = a + b
print("a + b = %i" % c)
d = a * b
print("a * b = %i" % d)


# Full compatibility with Numpy
print("Mixing operations with Tensors and Numpy Arrays")

# Define constant tensors
a = tf.constant([[2., 1.],
                 [1., 0.]], dtype=tf.float32)
print("Tensor:\n a = %s" % a)
b = np.array([[3., 0.],
              [5., 1.]], dtype=np.float32)
print("NumpyArray:\n b = %s" % b)

# Run the operation without the need for tf.Session
print("Running operations, without tf.Session")

c = a + b
print("a + b = %s" % c)

d = tf.matmul(a, b)
print("a * b = %s" % d)

print("Iterate through Tensor 'a':")
for i in range(a.shape[0]):
    for j in range(a.shape[1]):
        print(a[i][j])
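The script above covers forward ops only; the other half of the eager API is define-by-run autodiff. A minimal sketch, assuming the tf.contrib.eager module imported above also exposes gradients_function (as it did in the TF 1.x releases this commit targets):

# Differentiate a plain Python function under eager execution.
def square(x):
    return x * x

grad_fn = tfe.gradients_function(square)  # returns a function computing d(square)/dx
print(grad_fn(tf.constant(3.)))           # expect a gradient of 6.0, since d(x^2)/dx = 2x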
Lines changed: 70 additions & 0 deletions
@@ -0,0 +1,70 @@
'''
A linear regression learning algorithm example using TensorFlow's Eager API,
fit on a small synthetic dataset.

Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
from __future__ import absolute_import, division, print_function

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow.contrib.eager as tfe

# Set Eager API
tfe.enable_eager_execution()

# Training Data
train_X = [3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
           7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1]
train_Y = [1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
           2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3]
n_samples = len(train_X)

# Parameters
learning_rate = 0.01
display_step = 100
num_steps = 1000

# Weight and Bias
W = tfe.Variable(np.random.randn())
b = tfe.Variable(np.random.randn())


# Linear regression (Wx + b)
def linear_regression(inputs):
    return inputs * W + b


# Mean square error
def mean_square_fn(model_fn, inputs, labels):
    return tf.reduce_sum(tf.pow(model_fn(inputs) - labels, 2)) / (2 * n_samples)


# SGD Optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
# Compute gradients
grad = tfe.implicit_gradients(mean_square_fn)

# Initial cost, before optimizing
print("Initial cost= {:.9f}".format(
    mean_square_fn(linear_regression, train_X, train_Y)),
    "W=", W.numpy(), "b=", b.numpy())

# Training
for step in range(num_steps):

    optimizer.apply_gradients(grad(linear_regression, train_X, train_Y))

    if (step + 1) % display_step == 0 or step == 0:
        print("Epoch:", '%04d' % (step + 1), "cost=",
              "{:.9f}".format(mean_square_fn(linear_regression, train_X, train_Y)),
              "W=", W.numpy(), "b=", b.numpy())

# Graphic display
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(train_X, np.array(W * train_X + b), label='Fitted line')
plt.legend()
plt.show()
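tfe.implicit_gradients above differentiates the loss with respect to every variable the loss function touches. The same update step can also be written with an explicit tape; a sketch, assuming tf.GradientTape is available in this TF 1.x build (it shipped alongside the eager API):

# One equivalent training step with an explicit gradient tape.
# W, b, optimizer, and the data are the names defined above.
with tf.GradientTape() as tape:
    loss = mean_square_fn(linear_regression, train_X, train_Y)
dW, db = tape.gradient(loss, [W, b])
optimizer.apply_gradients(zip([dW, db], [W, b]))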
Lines changed: 108 additions & 0 deletions
@@ -0,0 +1,108 @@
'''
A logistic regression learning algorithm example using TensorFlow library.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)

Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
from __future__ import absolute_import, division, print_function

import tensorflow as tf
import tensorflow.contrib.eager as tfe

# Set Eager API
tfe.enable_eager_execution()

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)

# Parameters
learning_rate = 0.1
batch_size = 128
num_steps = 1000
display_step = 100

# Using TF Dataset to split data into batches
dataset = tf.data.Dataset.from_tensor_slices(
    (mnist.train.images, mnist.train.labels)).batch(batch_size)
dataset_iter = tfe.Iterator(dataset)

# Variables
W = tfe.Variable(tf.zeros([784, 10]), name='weights')
b = tfe.Variable(tf.zeros([10]), name='bias')


# Logistic regression (Wx + b)
def logistic_regression(inputs):
    return tf.matmul(inputs, W) + b


# Cross-Entropy loss function
def loss_fn(inference_fn, inputs, labels):
    # Using sparse_softmax cross entropy
    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=inference_fn(inputs), labels=labels))


# Calculate accuracy
def accuracy_fn(inference_fn, inputs, labels):
    prediction = tf.nn.softmax(inference_fn(inputs))
    correct_pred = tf.equal(tf.argmax(prediction, 1), labels)
    return tf.reduce_mean(tf.cast(correct_pred, tf.float32))


# SGD Optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
# Compute gradients
grad = tfe.implicit_gradients(loss_fn)

# Training
average_loss = 0.
average_acc = 0.
for step in range(num_steps):

    # Iterate through the dataset
    try:
        d = dataset_iter.next()
    except StopIteration:
        # Refill queue
        dataset_iter = tfe.Iterator(dataset)
        d = dataset_iter.next()

    # Images
    x_batch = d[0]
    # Labels
    y_batch = tf.cast(d[1], dtype=tf.int64)

    # Compute the batch loss
    batch_loss = loss_fn(logistic_regression, x_batch, y_batch)
    average_loss += batch_loss
    # Compute the batch accuracy
    batch_accuracy = accuracy_fn(logistic_regression, x_batch, y_batch)
    average_acc += batch_accuracy

    if step == 0:
        # Display the initial cost, before optimizing
        print("Initial loss= {:.9f}".format(average_loss))

    # Update the variables following gradients info
    optimizer.apply_gradients(grad(logistic_regression, x_batch, y_batch))

    # Display info
    if (step + 1) % display_step == 0 or step == 0:
        if step > 0:
            average_loss /= display_step
            average_acc /= display_step
        print("Step:", '%04d' % (step + 1), " loss=",
              "{:.9f}".format(average_loss), " accuracy=",
              "{:.4f}".format(average_acc))
        average_loss = 0.
        average_acc = 0.

# Evaluate model on the test image set
testX = mnist.test.images
testY = mnist.test.labels

test_acc = accuracy_fn(logistic_regression, testX, testY)
print("Testset Accuracy: {:.4f}".format(test_acc))
Lines changed: 137 additions & 0 deletions
@@ -0,0 +1,137 @@
""" Neural Network.

A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron)
implementation with TensorFlow's Eager API. This example is using the MNIST
database of handwritten digits (http://yann.lecun.com/exdb/mnist/).

This example is using TensorFlow layers, see 'neural_network_raw' example for
a raw implementation with variables.

Links:
    [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).

Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
"""
from __future__ import print_function

import tensorflow as tf
import tensorflow.contrib.eager as tfe

# Set Eager API
tfe.enable_eager_execution()

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)

# Parameters
learning_rate = 0.001
num_steps = 1000
batch_size = 128
display_step = 100

# Network Parameters
n_hidden_1 = 256  # 1st layer number of neurons
n_hidden_2 = 256  # 2nd layer number of neurons
num_input = 784  # MNIST data input (img shape: 28*28)
num_classes = 10  # MNIST total classes (0-9 digits)

# Using TF Dataset to split data into batches
dataset = tf.data.Dataset.from_tensor_slices(
    (mnist.train.images, mnist.train.labels)).batch(batch_size)
dataset_iter = tfe.Iterator(dataset)


# Define the neural network. To use the eager API and tf.layers API together,
# we must instantiate a tfe.Network class as follows:
class NeuralNet(tfe.Network):
    def __init__(self):
        # Define each layer
        super(NeuralNet, self).__init__()
        # Hidden fully connected layer with 256 neurons
        self.layer1 = self.track_layer(
            tf.layers.Dense(n_hidden_1, activation=tf.nn.relu))
        # Hidden fully connected layer with 256 neurons
        self.layer2 = self.track_layer(
            tf.layers.Dense(n_hidden_2, activation=tf.nn.relu))
        # Output fully connected layer with a neuron for each class
        self.out_layer = self.track_layer(tf.layers.Dense(num_classes))

    def call(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        return self.out_layer(x)


neural_net = NeuralNet()


# Cross-Entropy loss function
def loss_fn(inference_fn, inputs, labels):
    # Using sparse_softmax cross entropy
    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=inference_fn(inputs), labels=labels))


# Calculate accuracy
def accuracy_fn(inference_fn, inputs, labels):
    prediction = tf.nn.softmax(inference_fn(inputs))
    correct_pred = tf.equal(tf.argmax(prediction, 1), labels)
    return tf.reduce_mean(tf.cast(correct_pred, tf.float32))


# Adam Optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# Compute gradients
grad = tfe.implicit_gradients(loss_fn)

# Training
average_loss = 0.
average_acc = 0.
for step in range(num_steps):

    # Iterate through the dataset
    try:
        d = dataset_iter.next()
    except StopIteration:
        # Refill queue
        dataset_iter = tfe.Iterator(dataset)
        d = dataset_iter.next()

    # Images
    x_batch = d[0]
    # Labels
    y_batch = tf.cast(d[1], dtype=tf.int64)

    # Compute the batch loss
    batch_loss = loss_fn(neural_net, x_batch, y_batch)
    average_loss += batch_loss
    # Compute the batch accuracy
    batch_accuracy = accuracy_fn(neural_net, x_batch, y_batch)
    average_acc += batch_accuracy

    if step == 0:
        # Display the initial cost, before optimizing
        print("Initial loss= {:.9f}".format(average_loss))

    # Update the variables following gradients info
    optimizer.apply_gradients(grad(neural_net, x_batch, y_batch))

    # Display info
    if (step + 1) % display_step == 0 or step == 0:
        if step > 0:
            average_loss /= display_step
            average_acc /= display_step
        print("Step:", '%04d' % (step + 1), " loss=",
              "{:.9f}".format(average_loss), " accuracy=",
              "{:.4f}".format(average_acc))
        average_loss = 0.
        average_acc = 0.

# Evaluate model on the test image set
testX = mnist.test.images
testY = mnist.test.labels

test_acc = accuracy_fn(neural_net, testX, testY)
print("Testset Accuracy: {:.4f}".format(test_acc))
