Commit 63abe61

Update recurrent network for TF1.0
Signed-off-by: Norman Heckscher <norman.heckscher@gmail.com>
1 parent b980473 commit 63abe61

File tree

1 file changed: +42 -123 lines changed


notebooks/3_NeuralNetworks/recurrent_network.ipynb

Lines changed: 42 additions & 123 deletions
@@ -1,10 +1,10 @@
 {
  "cells": [
   {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
    "source": [
     "'''\n",
     "A Reccurent Neural Network (LSTM) implementation example using TensorFlow library.\n",
@@ -18,35 +18,26 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
-     ]
-    }
-   ],
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
    "source": [
     "import tensorflow as tf\n",
-    "from tensorflow.python.ops import rnn, rnn_cell\n",
+    "from tensorflow.contrib import rnn\n",
     "import numpy as np\n",
     "\n",
     "# Import MINST data\n",
     "from tensorflow.examples.tutorials.mnist import input_data\n",
-    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+    "mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)"
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
    "source": [
     "'''\n",
     "To classify images using a reccurent neural network, we consider every image\n",
@@ -58,7 +49,9 @@
   {
    "cell_type": "code",
    "execution_count": 2,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "# Parameters\n",
@@ -89,7 +82,9 @@
   {
    "cell_type": "code",
    "execution_count": 3,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "def RNN(x, weights, biases):\n",
@@ -103,123 +98,38 @@
     "    # Reshaping to (n_steps*batch_size, n_input)\n",
     "    x = tf.reshape(x, [-1, n_input])\n",
     "    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)\n",
-    "    x = tf.split(0, n_steps, x)\n",
+    "    x = tf.split(x, n_steps, 0)\n",
     "\n",
     "    # Define a lstm cell with tensorflow\n",
-    "    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)\n",
+    "    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)\n",
     "\n",
     "    # Get lstm cell output\n",
-    "    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)\n",
+    "    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)\n",
     "\n",
     "    # Linear activation, using rnn inner loop last output\n",
     "    return tf.matmul(outputs[-1], weights['out']) + biases['out']\n",
     "\n",
     "pred = RNN(x, weights, biases)\n",
     "\n",
     "# Define loss and optimizer\n",
-    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\n",
+    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n",
     "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n",
     "\n",
     "# Evaluate model\n",
     "correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\n",
     "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
     "\n",
     "# Initializing the variables\n",
-    "init = tf.initialize_all_variables()"
+    "init = tf.global_variables_initializer()"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Iter 1280, Minibatch Loss= 1.538532, Training Accuracy= 0.49219\n",
-      "Iter 2560, Minibatch Loss= 1.462834, Training Accuracy= 0.50781\n",
-      "Iter 3840, Minibatch Loss= 1.048393, Training Accuracy= 0.66406\n",
-      "Iter 5120, Minibatch Loss= 0.889872, Training Accuracy= 0.71875\n",
-      "Iter 6400, Minibatch Loss= 0.681855, Training Accuracy= 0.76562\n",
-      "Iter 7680, Minibatch Loss= 0.987207, Training Accuracy= 0.69531\n",
-      "Iter 8960, Minibatch Loss= 0.759543, Training Accuracy= 0.71094\n",
-      "Iter 10240, Minibatch Loss= 0.557055, Training Accuracy= 0.80469\n",
-      "Iter 11520, Minibatch Loss= 0.371352, Training Accuracy= 0.89844\n",
-      "Iter 12800, Minibatch Loss= 0.661293, Training Accuracy= 0.80469\n",
-      "Iter 14080, Minibatch Loss= 0.474259, Training Accuracy= 0.86719\n",
-      "Iter 15360, Minibatch Loss= 0.328436, Training Accuracy= 0.88281\n",
-      "Iter 16640, Minibatch Loss= 0.348017, Training Accuracy= 0.93750\n",
-      "Iter 17920, Minibatch Loss= 0.340086, Training Accuracy= 0.88281\n",
-      "Iter 19200, Minibatch Loss= 0.261532, Training Accuracy= 0.89844\n",
-      "Iter 20480, Minibatch Loss= 0.161785, Training Accuracy= 0.94531\n",
-      "Iter 21760, Minibatch Loss= 0.419619, Training Accuracy= 0.83594\n",
-      "Iter 23040, Minibatch Loss= 0.120714, Training Accuracy= 0.95312\n",
-      "Iter 24320, Minibatch Loss= 0.339519, Training Accuracy= 0.89062\n",
-      "Iter 25600, Minibatch Loss= 0.405463, Training Accuracy= 0.88281\n",
-      "Iter 26880, Minibatch Loss= 0.172193, Training Accuracy= 0.95312\n",
-      "Iter 28160, Minibatch Loss= 0.256769, Training Accuracy= 0.91406\n",
-      "Iter 29440, Minibatch Loss= 0.247753, Training Accuracy= 0.91406\n",
-      "Iter 30720, Minibatch Loss= 0.230820, Training Accuracy= 0.91406\n",
-      "Iter 32000, Minibatch Loss= 0.216861, Training Accuracy= 0.93750\n",
-      "Iter 33280, Minibatch Loss= 0.236337, Training Accuracy= 0.89062\n",
-      "Iter 34560, Minibatch Loss= 0.252351, Training Accuracy= 0.93750\n",
-      "Iter 35840, Minibatch Loss= 0.180090, Training Accuracy= 0.92188\n",
-      "Iter 37120, Minibatch Loss= 0.304125, Training Accuracy= 0.91406\n",
-      "Iter 38400, Minibatch Loss= 0.114474, Training Accuracy= 0.96094\n",
-      "Iter 39680, Minibatch Loss= 0.158405, Training Accuracy= 0.96875\n",
-      "Iter 40960, Minibatch Loss= 0.285858, Training Accuracy= 0.92188\n",
-      "Iter 42240, Minibatch Loss= 0.134199, Training Accuracy= 0.96094\n",
-      "Iter 43520, Minibatch Loss= 0.235847, Training Accuracy= 0.92969\n",
-      "Iter 44800, Minibatch Loss= 0.155971, Training Accuracy= 0.94531\n",
-      "Iter 46080, Minibatch Loss= 0.061549, Training Accuracy= 0.99219\n",
-      "Iter 47360, Minibatch Loss= 0.232569, Training Accuracy= 0.94531\n",
-      "Iter 48640, Minibatch Loss= 0.270348, Training Accuracy= 0.91406\n",
-      "Iter 49920, Minibatch Loss= 0.202416, Training Accuracy= 0.92188\n",
-      "Iter 51200, Minibatch Loss= 0.113857, Training Accuracy= 0.96094\n",
-      "Iter 52480, Minibatch Loss= 0.137900, Training Accuracy= 0.94531\n",
-      "Iter 53760, Minibatch Loss= 0.052416, Training Accuracy= 0.98438\n",
-      "Iter 55040, Minibatch Loss= 0.312064, Training Accuracy= 0.91406\n",
-      "Iter 56320, Minibatch Loss= 0.144335, Training Accuracy= 0.93750\n",
-      "Iter 57600, Minibatch Loss= 0.114723, Training Accuracy= 0.96875\n",
-      "Iter 58880, Minibatch Loss= 0.193597, Training Accuracy= 0.96094\n",
-      "Iter 60160, Minibatch Loss= 0.110877, Training Accuracy= 0.95312\n",
-      "Iter 61440, Minibatch Loss= 0.119864, Training Accuracy= 0.96094\n",
-      "Iter 62720, Minibatch Loss= 0.118780, Training Accuracy= 0.94531\n",
-      "Iter 64000, Minibatch Loss= 0.082259, Training Accuracy= 0.97656\n",
-      "Iter 65280, Minibatch Loss= 0.087364, Training Accuracy= 0.97656\n",
-      "Iter 66560, Minibatch Loss= 0.207975, Training Accuracy= 0.92969\n",
-      "Iter 67840, Minibatch Loss= 0.120612, Training Accuracy= 0.96875\n",
-      "Iter 69120, Minibatch Loss= 0.070608, Training Accuracy= 0.96875\n",
-      "Iter 70400, Minibatch Loss= 0.100786, Training Accuracy= 0.96094\n",
-      "Iter 71680, Minibatch Loss= 0.114746, Training Accuracy= 0.94531\n",
-      "Iter 72960, Minibatch Loss= 0.083427, Training Accuracy= 0.96875\n",
-      "Iter 74240, Minibatch Loss= 0.089978, Training Accuracy= 0.96094\n",
-      "Iter 75520, Minibatch Loss= 0.195322, Training Accuracy= 0.94531\n",
-      "Iter 76800, Minibatch Loss= 0.161109, Training Accuracy= 0.96094\n",
-      "Iter 78080, Minibatch Loss= 0.169762, Training Accuracy= 0.94531\n",
-      "Iter 79360, Minibatch Loss= 0.054240, Training Accuracy= 0.98438\n",
-      "Iter 80640, Minibatch Loss= 0.160100, Training Accuracy= 0.95312\n",
-      "Iter 81920, Minibatch Loss= 0.110728, Training Accuracy= 0.96875\n",
-      "Iter 83200, Minibatch Loss= 0.054918, Training Accuracy= 0.98438\n",
-      "Iter 84480, Minibatch Loss= 0.104170, Training Accuracy= 0.96875\n",
-      "Iter 85760, Minibatch Loss= 0.071871, Training Accuracy= 0.97656\n",
-      "Iter 87040, Minibatch Loss= 0.170529, Training Accuracy= 0.96094\n",
-      "Iter 88320, Minibatch Loss= 0.087350, Training Accuracy= 0.96875\n",
-      "Iter 89600, Minibatch Loss= 0.079943, Training Accuracy= 0.96875\n",
-      "Iter 90880, Minibatch Loss= 0.128451, Training Accuracy= 0.92969\n",
-      "Iter 92160, Minibatch Loss= 0.046963, Training Accuracy= 0.98438\n",
-      "Iter 93440, Minibatch Loss= 0.162998, Training Accuracy= 0.96875\n",
-      "Iter 94720, Minibatch Loss= 0.122588, Training Accuracy= 0.96094\n",
-      "Iter 96000, Minibatch Loss= 0.073954, Training Accuracy= 0.97656\n",
-      "Iter 97280, Minibatch Loss= 0.130790, Training Accuracy= 0.96094\n",
-      "Iter 98560, Minibatch Loss= 0.067689, Training Accuracy= 0.97656\n",
-      "Iter 99840, Minibatch Loss= 0.186411, Training Accuracy= 0.92188\n",
-      "Optimization Finished!\n",
-      "Testing Accuracy: 0.976562\n"
-     ]
-    }
-   ],
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
    "source": [
     "# Launch the graph\n",
     "with tf.Session() as sess:\n",
@@ -250,6 +160,15 @@
     "    print \"Testing Accuracy:\", \\\n",
     "        sess.run(accuracy, feed_dict={x: test_data, y: test_label})"
    ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {
@@ -261,14 +180,14 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-     "version": 2.0
+     "version": 2
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython2",
-   "version": "2.7.11"
+   "version": "2.7.13"
   }
  },
  "nbformat": 4,
