@@ -10,7 +10,7 @@
 from __future__ import print_function
 
 import tensorflow as tf
-from tensorflow.python.ops import rnn, rnn_cell
+from tensorflow.contrib import rnn
 
 # Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
@@ -58,29 +58,29 @@ def RNN(x, weights, biases):
     # Reshaping to (n_steps*batch_size, n_input)
     x = tf.reshape(x, [-1, n_input])
     # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
-    x = tf.split(0, n_steps, x)
+    x = tf.split(x, n_steps, 0)
 
     # Define a lstm cell with tensorflow
-    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
+    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
 
     # Get lstm cell output
-    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
+    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
 
     # Linear activation, using rnn inner loop last output
     return tf.matmul(outputs[-1], weights['out']) + biases['out']
 
 pred = RNN(x, weights, biases)
 
 # Define loss and optimizer
-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
 optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
 
 # Evaluate model
 correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
 accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
 
 # Initializing the variables
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()
 
 # Launch the graph
 with tf.Session() as sess:
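
For reference, below is a minimal sketch of how the migrated graph construction reads under the TensorFlow 1.x APIs adopted in this diff (tf.contrib.rnn, keyword arguments for softmax_cross_entropy_with_logits, tf.global_variables_initializer). The placeholder shapes, hyperparameter values, and the transpose step that precedes the reshape are not part of the hunks shown here; they are filled in as assumptions based on the surrounding MNIST example.

# Sketch only: hyperparameter values and placeholders are illustrative
# assumptions, not taken from the diff itself.
from __future__ import print_function

import tensorflow as tf
from tensorflow.contrib import rnn

learning_rate = 0.001   # assumed value
n_input = 28            # one 28-pixel row of an MNIST image per timestep
n_steps = 28            # 28 rows per image
n_hidden = 128          # LSTM units (assumed value)
n_classes = 10          # MNIST digit classes

x = tf.placeholder(tf.float32, [None, n_steps, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])

weights = {'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))}
biases = {'out': tf.Variable(tf.random_normal([n_classes]))}

def RNN(x, weights, biases):
    # Permute batch_size and n_steps (this step comes before the hunk above),
    # reshape to (n_steps*batch_size, n_input), then split into a
    # length-n_steps list of (batch_size, n_input) tensors, which is the
    # input format static_rnn expects.
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, n_input])
    x = tf.split(x, n_steps, 0)   # TF 1.x argument order: (value, num_splits, axis)

    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    # Linear activation on the last timestep's output.
    return tf.matmul(outputs[-1], weights['out']) + biases['out']

pred = RNN(x, weights, biases)

# TF 1.x requires keyword arguments here, hence logits=/labels= in the diff.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

init = tf.global_variables_initializer()  # replaces tf.initialize_all_variables()

Note on the renamed call: static_rnn unrolls the graph at construction time, which is why the input must be a Python list of per-timestep tensors built by the transpose/reshape/split above; tf.nn.dynamic_rnn is the alternative that consumes the (batch, time, input) tensor directly, but this change keeps the example on the static variant.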