
Commit a5f5e7e

update for tf1.0
1 parent 6f91c25 commit a5f5e7e

8 files changed (+46 additions, -16 deletions)

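Taken together, the changes below migrate the examples from pre-1.0 TensorFlow to the 1.0 API: tf.initialize_all_variables() becomes tf.global_variables_initializer(), the softmax cross-entropy ops are called with explicit logits= and labels= keyword arguments, tf.split uses the reordered 1.0 signature, and the RNN cell and static RNN helpers are imported from tf.contrib.rnn instead of tensorflow.python.ops.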

ann_class2/dropout_tensorflow.py

Lines changed: 7 additions & 2 deletions
@@ -60,13 +60,18 @@ def fit(self, X, Y, lr=10e-7, mu=0.99, decay=0.999, epochs=300, batch_sz=100):
         labels = tf.placeholder(tf.int64, shape=(None,), name='labels')
         logits = self.forward(inputs)

-        cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels))
+        cost = tf.reduce_mean(
+            tf.nn.sparse_softmax_cross_entropy_with_logits(
+                logits=logits,
+                labels=labels
+            )
+        )
         train_op = tf.train.RMSPropOptimizer(lr, decay=decay, momentum=mu).minimize(cost)
         prediction = self.predict(inputs)

         n_batches = N / batch_sz
         costs = []
-        init = tf.initialize_all_variables()
+        init = tf.global_variables_initializer()
         with tf.Session() as session:
             session.run(init)
             for i in xrange(epochs):
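For readers following along, here is a minimal standalone sketch of the TF 1.0 calling convention this hunk adopts; the placeholder shapes and the class count of 10 are illustrative, not taken from the repo. In 1.0 the cross-entropy ops no longer accept logits and labels positionally, so they must be passed as keywords.

import tensorflow as tf

# illustrative shapes: 10 classes, integer class labels
logits = tf.placeholder(tf.float32, shape=(None, 10))
labels = tf.placeholder(tf.int64, shape=(None,))

# pre-1.0 positional form: tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels)
# TF 1.0 requires keyword arguments:
cost = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
)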

ann_class2/tensorflow1.py

Lines changed: 2 additions & 2 deletions
@@ -51,7 +51,7 @@
 t = tf.Variable(0) # a scalar

 # you need to "initialize" the variables first
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()

 with tf.Session() as session:
     out = session.run(init) # and then "run" the init operation
@@ -72,7 +72,7 @@
 train_op = tf.train.GradientDescentOptimizer(0.3).minimize(cost)

 # let's run a session again
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()
 with tf.Session() as session:
     session.run(init)

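As a quick reference for the rename applied throughout this commit, a minimal sketch of the 1.0 initialization pattern (the variable below is illustrative): tf.global_variables_initializer() returns the same kind of init op that tf.initialize_all_variables() did, and it still has to be run inside a session before any variable is used.

import tensorflow as tf

w = tf.Variable(tf.random_normal((2, 2)))  # any variable

# init = tf.initialize_all_variables()    # pre-1.0, deprecated
init = tf.global_variables_initializer()  # TF 1.0

with tf.Session() as session:
    session.run(init)         # variables are only usable after the init op runs
    print(session.run(w))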

ann_class2/tensorflow2.py

Lines changed: 1 addition & 1 deletion
@@ -81,7 +81,7 @@ def main():
     predict_op = tf.argmax(Yish, 1)

     LL = []
-    init = tf.initialize_all_variables()
+    init = tf.global_variables_initializer()
     with tf.Session() as session:
         session.run(init)


ann_class2/tf_with_save.py

Lines changed: 1 addition & 1 deletion
@@ -59,7 +59,7 @@ def fit(self, X, Y, Xtest, Ytest):
         train_op = tf.train.MomentumOptimizer(lr, momentum=mu).minimize(cost)

         costs = []
-        init = tf.initialize_all_variables()
+        init = tf.global_variables_initializer()
         with tf.Session() as session:
             session.run(init)


cnn_class/benchmark.py

Lines changed: 1 addition & 1 deletion
@@ -106,7 +106,7 @@ def main():

     t0 = datetime.now()
     LL = []
-    init = tf.initialize_all_variables()
+    init = tf.global_variables_initializer()
     with tf.Session() as session:
         session.run(init)


cnn_class/cnn_tf.py

Lines changed: 7 additions & 2 deletions
@@ -130,7 +130,12 @@ def main():
     Z3 = tf.nn.relu( tf.matmul(Z2r, W3) + b3 )
     Yish = tf.matmul(Z3, W4) + b4

-    cost = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(Yish, T))
+    cost = tf.reduce_sum(
+        tf.nn.softmax_cross_entropy_with_logits(
+            logits=Yish,
+            labels=T
+        )
+    )

     train_op = tf.train.RMSPropOptimizer(0.0001, decay=0.99, momentum=0.9).minimize(cost)

@@ -139,7 +144,7 @@ def main():

     t0 = datetime.now()
     LL = []
-    init = tf.initialize_all_variables()
+    init = tf.global_variables_initializer()
     with tf.Session() as session:
         session.run(init)
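The CNN scripts use the dense (one-hot) cross-entropy variant rather than the sparse one; a minimal sketch under the same assumption of illustrative shapes: labels=T must be a one-hot float matrix of the same shape as the logits, and the op applies the softmax internally, so Yish is left unnormalized.

import tensorflow as tf

# illustrative shapes: batch of 10-class one-hot targets
Yish = tf.placeholder(tf.float32, shape=(None, 10))  # unnormalized logits
T = tf.placeholder(tf.float32, shape=(None, 10))     # one-hot targets

# keyword arguments are required in TF 1.0, as in the hunk above
cost = tf.reduce_sum(
    tf.nn.softmax_cross_entropy_with_logits(logits=Yish, labels=T)
)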

cnn_class/edge_benchmark.py

Lines changed: 7 additions & 2 deletions
@@ -102,15 +102,20 @@ def main():
     Z2 = tf.nn.relu( tf.matmul(Z1, W2) + b2 )
     Yish = tf.matmul(Z2, W3) + b3

-    cost = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(Yish, T))
+    cost = tf.reduce_sum(
+        tf.nn.softmax_cross_entropy_with_logits(
+            logits=Yish,
+            labels=T
+        )
+    )

     train_op = tf.train.RMSPropOptimizer(0.0001, decay=0.99, momentum=0.9).minimize(cost)

     # we'll use this to calculate the error rate
     predict_op = tf.argmax(Yish, 1)

     LL = []
-    init = tf.initialize_all_variables()
+    init = tf.global_variables_initializer()
     with tf.Session() as session:
         session.run(init)

rnn_class/tf_parity.py

Lines changed: 20 additions & 5 deletions
@@ -5,8 +5,17 @@
 import matplotlib.pyplot as plt

 # from tensorflow.python.ops import rnn as rnn_module
-from tensorflow.python.ops.rnn import rnn as get_rnn_output
-from tensorflow.python.ops.rnn_cell import BasicRNNCell, GRUCell
+
+######## This only works for pre-1.0 versions ##########
+# from tensorflow.python.ops.rnn import rnn as get_rnn_output
+# from tensorflow.python.ops.rnn_cell import BasicRNNCell, GRUCell
+########################################################
+
+########## This works for TensorFlow v1.0 ##############
+from tensorflow.contrib.rnn import static_rnn as get_rnn_output
+from tensorflow.contrib.rnn import BasicRNNCell, GRUCell
+########################################################
+

 from sklearn.utils import shuffle
 from util import init_weight, all_parity_pairs_with_sequence_labels, all_parity_pairs
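For context, a minimal sketch of how the tf.contrib.rnn imports above fit together in TF 1.0; the sizes, the placeholder, and the use of tf.unstack are illustrative and not taken from tf_parity.py. static_rnn expects a Python list of per-timestep (batch, D) tensors and returns the list of outputs plus the final state.

import tensorflow as tf
from tensorflow.contrib.rnn import static_rnn, BasicRNNCell

T, D, M = 5, 3, 10   # sequence length, input size, hidden size (illustrative)
X = tf.placeholder(tf.float32, shape=(None, T, D))

x_seq = tf.unstack(X, T, axis=1)            # length-T list of (batch, D) tensors
cell = BasicRNNCell(num_units=M)
outputs, last_state = static_rnn(cell, x_seq, dtype=tf.float32)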

@@ -17,7 +26,8 @@ def x2sequence(x, T, D, batch_sz):
     # Reshaping to (n_steps*batch_size, n_input)
     x = tf.reshape(x, (T*batch_sz, D))
     # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
-    x = tf.split(0, T, x)
+    # x = tf.split(0, T, x) # v0.1
+    x = tf.split(x, T) # v1.0
     # print "type(x):", type(x)
     return x

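The tf.split change reflects the reordered signature in 1.0: pre-1.0 took (split_dim, num_split, value), while 1.0 takes (value, num_or_size_splits, axis=0). A hedged sketch with illustrative sizes:

import tensorflow as tf

T, batch_sz, D = 4, 2, 3                 # illustrative sizes
x = tf.zeros((T * batch_sz, D))

# pre-1.0: x = tf.split(0, T, x)
x = tf.split(x, T)                       # TF 1.0: list of T tensors, each (batch_sz, D)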

@@ -66,13 +76,18 @@ def fit(self, X, Y, batch_sz=20, learning_rate=10e-1, mu=0.99, activation=tf.nn.
         predict_op = tf.argmax(logits, 1)
         targets = tf.reshape(tfY, (T*batch_sz,))

-        cost_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, targets))
+        cost_op = tf.reduce_mean(
+            tf.nn.sparse_softmax_cross_entropy_with_logits(
+                logits=logits,
+                labels=targets
+            )
+        )
         train_op = tf.train.MomentumOptimizer(learning_rate, momentum=mu).minimize(cost_op)

         costs = []
         n_batches = N / batch_sz

-        init = tf.initialize_all_variables()
+        init = tf.global_variables_initializer()
         with tf.Session() as session:
             session.run(init)
             for i in xrange(epochs):
