
Commit f11b375

Author: User
Commit message: update
Parent: 95538d5

8 files changed: +128 additions, -107 deletions
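
Each file below applies the same TensorFlow 1.x-to-2.x compatibility pattern: detect a 2.x install, disable eager execution, and reach the removed 1.x symbols (placeholder, Session, global_variables_initializer, the tf.train optimizers) through tf.compat.v1. A minimal, self-contained sketch of that pattern, assuming illustrative tensor names and shapes that are not taken from the repo:

import numpy as np
import tensorflow as tf

# TF2 executes eagerly by default; 1.x-style graph code needs graph mode.
if tf.__version__.startswith('2'):
  tf.compat.v1.disable_eager_execution()

# placeholder -> ops -> Session.run(feed_dict=...), all spelled via tf.compat.v1
x = tf.compat.v1.placeholder(tf.float32, shape=(None, 3), name='x')
w = tf.Variable(np.ones((3, 1), dtype=np.float32))
y = tf.matmul(x, w)

init = tf.compat.v1.global_variables_initializer()
with tf.compat.v1.Session() as session:
  session.run(init)
  out = session.run(y, feed_dict={x: np.random.randn(5, 3).astype(np.float32)})
  print(out.shape)  # (5, 1)

With eager execution disabled, the graph-building code in these files can stay as written for TF1, apart from the keyword-argument renames visible in the diffs below.
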

nlp_class2/glove_tf.py (+11, -8)

@@ -22,6 +22,9 @@
 from rnn_class.util import get_wikipedia_data
 from rnn_class.brown import get_sentences_with_word2idx_limit_vocab, get_sentences_with_word2idx
 
+if tf.__version__.startswith('2'):
+  tf.compat.v1.disable_eager_execution()
+
 
 
 class Glove:
@@ -119,22 +122,22 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100,
     tfb = tf.Variable(b.reshape(V, 1).astype(np.float32))
     tfU = tf.Variable(U.astype(np.float32))
     tfc = tf.Variable(c.reshape(1, V).astype(np.float32))
-    tfLogX = tf.placeholder(tf.float32, shape=(V, V))
-    tffX = tf.placeholder(tf.float32, shape=(V, V))
+    tfLogX = tf.compat.v1.placeholder(tf.float32, shape=(V, V))
+    tffX = tf.compat.v1.placeholder(tf.float32, shape=(V, V))
 
-    delta = tf.matmul(tfW, tf.transpose(tfU)) + tfb + tfc + mu - tfLogX
-    cost = tf.reduce_sum(tffX * delta * delta)
+    delta = tf.matmul(tfW, tf.transpose(a=tfU)) + tfb + tfc + mu - tfLogX
+    cost = tf.reduce_sum(input_tensor=tffX * delta * delta)
     regularized_cost = cost
     for param in (tfW, tfU):
-      regularized_cost += reg*tf.reduce_sum(param * param)
+      regularized_cost += reg*tf.reduce_sum(input_tensor=param * param)
 
-    train_op = tf.train.MomentumOptimizer(
+    train_op = tf.compat.v1.train.MomentumOptimizer(
       learning_rate,
       momentum=0.9
     ).minimize(regularized_cost)
     # train_op = tf.train.AdamOptimizer(1e-3).minimize(regularized_cost)
-    init = tf.global_variables_initializer()
-    session = tf.InteractiveSession()
+    init = tf.compat.v1.global_variables_initializer()
+    session = tf.compat.v1.InteractiveSession()
     session.run(init)
 
     costs = []

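Beyond the compat.v1 prefixes, the glove_tf.py changes are keyword renames only: the first positional argument of tf.transpose is named a, and that of tf.reduce_sum is input_tensor, so the weighted least-squares GloVe cost fX * (W U^T + b + c + mu - logX)^2 is computed exactly as before. A tiny sketch of the renamed keywords in isolation, assuming made-up shapes and random data:

import numpy as np
import tensorflow as tf

if tf.__version__.startswith('2'):
  tf.compat.v1.disable_eager_execution()

V, D = 4, 3  # toy vocab size and embedding dimension
W = tf.constant(np.random.randn(V, D).astype(np.float32))
U = tf.constant(np.random.randn(V, D).astype(np.float32))
fX = tf.constant(np.random.rand(V, V).astype(np.float32))     # weighting f(X)
logX = tf.constant(np.random.randn(V, V).astype(np.float32))  # log co-occurrence counts

# 'a' and 'input_tensor' are the TF2 names of the old first positional arguments
delta = tf.matmul(W, tf.transpose(a=U)) - logX
cost = tf.reduce_sum(input_tensor=fX * delta * delta)

with tf.compat.v1.Session() as session:
  print(session.run(cost))  # a scalar weighted squared error
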
nlp_class2/recursive_tensorflow.py (+12, -9)

@@ -17,6 +17,9 @@
 from datetime import datetime
 from util import init_weight, get_ptb_data, display_tree
 
+if tf.__version__.startswith('2'):
+  tf.compat.v1.disable_eager_execution()
+
 
 def get_labels(tree):
   # must be returned in the same order as tree logits are returned
@@ -73,22 +76,22 @@ def fit(self, trees, lr=1e-1, mu=0.9, reg=0.1, epochs=5):
       cost = self.get_cost(logits, labels, reg)
       costs.append(cost)
 
-      prediction = tf.argmax(logits, 1)
+      prediction = tf.argmax(input=logits, axis=1)
       predictions.append(prediction)
 
-      train_op = tf.train.MomentumOptimizer(lr, mu).minimize(cost)
+      train_op = tf.compat.v1.train.MomentumOptimizer(lr, mu).minimize(cost)
       train_ops.append(train_op)
 
     # save for later so we don't have to recompile
     self.predictions = predictions
     self.all_labels = all_labels
-    self.saver = tf.train.Saver()
+    self.saver = tf.compat.v1.train.Saver()
 
-    init = tf.initialize_all_variables()
+    init = tf.compat.v1.initialize_all_variables()
     actual_costs = []
     per_epoch_costs = []
     correct_rates = []
-    with tf.Session() as session:
+    with tf.compat.v1.Session() as session:
       session.run(init)
 
       for i in range(epochs):
@@ -136,7 +139,7 @@ def fit(self, trees, lr=1e-1, mu=0.9, reg=0.1, epochs=5):
 
   def get_cost(self, logits, labels, reg):
     cost = tf.reduce_mean(
-      tf.nn.sparse_softmax_cross_entropy_with_logits(
+      input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits(
         logits=logits,
         labels=labels
       )
@@ -150,7 +153,7 @@ def get_cost(self, logits, labels, reg):
   def get_output_recursive(self, tree, list_of_logits, is_root=True):
     if tree.word is not None:
       # this is a leaf node
-      x = tf.nn.embedding_lookup(self.We, [tree.word])
+      x = tf.nn.embedding_lookup(params=self.We, ids=[tree.word])
     else:
       # this node has children
       x1 = self.get_output_recursive(tree.left, list_of_logits, is_root=False)
@@ -197,12 +200,12 @@ def score(self, trees):
       labels = get_labels(t)
       all_labels.append(labels)
 
-      prediction = tf.argmax(logits, 1)
+      prediction = tf.argmax(input=logits, axis=1)
       predictions.append(prediction)
 
     n_correct = 0
     n_total = 0
-    with tf.Session() as session:
+    with tf.compat.v1.Session() as session:
       self.saver.restore(session, "recursive.ckpt")
       for prediction, y in zip(predictions, all_labels):
         p = session.run(prediction)
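
recursive_tensorflow.py keeps its Saver-based checkpointing; only the symbols move under tf.compat.v1. A small sketch of that save/restore round trip under TF2 with eager disabled; the variable and the ./toy.ckpt path are made up, not the repo's recursive.ckpt:

import numpy as np
import tensorflow as tf

if tf.__version__.startswith('2'):
  tf.compat.v1.disable_eager_execution()

w = tf.Variable(np.float32(3.0), name='w')  # a toy variable to checkpoint
saver = tf.compat.v1.train.Saver()
init = tf.compat.v1.global_variables_initializer()

with tf.compat.v1.Session() as session:
  session.run(init)
  saver.save(session, "./toy.ckpt")

with tf.compat.v1.Session() as session:
  saver.restore(session, "./toy.ckpt")  # restoring replaces running the initializer
  print(session.run(w))                 # 3.0
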

nlp_class2/rntn_tensorflow_rnn.py (+21, -18)

@@ -17,6 +17,9 @@
 from datetime import datetime
 from sklearn.metrics import f1_score
 
+if tf.__version__.startswith('2'):
+  tf.compat.v1.disable_eager_execution()
+
 
 
 class RecursiveNN:
@@ -54,10 +57,10 @@ def fit(self, trees, test_trees, reg=1e-3, epochs=8, train_inner_nodes=False):
     self.weights = [self.We, self.W11, self.W22, self.W12, self.W1, self.W2, self.Wo]
 
 
-    words = tf.placeholder(tf.int32, shape=(None,), name='words')
-    left_children = tf.placeholder(tf.int32, shape=(None,), name='left_children')
-    right_children = tf.placeholder(tf.int32, shape=(None,), name='right_children')
-    labels = tf.placeholder(tf.int32, shape=(None,), name='labels')
+    words = tf.compat.v1.placeholder(tf.int32, shape=(None,), name='words')
+    left_children = tf.compat.v1.placeholder(tf.int32, shape=(None,), name='left_children')
+    right_children = tf.compat.v1.placeholder(tf.int32, shape=(None,), name='right_children')
+    labels = tf.compat.v1.placeholder(tf.int32, shape=(None,), name='labels')
 
     # save for later
     self.words = words
@@ -89,9 +92,9 @@ def recurrence(hiddens, n):
       # any non-word will have index -1
 
       h_n = tf.cond(
-        w >= 0,
-        lambda: tf.nn.embedding_lookup(self.We, w),
-        lambda: recursive_net_transform(hiddens, n)
+        pred=w >= 0,
+        true_fn=lambda: tf.nn.embedding_lookup(params=self.We, ids=w),
+        false_fn=lambda: recursive_net_transform(hiddens, n)
       )
       hiddens = hiddens.write(n, h_n)
       n = tf.add(n, 1)
@@ -100,7 +103,7 @@ def recurrence(hiddens, n):
 
     def condition(hiddens, n):
       # loop should continue while n < len(words)
-      return tf.less(n, tf.shape(words)[0])
+      return tf.less(n, tf.shape(input=words)[0])
 
 
     hiddens = tf.TensorArray(
@@ -112,44 +115,44 @@ def condition(hiddens, n):
     )
 
     hiddens, _ = tf.while_loop(
-      condition,
-      recurrence,
-      [hiddens, tf.constant(0)],
+      cond=condition,
+      body=recurrence,
+      loop_vars=[hiddens, tf.constant(0)],
      parallel_iterations=1
     )
     h = hiddens.stack()
     logits = tf.matmul(h, self.Wo) + self.bo
 
-    prediction_op = tf.argmax(logits, axis=1)
+    prediction_op = tf.argmax(input=logits, axis=1)
     self.prediction_op = prediction_op
 
     rcost = reg*sum(tf.nn.l2_loss(p) for p in self.weights)
     if train_inner_nodes:
       # filter out -1s
-      labeled_indices = tf.where(labels >= 0)
+      labeled_indices = tf.compat.v1.where(labels >= 0)
 
       cost_op = tf.reduce_mean(
-        tf.nn.sparse_softmax_cross_entropy_with_logits(
+        input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits(
          logits=tf.gather(logits, labeled_indices),
          labels=tf.gather(labels, labeled_indices),
        )
      ) + rcost
     else:
       cost_op = tf.reduce_mean(
-        tf.nn.sparse_softmax_cross_entropy_with_logits(
+        input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits(
          logits=logits[-1],
          labels=labels[-1],
        )
      ) + rcost
 
-    train_op = tf.train.AdagradOptimizer(learning_rate=8e-3).minimize(cost_op)
+    train_op = tf.compat.v1.train.AdagradOptimizer(learning_rate=8e-3).minimize(cost_op)
     # train_op = tf.train.MomentumOptimizer(learning_rate=8e-3, momentum=0.9).minimize(cost_op)
 
     # NOTE: If you're using GPU, InteractiveSession breaks
     # AdagradOptimizer and some other optimizers
     # change to tf.Session() if so.
-    self.session = tf.Session()
-    init_op = tf.global_variables_initializer()
+    self.session = tf.compat.v1.Session()
+    init_op = tf.compat.v1.global_variables_initializer()
     self.session.run(init_op)
 
 
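rntn_tensorflow_rnn.py mostly switches tf.cond and tf.while_loop to their keyword forms (pred/true_fn/false_fn and cond/body/loop_vars). A toy sketch of the same TensorArray-plus-while_loop pattern, runnable under TF2 with eager disabled; the loop body and bound are illustrative only:

import tensorflow as tf

if tf.__version__.startswith('2'):
  tf.compat.v1.disable_eager_execution()

# Toy loop: write i*i into a TensorArray for i = 0..4, then stack the result.
def condition(ta, n):
  return tf.less(n, 5)

def body(ta, n):
  ta = ta.write(n, tf.cast(n * n, tf.float32))
  return ta, tf.add(n, 1)

ta = tf.TensorArray(tf.float32, size=5)
ta, _ = tf.while_loop(
  cond=condition,
  body=body,
  loop_vars=[ta, tf.constant(0)],
  parallel_iterations=1
)
squares = ta.stack()

with tf.compat.v1.Session() as session:
  print(session.run(squares))  # [ 0.  1.  4.  9. 16.]
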
nlp_class2/word2vec_tf.py (+16, -13)

@@ -25,6 +25,9 @@
 import sys
 import string
 
+if tf.__version__.startswith('2'):
+  tf.compat.v1.disable_eager_execution()
+
 
 
 # unfortunately these work different ways
@@ -131,36 +134,36 @@ def train_model(savedir):
 
 
   # create the model
-  tf_input = tf.placeholder(tf.int32, shape=(None,))
-  tf_negword = tf.placeholder(tf.int32, shape=(None,))
-  tf_context = tf.placeholder(tf.int32, shape=(None,)) # targets (context)
+  tf_input = tf.compat.v1.placeholder(tf.int32, shape=(None,))
+  tf_negword = tf.compat.v1.placeholder(tf.int32, shape=(None,))
+  tf_context = tf.compat.v1.placeholder(tf.int32, shape=(None,)) # targets (context)
   tfW = tf.Variable(W)
   tfV = tf.Variable(V.T)
   # biases = tf.Variable(np.zeros(vocab_size, dtype=np.float32))
 
   def dot(A, B):
     C = A * B
-    return tf.reduce_sum(C, axis=1)
+    return tf.reduce_sum(input_tensor=C, axis=1)
 
   # correct middle word output
-  emb_input = tf.nn.embedding_lookup(tfW, tf_input) # 1 x D
-  emb_output = tf.nn.embedding_lookup(tfV, tf_context) # N x D
+  emb_input = tf.nn.embedding_lookup(params=tfW, ids=tf_input) # 1 x D
+  emb_output = tf.nn.embedding_lookup(params=tfV, ids=tf_context) # N x D
   correct_output = dot(emb_input, emb_output) # N
   # emb_input = tf.transpose(emb_input, (1, 0))
   # correct_output = tf.matmul(emb_output, emb_input)
   pos_loss = tf.nn.sigmoid_cross_entropy_with_logits(
-    labels=tf.ones(tf.shape(correct_output)), logits=correct_output)
+    labels=tf.ones(tf.shape(input=correct_output)), logits=correct_output)
 
   # incorrect middle word output
-  emb_input = tf.nn.embedding_lookup(tfW, tf_negword)
+  emb_input = tf.nn.embedding_lookup(params=tfW, ids=tf_negword)
   incorrect_output = dot(emb_input, emb_output)
   # emb_input = tf.transpose(emb_input, (1, 0))
   # incorrect_output = tf.matmul(emb_output, emb_input)
   neg_loss = tf.nn.sigmoid_cross_entropy_with_logits(
-    labels=tf.zeros(tf.shape(incorrect_output)), logits=incorrect_output)
+    labels=tf.zeros(tf.shape(input=incorrect_output)), logits=incorrect_output)
 
   # total loss
-  loss = tf.reduce_mean(pos_loss) + tf.reduce_mean(neg_loss)
+  loss = tf.reduce_mean(input_tensor=pos_loss) + tf.reduce_mean(input_tensor=neg_loss)
 
   # output = hidden.dot(tfV)
 
@@ -179,12 +182,12 @@ def dot(A, B):
 
   # optimizer
   # train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
-  train_op = tf.train.MomentumOptimizer(0.1, momentum=0.9).minimize(loss)
+  train_op = tf.compat.v1.train.MomentumOptimizer(0.1, momentum=0.9).minimize(loss)
   # train_op = tf.train.AdamOptimizer(1e-2).minimize(loss)
 
   # make session
-  session = tf.Session()
-  init_op = tf.global_variables_initializer()
+  session = tf.compat.v1.Session()
+  init_op = tf.compat.v1.global_variables_initializer()
   session.run(init_op)
 
 
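word2vec_tf.py keeps the negative-sampling objective: sigmoid cross-entropy against all-ones labels for true (input, context) pairs and all-zeros labels for sampled negative words. A condensed, self-contained sketch of that loss spelled with the TF2 keyword names; the vocabulary size, embedding dimension, and word indices are made up:

import numpy as np
import tensorflow as tf

if tf.__version__.startswith('2'):
  tf.compat.v1.disable_eager_execution()

V_SIZE, D = 10, 4  # toy vocab size and embedding dimension
W_in = tf.Variable(np.random.randn(V_SIZE, D).astype(np.float32))   # input embeddings
W_out = tf.Variable(np.random.randn(V_SIZE, D).astype(np.float32))  # output embeddings

tf_input = tf.compat.v1.placeholder(tf.int32, shape=(None,))
tf_negword = tf.compat.v1.placeholder(tf.int32, shape=(None,))
tf_context = tf.compat.v1.placeholder(tf.int32, shape=(None,))

def dot(A, B):
  return tf.reduce_sum(input_tensor=A * B, axis=1)  # row-wise dot product

emb_in = tf.nn.embedding_lookup(params=W_in, ids=tf_input)
emb_out = tf.nn.embedding_lookup(params=W_out, ids=tf_context)
pos_logits = dot(emb_in, emb_out)
pos_loss = tf.nn.sigmoid_cross_entropy_with_logits(
  labels=tf.ones(tf.shape(input=pos_logits)), logits=pos_logits)

emb_neg = tf.nn.embedding_lookup(params=W_in, ids=tf_negword)
neg_logits = dot(emb_neg, emb_out)
neg_loss = tf.nn.sigmoid_cross_entropy_with_logits(
  labels=tf.zeros(tf.shape(input=neg_logits)), logits=neg_logits)

loss = tf.reduce_mean(input_tensor=pos_loss) + tf.reduce_mean(input_tensor=neg_loss)

with tf.compat.v1.Session() as session:
  session.run(tf.compat.v1.global_variables_initializer())
  print(session.run(loss, feed_dict={
      tf_input: [1, 2], tf_context: [3, 4], tf_negword: [5, 6]}))
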
recommenders/rbm_tf_k.py (+17, -14)

@@ -14,6 +14,9 @@
 from scipy.sparse import lil_matrix, csr_matrix, save_npz, load_npz
 from datetime import datetime
 
+if tf.__version__.startswith('2'):
+  tf.compat.v1.disable_eager_execution()
+
 
 # is it possible to one-hot encode the data prior to feeding it
 # into the neural network, so that we don't have to do it on the fly?
@@ -84,13 +87,13 @@ def __init__(self, D, M, K):
 
   def build(self, D, M, K):
     # params
-    self.W = tf.Variable(tf.random_normal(shape=(D, K, M)) * np.sqrt(2.0 / M))
+    self.W = tf.Variable(tf.random.normal(shape=(D, K, M)) * np.sqrt(2.0 / M))
     self.c = tf.Variable(np.zeros(M).astype(np.float32))
     self.b = tf.Variable(np.zeros((D, K)).astype(np.float32))
 
     # data
-    self.X_in = tf.placeholder(tf.float32, shape=(None, D, K))
-    self.mask = tf.placeholder(tf.float32, shape=(None, D, K))
+    self.X_in = tf.compat.v1.placeholder(tf.float32, shape=(None, D, K))
+    self.mask = tf.compat.v1.placeholder(tf.float32, shape=(None, D, K))
 
     # conditional probabilities
     # NOTE: tf.contrib.distributions.Bernoulli API has changed in Tensorflow v1.2
@@ -99,39 +102,39 @@ def build(self, D, M, K):
     self.p_h_given_v = p_h_given_v # save for later
 
     # draw a sample from p(h | v)
-    r = tf.random_uniform(shape=tf.shape(p_h_given_v))
-    H = tf.to_float(r < p_h_given_v)
+    r = tf.random.uniform(shape=tf.shape(input=p_h_given_v))
+    H = tf.cast(r < p_h_given_v, dtype=tf.float32)
 
     # draw a sample from p(v | h)
     # note: we don't have to actually do the softmax
     logits = dot2(H, self.W) + self.b
-    cdist = tf.distributions.Categorical(logits=logits)
+    cdist = tf.compat.v1.distributions.Categorical(logits=logits)
     X_sample = cdist.sample() # shape is (N, D)
     X_sample = tf.one_hot(X_sample, depth=K) # turn it into (N, D, K)
     X_sample = X_sample * self.mask # missing ratings shouldn't contribute to objective
 
 
     # build the objective
-    objective = tf.reduce_mean(self.free_energy(self.X_in)) - tf.reduce_mean(self.free_energy(X_sample))
-    self.train_op = tf.train.AdamOptimizer(1e-2).minimize(objective)
+    objective = tf.reduce_mean(input_tensor=self.free_energy(self.X_in)) - tf.reduce_mean(input_tensor=self.free_energy(X_sample))
+    self.train_op = tf.compat.v1.train.AdamOptimizer(1e-2).minimize(objective)
     # self.train_op = tf.train.GradientDescentOptimizer(1e-3).minimize(objective)
 
     # build the cost
     # we won't use this to optimize the model parameters
     # just to observe what happens during training
     logits = self.forward_logits(self.X_in)
     self.cost = tf.reduce_mean(
-      tf.nn.softmax_cross_entropy_with_logits(
-        labels=self.X_in,
+      input_tensor=tf.nn.softmax_cross_entropy_with_logits(
+        labels=tf.stop_gradient(self.X_in),
         logits=logits,
       )
     )
 
     # to get the output
     self.output_visible = self.forward_output(self.X_in)
 
-    initop = tf.global_variables_initializer()
-    self.session = tf.Session()
+    initop = tf.compat.v1.global_variables_initializer()
+    self.session = tf.compat.v1.Session()
     self.session.run(initop)
 
   def fit(self, X, mask, X_test, mask_test, epochs=10, batch_sz=256, show_fig=True):
@@ -202,10 +205,10 @@ def fit(self, X, mask, X_test, mask_test, epochs=10, batch_sz=256, show_fig=True
       plt.show()
 
   def free_energy(self, V):
-    first_term = -tf.reduce_sum(dot1(V, self.b))
+    first_term = -tf.reduce_sum(input_tensor=dot1(V, self.b))
     second_term = -tf.reduce_sum(
       # tf.log(1 + tf.exp(tf.matmul(V, self.W) + self.c)),
-      tf.nn.softplus(dot1(V, self.W) + self.c),
+      input_tensor=tf.nn.softplus(dot1(V, self.W) + self.c),
       axis=1
     )
     return first_term + second_term
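
In rbm_tf_k.py the random ops move into the tf.random namespace, the removed tf.to_float becomes tf.cast(..., tf.float32), and the cross-entropy labels are wrapped in tf.stop_gradient (the TF2 softmax_cross_entropy_with_logits also backpropagates into its labels by default, so stopping the gradient preserves the old behavior). A short sketch of the hidden-unit sampling step with those replacements, assuming illustrative probabilities:

import tensorflow as tf

if tf.__version__.startswith('2'):
  tf.compat.v1.disable_eager_execution()

# Sample binary hidden units H ~ Bernoulli(p_h_given_v), TF2 spelling.
p_h_given_v = tf.constant([[0.9, 0.1, 0.5],
                           [0.2, 0.8, 0.5]])
r = tf.random.uniform(shape=tf.shape(input=p_h_given_v))  # was tf.random_uniform
H = tf.cast(r < p_h_given_v, dtype=tf.float32)            # was tf.to_float(...)

with tf.compat.v1.Session() as session:
  print(session.run(H))  # a 2x3 matrix of 0.0s and 1.0s
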
