
Commit 16faec6

test single autoencoder
1 parent 65ef351 commit 16faec6

File tree

1 file changed: +46 -5 lines changed


unsupervised_class2/autoencoder_tf.py

+46 -5
@@ -1,7 +1,7 @@
 # https://deeplearningcourses.com/c/unsupervised-deep-learning-in-python
 # https://www.udemy.com/unsupervised-deep-learning-in-python
 from __future__ import print_function, division
-from builtins import range
+from builtins import range, input
 # Note: you may need to update your version of future
 # sudo pip install -U future
 
@@ -45,7 +45,7 @@ def build(self, D, M):
     self.train_op = tf.train.AdamOptimizer(1e-1).minimize(self.cost)
     # self.train_op = tf.train.MomentumOptimizer(10e-4, momentum=0.9).minimize(self.cost)
 
-  def fit(self, X, learning_rate=0.5, mu=0.99, epochs=1, batch_sz=100, show_fig=False):
+  def fit(self, X, epochs=1, batch_sz=100, show_fig=False):
     N, D = X.shape
     n_batches = N // batch_sz
 
@@ -70,6 +70,12 @@ def transform(self, X):
     # which deal with tensorflow variables
     return self.session.run(self.Z, feed_dict={self.X_in: X})
 
+  def predict(self, X):
+    # accepts and returns a real numpy array
+    # unlike forward_hidden and forward_output
+    # which deal with tensorflow variables
+    return self.session.run(self.X_hat, feed_dict={self.X_in: X})
+
   def forward_hidden(self, X):
     Z = tf.nn.sigmoid(tf.matmul(X, self.W) + self.bh)
     return Z
@@ -118,7 +124,7 @@ def build_final_layer(self, D, M, K):
     self.train_op = tf.train.AdamOptimizer(1e-2).minimize(self.cost)
     self.prediction = tf.argmax(logits, 1)
 
-  def fit(self, X, Y, Xtest, Ytest, pretrain=True, learning_rate=0.01, mu=0.99, reg=0.1, epochs=1, batch_sz=100):
+  def fit(self, X, Y, Xtest, Ytest, pretrain=True, epochs=1, batch_sz=100):
     N = len(X)
 
     # greedy layer-wise training of autoencoders
@@ -168,7 +174,7 @@ def forward(self, X):
     return logits
 
 
-def main():
+def test_pretraining_dnn():
   Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST()
   # dnn = DNN([1000, 750, 500])
   # dnn.fit(Xtrain, Ytrain, Xtest, Ytest, epochs=3)
@@ -185,5 +191,40 @@ def main():
   dnn.fit(Xtrain, Ytrain, Xtest, Ytest, pretrain=True, epochs=10)
 
 
+def test_single_autoencoder():
+  Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST()
+  Xtrain = Xtrain.astype(np.float32)
+  Xtest = Xtest.astype(np.float32)
+
+  _, D = Xtrain.shape
+  autoencoder = AutoEncoder(D, 300, 0)
+  init_op = tf.global_variables_initializer()
+  with tf.Session() as session:
+    session.run(init_op)
+    autoencoder.set_session(session)
+    autoencoder.fit(Xtrain, show_fig=True)
+
+    done = False
+    while not done:
+      i = np.random.choice(len(Xtest))
+      x = Xtest[i]
+      y = autoencoder.predict([x])
+      plt.subplot(1,2,1)
+      plt.imshow(x.reshape(28,28), cmap='gray')
+      plt.title('Original')
+
+      plt.subplot(1,2,2)
+      plt.imshow(y.reshape(28,28), cmap='gray')
+      plt.title('Reconstructed')
+
+      plt.show()
+
+      ans = input("Generate another?")
+      if ans and ans[0] in ('n', 'N'):
+        done = True
+
+
+
 if __name__ == '__main__':
-  main()
+  # test_single_autoencoder()
+  test_pretraining_dnn()
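
For context (not part of the commit): the new predict() accepts and returns plain numpy arrays, so it can also back a quick quantitative check alongside the visual comparison in test_single_autoencoder. Below is a minimal sketch, assuming the AutoEncoder class, the getKaggleMNIST helper, numpy as np, and the TF1-style session handling shown in the diff above; the helper name mean_reconstruction_error is hypothetical, not something defined in autoencoder_tf.py.

import numpy as np

def mean_reconstruction_error(autoencoder, X, batch_sz=100):
  # Average squared pixel error between X and its reconstruction,
  # computed in batches via the new predict() method (numpy in, numpy out).
  total, count = 0.0, 0
  for j in range(0, len(X), batch_sz):
    batch = X[j:j+batch_sz]
    X_hat = autoencoder.predict(batch)
    total += np.sum((batch - X_hat)**2)
    count += batch.size
  return total / count

# e.g. inside the session block of test_single_autoencoder, after fit():
# print("test reconstruction MSE:", mean_reconstruction_error(autoencoder, Xtest))

Batching keeps each feed_dict small; since predict() hides the session call, nothing TensorFlow-specific is needed in the helper itself.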

0 commit comments
