forked from tensorflow/models
Commit 70702f7 (1 parent: 574c981)
Showing 12 changed files with 480 additions and 0 deletions.
@@ -0,0 +1,2 @@
autoencoder/MNIST_data/*
*.pyc
@@ -0,0 +1 @@
Very simple implementations of some autoencoder variations
@@ -0,0 +1,51 @@
import numpy as np

import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

from autoencoder.autoencoder_models.DenoisingAutoencoder import AdditiveGaussianNoiseAutoencoder

mnist = input_data.read_data_sets('MNIST_data', one_hot = True)

def standard_scale(X_train, X_test):
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test

def get_random_block_from_data(data, batch_size):
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]

X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)

n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1

autoencoder = AdditiveGaussianNoiseAutoencoder(n_input = 784,
                                               n_hidden = 200,
                                               transfer_function = tf.nn.softplus,
                                               optimizer = tf.train.AdamOptimizer(learning_rate = 0.001),
                                               scale = 0.01)

for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(n_samples / batch_size)
    # Loop over all batches
    for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)

        # Fit training using batch data
        cost = autoencoder.partial_fit(batch_xs)
        # Compute average loss
        avg_cost += cost / n_samples * batch_size

    # Display logs per epoch step
    if epoch % display_step == 0:
        print "Epoch:", '%04d' % (epoch + 1), \
            "cost=", "{:.9f}".format(avg_cost)

print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
@@ -0,0 +1,50 @@
import numpy as np

import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

from autoencoder.autoencoder_models.Autoencoder import Autoencoder

mnist = input_data.read_data_sets('MNIST_data', one_hot = True)

def standard_scale(X_train, X_test):
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test

def get_random_block_from_data(data, batch_size):
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]

X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)

n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1

autoencoder = Autoencoder(n_input = 784,
                          n_hidden = 200,
                          transfer_function = tf.nn.softplus,
                          optimizer = tf.train.AdamOptimizer(learning_rate = 0.001))

for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(n_samples / batch_size)
    # Loop over all batches
    for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)

        # Fit training using batch data
        cost = autoencoder.partial_fit(batch_xs)
        # Compute average loss
        avg_cost += cost / n_samples * batch_size

    # Display logs per epoch step
    if epoch % display_step == 0:
        print "Epoch:", '%04d' % (epoch + 1), \
            "cost=", "{:.9f}".format(avg_cost)

print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
@@ -0,0 +1,49 @@
import numpy as np

import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

from autoencoder.autoencoder_models.DenoisingAutoencoder import MaskingNoiseAutoencoder

mnist = input_data.read_data_sets('MNIST_data', one_hot = True)

def standard_scale(X_train, X_test):
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test

def get_random_block_from_data(data, batch_size):
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]

X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)

n_samples = int(mnist.train.num_examples)
training_epochs = 100
batch_size = 128
display_step = 1

autoencoder = MaskingNoiseAutoencoder(n_input = 784,
                                      n_hidden = 200,
                                      transfer_function = tf.nn.softplus,
                                      optimizer = tf.train.AdamOptimizer(learning_rate = 0.001),
                                      dropout_probability = 0.95)

for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(n_samples / batch_size)
    for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)

        cost = autoencoder.partial_fit(batch_xs)

        avg_cost += cost / n_samples * batch_size

    if epoch % display_step == 0:
        print "Epoch:", '%04d' % (epoch + 1), \
            "cost=", "{:.9f}".format(avg_cost)

print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
@@ -0,0 +1,9 @@
import numpy as np
import tensorflow as tf

def xavier_init(fan_in, fan_out, constant = 1):
    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
    high = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval = low, maxval = high,
                             dtype = tf.float32)
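Reviewer's note: xavier_init draws weights uniformly from the interval [-sqrt(6 / (fan_in + fan_out)), +sqrt(6 / (fan_in + fan_out))], the Glorot heuristic that keeps activation variance roughly constant across layers. It returns a tensor rather than a variable, so call sites wrap it in tf.Variable, as the Autoencoder class below does:

# Typical call site (mirrors _initialize_weights in this commit):
# a 784-in, 200-out encoder weight matrix with Xavier-scaled bounds.
w1 = tf.Variable(xavier_init(784, 200))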
@@ -0,0 +1,53 @@
import numpy as np

import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

from autoencoder.autoencoder_models.VariationalAutoencoder import VariationalAutoencoder

mnist = input_data.read_data_sets('MNIST_data', one_hot = True)

def standard_scale(X_train, X_test):
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test

def get_random_block_from_data(data, batch_size):
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]

X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)

n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1

autoencoder = VariationalAutoencoder(n_input = 784,
                                     n_hidden = 200,
                                     optimizer = tf.train.AdamOptimizer(learning_rate = 0.001),
                                     gaussian_sample_size = 128)

for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(n_samples / batch_size)
    # Loop over all batches
    for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)

        # Fit training using batch data
        cost = autoencoder.partial_fit(batch_xs)
        # Compute average loss
        avg_cost += cost / n_samples * batch_size

    # Display logs per epoch step
    if epoch % display_step == 0:
        print "Epoch:", '%04d' % (epoch + 1), \
            "cost=", "{:.9f}".format(avg_cost)

print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
Empty file.
@@ -0,0 +1,60 @@
import tensorflow as tf
import numpy as np
import autoencoder.Utils

class Autoencoder(object):

    def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimizer = tf.train.AdamOptimizer()):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function

        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

        # cost
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.initialize_all_variables()
        self.sess = tf.Session()
        self.sess.run(init)

    def _initialize_weights(self):
        all_weights = dict()
        all_weights['w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
        return all_weights

    def partial_fit(self, X):
        cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X})
        return cost

    def calc_total_cost(self, X):
        return self.sess.run(self.cost, feed_dict = {self.x: X})

    def transform(self, X):
        return self.sess.run(self.hidden, feed_dict={self.x: X})

    def generate(self, hidden = None):
        if hidden is None:
            # Bug fix: the original passed the tf.Variable self.weights["b1"]
            # as numpy's size argument, which raises a TypeError; sample a
            # (1, n_hidden) random code instead.
            hidden = np.random.normal(size=(1, self.n_hidden))
        return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})

    def reconstruct(self, X):
        return self.sess.run(self.reconstruction, feed_dict={self.x: X})

    def getWeights(self):
        return self.sess.run(self.weights['w1'])

    def getBiases(self):
        return self.sess.run(self.weights['b1'])
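Reviewer's note: putting the class together with the runners above, the minimal usage pattern is construct, fit batches, then inspect. A short sketch with hypothetical variable names, using only calls the runners already make:

# Editor's sketch: train on one batch and inspect the model's outputs.
ae = Autoencoder(n_input = 784, n_hidden = 200,
                 transfer_function = tf.nn.softplus,
                 optimizer = tf.train.AdamOptimizer(learning_rate = 0.001))
batch = X_train[:128]                # any (128, 784) float array
cost = ae.partial_fit(batch)         # one optimizer step, returns batch cost
codes = ae.transform(batch)          # (128, 200) hidden activations
recon = ae.reconstruct(batch)        # (128, 784) reconstructions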