Skip to content
This repository has been archived by the owner on Nov 12, 2021. It is now read-only.

Commit

Permalink
Merge branch 'exp2'
Browse the repository at this point in the history
  • Loading branch information
baudm committed May 28, 2018
2 parents 87229ab + d994f9b commit 40c6889
Show file tree
Hide file tree
Showing 8 changed files with 705 additions and 177 deletions.
177 changes: 66 additions & 111 deletions train.py
Original file line number Diff line number Diff line change
@@ -1,146 +1,101 @@
#!/usr/bin/env python3

import sys
import pickle

import matplotlib.pyplot as plt
import numpy as np

from keras import Model
from keras.datasets import mnist
from keras.callbacks import TensorBoard
from keras.optimizers import RMSprop

from vaegan.models import create_models

import cv2


def main():
encoder, decoder, discriminator, vae, vae_loss = create_models()
#
# encoder.compile('rmsprop', 'mse')
#
# x = np.random.uniform(-1.0, 1.0, size=[1, 64, 64, 1])
# y1 = np.random.uniform(-1.0, 1.0, size=[1, 128])
# y2 = np.random.uniform(-1.0, 1.0, size=[1, 128])
#
# encoder.fit(x, [y1, y2], callbacks=[TensorBoard()])
#
# return



batch_size = 32
from vaegan.models import create_models, build_graph
from vaegan.training import fit_models
from vaegan.data import celeba_loader, encoder_loader, decoder_loader, discriminator_loader, NUM_SAMPLES, mnist_loader
from vaegan.callbacks import DecoderSnapshot, ModelsCheckpoint

(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Resize to 64x64
x_train_new = np.zeros((x_train.shape[0], 64, 64), dtype='int32')
for i, img in enumerate(x_train):
x_train_new[i] = cv2.resize(img, (64, 64), interpolation=cv2.INTER_CUBIC)
def set_trainable(model, trainable):
    """Apply the ``trainable`` flag to *model* itself and to each of its layers."""
    targets = [model]
    targets.extend(model.layers)
    for target in targets:
        target.trainable = trainable


x_train = x_train_new
del x_train_new

# Normalize to [-1, 1]
#x_train = np.pad(x_train, ((0, 0), (18, 18), (18, 18)), mode='constant', constant_values=0)
x_train = np.expand_dims(x_train, -1)
x_train = (x_train.astype('float32') - 127.5) / 127.5
x_train = np.clip(x_train, -1., 1.)


# Assume images in x_train
# x_train = np.zeros((100, 64, 64, 3))

discriminator.compile('rmsprop', 'binary_crossentropy', ['accuracy'])
discriminator.trainable = False

model = Model(vae.inputs, discriminator(vae.outputs), name='vaegan')
model.add_loss(vae_loss)
model.compile('rmsprop', 'binary_crossentropy', ['accuracy'])

import keras.callbacks as cbks
import os.path

verbose = True
checkpoint = cbks.ModelCheckpoint(os.path.join('.', 'model.{epoch:02d}.h5'), save_weights_only=True)
def main():
    """Train the VAE/GAN on CelebA with RMSprop.

    Optional CLI argument: the epoch number to resume from; matching
    ``encoder<epoch>.h5`` / ``decoder<epoch>.h5`` / ``discriminator<epoch>.h5``
    weight files (see ``epoch_format``) must exist in the working directory.
    """
    encoder, decoder, discriminator = create_models()
    encoder_train, decoder_train, discriminator_train, vae, vaegan = build_graph(encoder, decoder, discriminator)

    # Resume support: the starting epoch may be passed as the first CLI arg.
    try:
        initial_epoch = int(sys.argv[1])
    except (IndexError, ValueError):
        initial_epoch = 0

    epoch_format = '.{epoch:03d}.h5'

    if initial_epoch != 0:
        suffix = epoch_format.format(epoch=initial_epoch)
        encoder.load_weights('encoder' + suffix)
        decoder.load_weights('decoder' + suffix)
        discriminator.load_weights('discriminator' + suffix)

    batch_size = 64
    rmsprop = RMSprop(lr=0.0003)

    # Compile each training head with only its own sub-network trainable.
    # NOTE(review): this assumes Keras snapshots trainability at compile
    # time, which is why the flags are toggled before every compile —
    # confirm against the Keras version in use.
    set_trainable(encoder, False)
    set_trainable(decoder, False)
    discriminator_train.compile(rmsprop, ['binary_crossentropy'] * 3, ['acc'] * 3)
    discriminator_train.summary()

    set_trainable(discriminator, False)
    set_trainable(decoder, True)
    decoder_train.compile(rmsprop, ['binary_crossentropy'] * 2, ['acc'] * 2)
    decoder_train.summary()

    set_trainable(decoder, False)
    set_trainable(encoder, True)
    encoder_train.compile(rmsprop)
    encoder_train.summary()

    # Re-enable everything on the combined model handed to fit_models.
    set_trainable(vaegan, True)

    checkpoint = ModelsCheckpoint(epoch_format, encoder, decoder, discriminator)
    decoder_sampler = DecoderSnapshot()

    callbacks = [checkpoint, decoder_sampler, TensorBoard()]

    epochs = 250

    steps_per_epoch = NUM_SAMPLES // batch_size

    # One shared seed for the image stream and the per-model label streams —
    # presumably to keep them aligned; verify in vaegan.data.
    seed = np.random.randint(2**32 - 1)

    img_loader = celeba_loader(batch_size, num_child=3, seed=seed)
    dis_loader = discriminator_loader(img_loader, seed=seed)
    dec_loader = decoder_loader(img_loader, seed=seed)
    enc_loader = encoder_loader(img_loader)

    models = [discriminator_train, decoder_train, encoder_train]
    generators = [dis_loader, dec_loader, enc_loader]
    # Metric-name -> index maps, one dict per model; presumably indices into
    # each model's train_on_batch output list — verify against fit_models.
    metrics = [{'di_l': 1, 'di_l_t': 2, 'di_l_p': 3, 'di_a': 4, 'di_a_t': 7, 'di_a_p': 10},
               {'de_l_t': 1, 'de_l_p': 2, 'de_a_t': 3, 'de_a_p': 5},
               {'en_l': 0}]

    histories = fit_models(vaegan, models, generators, metrics, batch_size,
                           steps_per_epoch=steps_per_epoch, callbacks=callbacks,
                           epochs=epochs, initial_epoch=initial_epoch)

    with open('histories.pickle', 'wb') as f:
        pickle.dump(histories, f)

    # Visual sanity check: one image (top) vs. its VAE reconstruction
    # (bottom), rescaled from [-1, 1] back to [0, 1] for display.
    x = next(celeba_loader(1))
    x_tilde = vae.predict(x)

    plt.subplot(211)
    plt.imshow((x[0].squeeze() + 1.) / 2.)
    plt.subplot(212)
    plt.imshow((x_tilde[0].squeeze() + 1.) / 2.)
    plt.show()


if __name__ == '__main__':
Expand Down
95 changes: 95 additions & 0 deletions train_adagrad.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
#!/usr/bin/env python3

import os
import sys

import matplotlib.pyplot as plt
import numpy as np

from keras.callbacks import TensorBoard, ModelCheckpoint
from keras.optimizers import Adagrad

from vaegan.models import create_models, build_graph
from vaegan.training import fit_models
from vaegan.data import celeba_loader, encoder_loader, decoder_loader, discriminator_loader, NUM_SAMPLES, mnist_loader
from vaegan.callbacks import DecoderSnapshot


def set_trainable(model, trainable):
    """Toggle the ``trainable`` flag on *model* and on all of its layers."""
    model.trainable = trainable
    # Propagate the same flag down to every child layer.
    for child in model.layers:
        child.trainable = trainable


def main():
    """Train the VAE/GAN on CelebA with Adagrad and plot one reconstruction.

    Optional CLI: ``train_adagrad.py WEIGHTS_FILE INITIAL_EPOCH`` resumes
    training from a saved ``vaegan`` weights file.
    """
    encoder, decoder, discriminator = create_models()
    encoder_train, decoder_train, discriminator_train, vae, vaegan = build_graph(encoder, decoder, discriminator)

    # Resume only when both a weights path and an epoch number are given.
    if len(sys.argv) == 3:
        vaegan.load_weights(sys.argv[1])
        initial_epoch = int(sys.argv[2])
    else:
        initial_epoch = 0

    batch_size = 64

    opt = Adagrad(lr=0.01, epsilon=None, decay=0.0)

    # Compile each training head with only its own sub-network trainable.
    # NOTE(review): this assumes Keras snapshots trainability at compile
    # time, hence the toggle-before-compile sequence — confirm against the
    # Keras version in use.
    set_trainable(encoder, False)
    set_trainable(decoder, False)
    discriminator_train.compile(opt, ['binary_crossentropy'] * 3, ['acc'] * 3)
    discriminator_train.summary()

    set_trainable(discriminator, False)
    set_trainable(decoder, True)
    decoder_train.compile(opt, ['binary_crossentropy'] * 2, ['acc'] * 2)
    decoder_train.summary()

    set_trainable(decoder, False)
    set_trainable(encoder, True)
    encoder_train.compile(opt)
    encoder_train.summary()

    # Re-enable everything on the combined model handed to fit_models.
    set_trainable(vaegan, True)

    checkpoint = ModelCheckpoint(os.path.join('.', 'model.{epoch:02d}.h5'), save_weights_only=True)
    decoder_sampler = DecoderSnapshot()

    callbacks = [checkpoint, decoder_sampler, TensorBoard()]

    epochs = 100

    steps_per_epoch = NUM_SAMPLES // batch_size

    # One shared seed for the image stream and the per-model label streams —
    # presumably to keep them aligned; verify in vaegan.data.
    seed = np.random.randint(2**32 - 1)

    img_loader = celeba_loader(batch_size, num_child=3, seed=seed)
    dis_loader = discriminator_loader(img_loader, seed=seed)
    dec_loader = decoder_loader(img_loader, seed=seed)
    enc_loader = encoder_loader(img_loader)

    models = [discriminator_train, decoder_train, encoder_train]
    generators = [dis_loader, dec_loader, enc_loader]
    # Metric-name -> index maps, one dict per model; presumably indices into
    # each model's train_on_batch output list — verify against fit_models.
    metrics = [{'di_l': 1, 'di_l_t': 2, 'di_l_p': 3, 'di_a': 4, 'di_a_t': 7, 'di_a_p': 10}, {'de_l_t': 1, 'de_l_p': 2, 'de_a_t': 3, 'de_a_p': 5}, {'en_l': 0}]

    histories = fit_models(vaegan, models, generators, metrics, batch_size,
                           steps_per_epoch=steps_per_epoch, callbacks=callbacks,
                           epochs=epochs, initial_epoch=initial_epoch)

    vaegan.save_weights('trained.h5')

    # Visual sanity check: one image (top) vs. its VAE reconstruction
    # (bottom), rescaled from [-1, 1] back to [0, 1] for display.
    x = next(celeba_loader(1))

    x_tilde = vae.predict(x)

    plt.subplot(211)
    plt.imshow((x[0].squeeze() + 1.) / 2.)

    plt.subplot(212)
    plt.imshow((x_tilde[0].squeeze() + 1.) / 2.)

    plt.show()


# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
Loading

0 comments on commit 40c6889

Please sign in to comment.