
Autoencoder updates #44


Merged 3 commits on Jul 28, 2020
4 changes: 3 additions & 1 deletion Autoencoder/README.md
@@ -5,13 +5,15 @@

## Features

- `The autoencoder is a DenseNet (fully connected layers) with batch normalization and multiple skip connections (see the sketch below this list)`

- `Select the structure for the DenseNet and see the performance of the model. `

- `View sample autoencoded MNIST digits`

- `Visualize the autoencoder's 2D latent space and see the decoded digit for the corresponding latent point`

- `Autoencode your digit drawing`
- `Autoencode your own digit drawing`
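
The feature list above describes a fully connected autoencoder whose hidden blocks use batch normalization and skip connections. The following is a minimal sketch of that architecture in TensorFlow.js, not the PR's exact code; the function name, the initial projection to `nUnits` (added so the skip-connection add is shape-compatible), and the `{axis: -1}` batch-norm config are editorial assumptions.

```js
// Sketch only: a dense (fully connected) autoencoder whose hidden blocks use a
// skip connection (tf.layers.add) followed by batch normalization.
// Assumes @tensorflow/tfjs is loaded as the global `tf`.
function buildDenseAutoencoder(nLayers, nUnits, latentDim) {
  const imgShape = [28, 28];
  const imgUnits = imgShape[0] * imgShape[1];

  // Encoder: flatten the image, project to nUnits, then stack residual blocks.
  const encIn = tf.input({shape: imgShape});
  let h = tf.layers.flatten().apply(encIn);
  h = tf.layers.dense({units: nUnits, activation: 'relu'}).apply(h);
  for (let j = 0; j < nLayers - 1; j++) {
    const skip = h;
    h = tf.layers.dense({units: nUnits, activation: 'relu'}).apply(h);
    h = tf.layers.add().apply([skip, h]);                  // skip connection
    h = tf.layers.batchNormalization({axis: -1}).apply(h); // batch norm
  }
  const z = tf.layers.dense({units: latentDim}).apply(h);
  const encoder = tf.model({inputs: encIn, outputs: z});

  // Decoder: mirror of the encoder, reshaped back to a 28x28 image.
  const decIn = tf.input({shape: [latentDim]});
  let d = tf.layers.dense({units: nUnits, activation: 'relu'}).apply(decIn);
  for (let j = 0; j < nLayers - 1; j++) {
    const skip = d;
    d = tf.layers.dense({units: nUnits, activation: 'relu'}).apply(d);
    d = tf.layers.add().apply([skip, d]);
  }
  let out = tf.layers.dense({units: imgUnits}).apply(d);
  out = tf.layers.reshape({targetShape: imgShape}).apply(out);
  const decoder = tf.model({inputs: decIn, outputs: out});

  // Stacked autoencoder: encoder followed by decoder.
  const autoIn = tf.input({shape: imgShape});
  const auto = tf.model({inputs: autoIn, outputs: decoder.apply(encoder.apply(autoIn))});
  return {encoder, decoder, auto};
}
```

With, say, `nLayers = 3`, `nUnits = 100`, and `latentDim = 2`, the `auto` model can be trained end to end while `encoder` and `decoder` remain usable separately for latent-space exploration.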

## Installation and execution

43 changes: 23 additions & 20 deletions Autoencoder/index.html
@@ -49,25 +49,28 @@
}
</style>
</head>

<body style="text-align:center;background-color:white;font-size:3vh;top:0%; font-family:Georgia;margin-top:0%;margin-left:0%;">
<p style="background-color:#111111E9;font-size:3vw;top:0%;font-family:Times;margin-top:0%;padding-bottom:1%;padding-top:1%;position:fixed;width:100%;color:#EEEEFF;">TensorFlow.js: MNIST Autoencoder</p>
<p style="background-color:#7777AA;font-size:3vw;top:0%;font-family:Times;margin-top:0%;padding-bottom:1%;padding-top:1%;visibility: hidden;width:100%;margin-bottom:0%;">TensorFlow.js: MNIST Autoencoder</p>
<div id="body" style="text-align:center;height:100%;width:55%;margin-left:22%;background-color:white;padding-left:1vw;padding-top:1vh;">
<br>


<section class='title-area' >
<p style="text-align:left;color:#111111;margin-top:0%;">Train a model to autoencode handwritten digits from the MNIST database using the tf.layers
api.
<br>
This example lets you train an MNIST Autoencoder using a Fully Connected Neural Network (also known as a DenseNet).<br><br>
<p style="text-align:left;color:#111111;margin-top:0%;">
This example lets you train an MNIST Autoencoder using a Fully Connected Neural Network (also known as a DenseNet), written in TensorFlow.js.<br><br>
You can select the structure for the DenseNet and see the performance of the model.
<br>The MNIST dataset is used as training data.
<br>
<br>
Set the latent space dimension to 2 for 2D exploration of the latent space; otherwise set it higher for more accurate autoencoding.
<br>
The visualization scale determines the scale of the 2D pane.
</p>
</section>


<div style="width:100%;height:5px;background-color:#EFEFEF;"></div>
<section style="text-align:center;">
<p class='section-head' >
@@ -76,7 +79,7 @@ <h1 style="background-color:#EFEFFF;padding-top:0.5%;padding-bottom:1%;">Trainin
<div style="font-family:Times;width:20vw;background-color:#EFEFEF;text-align:left;padding-left:3%;padding-top:3%;padding-bottom:2%;display:inline-block;">
<div>
<label>N hidden layers in encoder and decoder</label>
<input id="n_layers" value="2">
<input id="n_layers" value="3">
</div>
<div>
<label>Output dimension of each layer</label>
@@ -88,29 +91,29 @@ <h1 style="background-color:#EFEFFF;padding-top:0.5%;padding-bottom:1%;">Trainin
</div>
<button id="Create">Create model</button>
</div>

<div style="font-family:Times;width:20vw;background-color:#EFEFEF;text-align:left;padding-left:3%;padding-top:1.3%;padding-bottom:2%;display:inline-block;">
<div>
<label># Batch size:</label>
<input id="batchsize" value="300">
</div>
<div>
<label># LearnRate:</label>
<input id="lr" value="0.3">
<input id="lr" value="0.1">
</div>
<div>
<label># Training epochs:</label>
<input id="train-epochs" value="1">
</div>
<div>
<label># Visualization scale</label>
<input id="vis" value="50">
<input id="vis" value="0.1">
</div>
<button id="train">Train Model</button>
</div>
</section>


<br>
<div style="width:100%;height:5px;background-color:#EFEFEF;"></div>
</div>
@@ -124,11 +127,11 @@ <h2>This will show examples from the autoencoder once it is trained</h2>
</div>
</div>
<br><br><br>


<div style="width:100%;background-color:white;height:15px;"></div>
<div style="text-align:center;">
<h2>This is the 2D plot visualization of the autoencoder's latent space.<br> Available when your latent space dimension is set to 2<br></h2>
<h2>This is the 2D plot visualization of the autoencoder's latent space.<br> Drag slowly in the 2D pane below<br></h2>
<div id="cn" style="display:none;margin-left:35%;text-align:center;">
<canvas id="mot" style="height:80px;width:80px;display:block;margin-left:20%;border:solid 3px black;"></canvas>
<br>
@@ -146,8 +149,8 @@ <h2>This is for autoencoding your drawing on the canvas<br></h2>
<button id="clear" style="display:inline;">Clear</button>
</div>
</div>


<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@1.0.0/dist/tf.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-vis@1.0.2/dist/tfjs-vis.umd.min.js"></script>
<script src='https://cdnjs.cloudflare.com/ajax/libs/tensorflow/1.2.7/tf.min.js'></script>
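
The controls above expose batch size, learning rate, training epochs, and visualization scale, but the body of the page's `train()` function is collapsed in this diff. The following is a hedged sketch of how such an autoencoder is typically compiled and fitted in TensorFlow.js; the optimizer choice, loss, and names are assumptions, not the PR's code.

```js
// Sketch only: compile the stacked autoencoder and fit it on MNIST images,
// using the images as both inputs and reconstruction targets.
async function trainAutoencoder(auto, trainImages, batchSize, learnRate, epochs) {
  auto.compile({
    optimizer: tf.train.sgd(learnRate),   // assumed; the UI default learning rate is 0.1
    loss: 'meanSquaredError',
  });
  await auto.fit(trainImages, trainImages, {
    batchSize,
    epochs,
    shuffle: true,
    callbacks: {
      onEpochEnd: (epoch, logs) => console.log(`epoch ${epoch}: loss = ${logs.loss}`),
    },
  });
}
```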
76 changes: 47 additions & 29 deletions Autoencoder/index.js
@@ -8,52 +8,63 @@
// for arbitrary data though. It's worth a look :)
import {IMAGE_H, IMAGE_W, MnistData} from './datas.js';

// This is a helper class for drawing loss graphs and MNIST images to the
// window. For the purposes of understanding the machine learning bits, you can
// largely ignore it
import * as ui from './ui.js';


function createConvModel(n_layers,n_units,hidden) {

function createConvModel(n_layers,n_units,hidden) { // dense blocks with skip connections and batch norm
this.latent_dim = Number(hidden); //final dimension of hidden layer
this.n_layers = Number(n_layers); //how many hidden layers in encoder and decoder
this.n_units = Number(n_units); //output dimension of each layer
this.img_shape = [28,28];
this.img_units = this.img_shape[0] * this.img_shape[1];
// build the encoder

var i = tf.input({shape: this.img_shape});
var h = tf.layers.flatten().apply(i);

for (var j=0; j<this.n_layers; j++) {
h=tf.layers.batchNormalization(-1).apply(h);
h = tf.layers.dense({units: this.n_units, activation:'relu'}).apply(h);
for (var j=0; j<this.n_layers-1; j++) {
var tm=h;
const addLayer = tf.layers.add();
h = tf.layers.dense({units: this.n_units, activation:'relu'}).apply(h); // hidden dense layer
h=addLayer.apply([tm,h]);
h = tf.layers.batchNormalization({axis: -1}).apply(h);
}

var o = tf.layers.dense({units: this.latent_dim}).apply(h); //1 final
var o = tf.layers.dense({units: this.latent_dim}).apply(h);
//1 final
this.encoder = tf.model({inputs: i, outputs: o});

// build the decoder
var i = h = tf.input({shape: this.latent_dim});
for (var j=0; j<this.n_layers; j++) { //n hidden
h = tf.layers.dense({units: this.n_units, activation:'relu'}).apply(h);
for (var j=0; j<this.n_layers-1; j++) {
var tm=h;
const addLayer = tf.layers.add(); //n hidden
h = tf.layers.dense({units: this.n_units, activation:'relu'}).apply(h);
h=addLayer.apply([tm,h]);
}
var o = tf.layers.dense({units: this.img_units}).apply(h) ; //1 final

var o = tf.layers.dense({units: this.img_units}).apply(h); //1 final
var o = tf.layers.reshape({targetShape: this.img_shape}).apply(o);
this.decoder = tf.model({inputs: i, outputs: o});

// stack the autoencoder
var i = tf.input({shape: this.img_shape});
var z = this.encoder.apply(i); //z is hidden code

var o = this.decoder.apply(z);
this.auto = tf.model({inputs: i, outputs: o});

}


let epochs=0,trainEpochs,batch;
var trainData;
var testData;
var b;var model;



async function train(model) {

const e=document.getElementById('batchsize');
@@ -84,8 +95,6 @@ await showPredictions(model,epochs); //Triv

}



async function showPredictions(model,epochs) { //Trivial Samples of autoencoder
const testExamples = 10;
const examples = data.getTestData(testExamples);
@@ -106,14 +115,15 @@ async function run(){
testData = data.getTestData();
}

document.getElementById('vis').oninput=function(){vis=Number(document.getElementById('vis').value);console.log(vis);};

async function load() {
var ele=document.getElementById('barc');
ele.style.display="none";
const n_units=document.getElementById('n_units').value;
const n_layers=document.getElementById('n_layers').value;
const hidden=document.getElementById('hidden').value;
model = new createConvModel(n_layers,n_units,hidden);
model = new createConvModel(n_layers,n_units,hidden); //load model
const elem=document.getElementById('new')
elem.innerHTML="Model Created!!!"
epochs=0;
@@ -122,13 +132,15 @@ async function load() {

load();



async function runtrain(){
var ele=document.getElementById('barc');
ele.style.display="block";
var elem=document.getElementById('new');
elem.innerHTML="";
b=0;
await train(model);
await train(model); //start training
vis=Number(document.getElementById('vis').value);
}

@@ -151,7 +163,7 @@ function normaltensor(prediction){
prediction= prediction.sub(inputMin).div(inputMax.sub(inputMin));
return prediction;}
function normal(prediction){
const inputMax = prediction.max();
const inputMax = prediction.max(); // normalization
const inputMin = prediction.min();
prediction= prediction.sub(inputMin).div(inputMax.sub(inputMin));
return prediction;
@@ -163,22 +175,27 @@ const canvas=document.getElementById('celeba-scene');
const mot=document.getElementById('mot');
var cont=mot.getContext('2d');










function sample(obj) { //plotting
obj.x = (obj.x) * vis;
obj.y = (obj.y) * vis;
// convert 10, 50 into a vector
var y = tf.tensor2d([[obj.x, obj.y]]);
// sample from region 10, 50 in latent space

var prediction = model.decoder.predict(y).dataSync();

//scaling
//scaling
prediction=normaltensor(prediction);
prediction=prediction.reshape([28,28]);

prediction=prediction.mul(255).toInt();


prediction=prediction.mul(255).toInt(); // for 2D plot
// log the prediction to the browser console
tf.browser.toPixels(prediction, canvas);
}
@@ -190,7 +207,7 @@ cont.fillRect(0,0,mot.width,mot.height);
mot.addEventListener('mousemove', function(e) {
mouse.x = (e.pageX - this.offsetLeft)*3.43;
mouse.y = (e.pageY - this.offsetTop)*1.9;
}, false);
}, false); //mouse movement for 2dplot

mot.addEventListener('mousedown', function(e) {
mot.addEventListener('mousemove', on, false);
@@ -209,11 +226,6 @@ var on= function() {
};







function plot2d(){
load();
const decision=Number(document.getElementById("hidden").value);
Expand Down Expand Up @@ -241,6 +253,12 @@ document.addEventListener('DOMContentLoaded',plot2d);









const canv=document.getElementById('canv');
const outcanv=document.getElementById('outcanv');
var ct = outcanv.getContext('2d');
@@ -250,7 +268,7 @@ var ctx = canv.getContext('2d');
function clear(){
ctx.clearRect(0, 0, canv.width, canv.height);
ctx.fillStyle = "black";
ctx.fillRect(0, 0, canv.width, canv.height);
ctx.fillRect(0, 0, canv.width, canv.height); //for canvas autoencoding
ct.clearRect(0, 0, outcanv.width, outcanv.height);
ct.fillStyle = "#DDDDDD";
ct.fillRect(0, 0, outcanv.width, outcanv.height);
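
The drawing canvas above feeds a hand-drawn digit through the trained autoencoder, but the handler itself is collapsed at the end of this diff. A minimal sketch of that step follows; the function name and the preprocessing details are assumptions rather than the PR's code.

```js
// Sketch only: downscale the drawing to 28x28 grayscale, run it through the
// autoencoder, and render the min-max normalized reconstruction.
async function autoencodeDrawing(auto, canv, outcanv) {
  const pix = tf.browser.fromPixels(canv, 1);            // [h, w, 1] grayscale
  const x = tf.image.resizeBilinear(pix.toFloat(), [28, 28])
      .div(255)
      .reshape([1, 28, 28]);                             // batch of one image
  let y = auto.predict(x).reshape([28, 28]);
  y = y.sub(y.min()).div(y.max().sub(y.min()));          // normalize to [0, 1]
  await tf.browser.toPixels(y, outcanv);                 // draw the reconstruction
}
```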