2 changes: 2 additions & 0 deletions .gitignore
@@ -0,0 +1,2 @@
+**/__pycache__/*
+example/*.h5
5 changes: 5 additions & 0 deletions Dockerfile
@@ -0,0 +1,5 @@
+FROM tensorflow/tensorflow:2.3.0rc2-gpu-jupyter
+
+RUN python -m pip install --upgrade pip
+COPY requirements.txt .
+RUN pip install -r requirements.txt
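
Review note: a quick smoke test for the new image (assumed-typical Docker usage, not part of this diff) is `docker build -t parametric-tsne .` followed by `docker run --gpus all -p 8888:8888 parametric-tsne`, then running something like the following inside the container:

```python
# Hypothetical sanity check: confirms the TF 2.x build the image pins is
# importable, and whether a GPU is visible to it.
import tensorflow as tf

print(tf.__version__)                          # expect a 2.3.0rc2 build here
print(tf.config.list_physical_devices("GPU"))  # empty list means CPU-only
```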
16 changes: 8 additions & 8 deletions parametric_tSNE/core.py
@@ -18,8 +18,8 @@
 import numpy as np
 
 import tensorflow as tf
-from tensorflow.contrib.keras import models
-from tensorflow.contrib.keras import layers
+from tensorflow.keras import models
+from tensorflow.keras import layers
 
 from .utils import calc_betas_loop
 from .utils import get_squared_cross_diff_np
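
Review note: `tensorflow.contrib` was removed outright in TF 2.0, so the old import path has no 2.x equivalent, while `tf.keras` has been the supported home since TF 1.4. If the package ever needed to keep supporting very old 1.x installs, a shim like this hypothetical sketch would work; the diff itself rightly just switches over:

```python
# Hypothetical dual-version import shim, not part of this PR.
try:
    from tensorflow.keras import models, layers          # TF 1.4+ and all TF 2.x
except ImportError:
    from tensorflow.contrib.keras import models, layers  # pre-1.4 fallback only
```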
@@ -147,8 +147,8 @@ def _get_normed_sym_tf(X_, batch_size):
     symmetric probabilities, making the assumption that P(i|j) = P(j|i)
     Diagonals are all 0s."""
     toset = tf.constant(0, shape=[batch_size], dtype=X_.dtype)
-    X_ = tf.matrix_set_diag(X_, toset)
-    norm_facs = tf.reduce_sum(X_, axis=0, keep_dims=True)
+    X_ = tf.linalg.set_diag(X_, toset)
+    norm_facs = tf.reduce_sum(X_, axis=0, keepdims=True)
     X_ = X_ / norm_facs
     X_ = 0.5*(X_ + tf.transpose(X_))
 
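Review note: both renames in this hunk are mechanical (`tf.matrix_set_diag` → `tf.linalg.set_diag`, `keep_dims=` → `keepdims=`). A self-contained TF 2.x sketch of what the hunk computes, using a toy 3×3 similarity matrix (values illustrative, not from the PR):

```python
import tensorflow as tf

batch_size = 3
X = tf.constant([[9.0, 1.0, 2.0],
                 [1.0, 9.0, 3.0],
                 [2.0, 3.0, 9.0]])

# Zero the diagonal: a point is not its own neighbor.
X = tf.linalg.set_diag(X, tf.zeros([batch_size], dtype=X.dtype))
# Column sums kept as a (1, 3) row so the division below broadcasts.
norm_facs = tf.reduce_sum(X, axis=0, keepdims=True)
X = X / norm_facs
# Symmetrize, assuming P(i|j) = P(j|i); the diagonal stays 0.
P = 0.5 * (X + tf.transpose(X))
print(P.numpy())
```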
@@ -216,12 +216,12 @@ def kl_loss(y_true, y_pred, alpha=1.0, batch_size=None, num_perplexities=None, _
         #yrange = tf.range(zz*batch_size, (zz+1)*batch_size)
         #cur_beta_P = tf.slice(P_, [zz*batch_size, [-1, batch_size])
         #cur_beta_P = P_
-        kl_matr = tf.multiply(cur_beta_P, tf.log(cur_beta_P + _tf_eps) - tf.log(Q_ + _tf_eps), name='kl_matr')
+        kl_matr = tf.math.multiply(cur_beta_P, tf.math.log(cur_beta_P + _tf_eps) - tf.math.log(Q_ + _tf_eps), name='kl_matr')
         toset = tf.constant(0, shape=[batch_size], dtype=kl_matr.dtype)
-        kl_matr_keep = tf.matrix_set_diag(kl_matr, toset)
+        kl_matr_keep = tf.linalg.set_diag(kl_matr, toset)
         kl_total_cost_cur_beta = tf.reduce_sum(kl_matr_keep)
         kls_per_beta.append(kl_total_cost_cur_beta)
-    kl_total_cost = tf.add_n(kls_per_beta)
+    kl_total_cost = tf.math.add_n(kls_per_beta)
     #kl_total_cost = kl_total_cost_cur_beta
 
     return kl_total_cost
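
Review note: same mechanical pattern, the free functions now live under `tf.math` (and the diag op under `tf.linalg`). A minimal eager-mode sketch of one per-beta KL term, with toy P/Q matrices standing in for the real probabilities:

```python
import tensorflow as tf

_tf_eps = tf.constant(1e-8, dtype=tf.float32)
batch_size = 2
P_ = tf.constant([[0.0, 0.3], [0.3, 0.0]])  # high-dimensional probabilities
Q_ = tf.constant([[0.0, 0.2], [0.2, 0.0]])  # low-dimensional probabilities

kl_matr = tf.math.multiply(P_, tf.math.log(P_ + _tf_eps) - tf.math.log(Q_ + _tf_eps))
# Zero the diagonal before summing, as the diff does.
kl_matr_keep = tf.linalg.set_diag(kl_matr, tf.zeros([batch_size], dtype=kl_matr.dtype))
per_beta_cost = tf.reduce_sum(kl_matr_keep)
# The real loss collects one such term per perplexity; tf.math.add_n sums them.
total = tf.math.add_n([per_beta_cost])
print(float(total))
```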
@@ -272,7 +272,7 @@ def __init__(self, num_inputs, num_outputs, perplexities,
         self.do_pretrain = do_pretrain
         self._loss_func = None
 
-        tf.set_random_seed(seed)
+        tf.random.set_seed(seed)
         np.random.seed(seed)
 
         # If no layers provided, use the same architecture as van der Maaten 2009 paper
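
Review note: the seeding rename is one-to-one. A tiny sketch of the TF 2.x call alongside the unchanged NumPy seeding (assumed-typical usage, not from the PR):

```python
import numpy as np
import tensorflow as tf

tf.random.set_seed(42)  # TF 2.x name for tf.set_random_seed(42)
np.random.seed(42)

print(tf.random.uniform([3]).numpy())  # reproducible across runs with the same seed
```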
10 changes: 3 additions & 7 deletions requirements.txt
@@ -1,10 +1,6 @@
 numpy
-# Tensorflow API is rapidly evolving (last time I checked)
-# This is the version I designed against
-tensorflow~=1.4.0
-# For saving/loading trained model
 h5py
 # Optional, for example script
-matplotlib~=2.0.0
-seaborn~=0.8.0
-scikit-learn~=0.19.1
+matplotlib>=2.0.0
+seaborn>=0.8.0
+scikit-learn>=0.19.1
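
Review note on the pin change: `~=2.0.0` is a compatible-release pin (effectively `>=2.0.0, <2.1`), while `>=2.0.0` only sets a floor, so the optional example dependencies can now track current releases. TensorFlow itself is no longer pinned here at all; the Dockerfile above supplies it through the `tensorflow/tensorflow:2.3.0rc2-gpu-jupyter` base image.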