replace all uses with reduce_log*exp
dustinvtran committed Mar 5, 2017
1 parent 3d5bb49 commit 9a6565c
Showing 8 changed files with 44 additions and 101 deletions.
4 changes: 2 additions & 2 deletions edward/__init__.py
@@ -21,6 +21,6 @@
     RandomVariable
 from edward.util import copy, dot, get_ancestors, get_children, \
     get_descendants, get_dims, get_parents, get_session, get_siblings, \
-    get_variables, hessian, log_sum_exp, logit, multivariate_rbf, \
-    placeholder, random_variables, rbf, set_seed, to_simplex
+    get_variables, hessian, logit, multivariate_rbf, placeholder, \
+    random_variables, rbf, reduce_logmeanexp, set_seed, to_simplex
 from edward.version import __version__
4 changes: 2 additions & 2 deletions edward/inferences/klpq.py
@@ -7,7 +7,7 @@

 from edward.inferences.variational_inference import VariationalInference
 from edward.models import RandomVariable, Normal
-from edward.util import copy, log_sum_exp
+from edward.util import copy


 class KLpq(VariationalInference):
@@ -134,7 +134,7 @@ def build_loss_and_gradients(self, var_list):
     q_log_prob = tf.stack(q_log_prob)

     log_w = p_log_prob - q_log_prob
-    log_w_norm = log_w - log_sum_exp(log_w)
+    log_w_norm = log_w - tf.reduce_logsumexp(log_w)
     w_norm = tf.exp(log_w_norm)

     if var_list is None:
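For context, this hunk computes self-normalized importance weights: subtracting the log-sum-exp before exponentiating keeps the normalization numerically stable even when the raw weights would underflow. A minimal standalone sketch (TF 1.x API; the toy values are illustrative, not from Edward):

import tensorflow as tf

# Extreme log-weights: exp(-1000.0) underflows to 0 in float32,
# so naive normalization would divide 0 by 0.
log_w = tf.constant([-1000.0, -1001.0, -1002.0])
w_norm = tf.exp(log_w - tf.reduce_logsumexp(log_w))

with tf.Session() as sess:
  print(sess.run(w_norm))  # ~[0.665, 0.245, 0.090]; sums to 1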
80 changes: 29 additions & 51 deletions edward/util/tensorflow.py
@@ -110,57 +110,6 @@ def hessian(y, xs):
   return tf.stack(mat)


-def log_mean_exp(input_tensor, axis=None, keep_dims=False):
-  """Computes log(mean(exp(elements across dimensions of a tensor))).
-
-  Parameters
-  ----------
-  input_tensor : tf.Tensor
-    The tensor to reduce. Should have numeric type.
-  axis : int or list of int, optional
-    The dimensions to reduce. If `None` (the default), reduces all
-    dimensions.
-  keep_dims : bool, optional
-    If true, retains reduced dimensions with length 1.
-
-  Returns
-  -------
-  tf.Tensor
-    The reduced tensor.
-  """
-  logsumexp = tf.reduce_logsumexp(input_tensor, axis, keep_dims)
-  input_tensor = tf.convert_to_tensor(input_tensor)
-  n = input_tensor.get_shape().as_list()
-  if axis is None:
-    n = tf.cast(tf.reduce_prod(n), logsumexp.dtype)
-  else:
-    n = tf.cast(tf.reduce_prod(n[axis]), logsumexp.dtype)
-
-  return -tf.log(n) + logsumexp
-
-
-def log_sum_exp(input_tensor, axis=None, keep_dims=False, name=None):
-  """Compute the ``log_sum_exp`` of elements in a tensor, taking
-  the sum across axes given by ``axis``.
-
-  Parameters
-  ----------
-  input_tensor : tf.Tensor
-    The tensor to reduce. Should have numeric type.
-  axis : int or list of int, optional
-    The dimensions to reduce. If `None` (the default), reduces all
-    dimensions.
-  keep_dims : bool, optional
-    If true, retains reduced dimensions with length 1.
-
-  Returns
-  -------
-  tf.Tensor
-    The reduced tensor.
-  """
-  return tf.reduce_logsumexp(input_tensor, axis, keep_dims, name)
-
-
 def logit(x):
   """Evaluate :math:`\log(x / (1 - x))` elementwise.
@@ -290,6 +239,35 @@ def rbf(x, y=0.0, sigma=1.0, l=1.0):
       tf.exp(-1.0 / (2.0 * tf.pow(l, 2.0)) * tf.pow(x - y, 2.0))


+def reduce_logmeanexp(input_tensor, axis=None, keep_dims=False):
+  """Computes log(mean(exp(elements across dimensions of a tensor))).
+
+  Parameters
+  ----------
+  input_tensor : tf.Tensor
+    The tensor to reduce. Should have numeric type.
+  axis : int or list of int, optional
+    The dimensions to reduce. If `None` (the default), reduces all
+    dimensions.
+  keep_dims : bool, optional
+    If true, retains reduced dimensions with length 1.
+
+  Returns
+  -------
+  tf.Tensor
+    The reduced tensor.
+  """
+  logsumexp = tf.reduce_logsumexp(input_tensor, axis, keep_dims)
+  input_tensor = tf.convert_to_tensor(input_tensor)
+  n = input_tensor.get_shape().as_list()
+  if axis is None:
+    n = tf.cast(tf.reduce_prod(n), logsumexp.dtype)
+  else:
+    n = tf.cast(tf.reduce_prod(n[axis]), logsumexp.dtype)
+
+  return -tf.log(n) + logsumexp
+
+
 def to_simplex(x):
   """Transform real vector of length ``(K-1)`` to a simplex of dimension ``K``
   using a backward stick breaking construction.
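The new reduce_logmeanexp helper rests on the identity log(mean(exp(x))) = logsumexp(x) - log(n), which avoids materializing exp(x). A quick sanity check of that identity (TF 1.x API; a sketch, not part of the commit):

import numpy as np
import tensorflow as tf

x = tf.constant([0.0, 1.0, 2.0, 3.0])
direct = tf.log(tf.reduce_mean(tf.exp(x)))     # can overflow for large x
stable = tf.reduce_logsumexp(x) - tf.log(4.0)  # what reduce_logmeanexp computes

with tf.Session() as sess:
  a, b = sess.run([direct, stable])
  assert np.allclose(a, b)  # both ~2.054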
4 changes: 2 additions & 2 deletions examples/iwvi.py
@@ -14,7 +14,7 @@

 from edward.inferences import VariationalInference
 from edward.models import Bernoulli, Normal, RandomVariable
-from edward.util import copy, log_mean_exp
+from edward.util import copy, reduce_logmeanexp
 from scipy.special import expit


@@ -78,7 +78,7 @@ def build_loss_and_gradients(self, var_list):

     log_w += [p_log_prob - q_log_prob]

-    loss = -log_mean_exp(log_w)
+    loss = -reduce_logmeanexp(log_w)
     grads = tf.gradients(loss, [v._ref() for v in var_list])
     grads_and_vars = list(zip(grads, var_list))
     return loss, grads_and_vars
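The loss built here is the negative importance-weighted estimate of the log evidence, -log((1/K) * sum_k exp(log w_k)), where log w_k = log p(x, z_k) - log q(z_k). A minimal sketch with made-up log-weights (assumes the reduce_logmeanexp added in edward/util/tensorflow.py above is importable):

import tensorflow as tf
from edward.util import reduce_logmeanexp

log_w = tf.constant([-3.2, -2.8, -3.0, -3.5])  # K = 4 log importance weights
loss = -reduce_logmeanexp(log_w)               # negative IW estimate of log p(x)

with tf.Session() as sess:
  print(sess.run(loss))  # ~3.09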
6 changes: 3 additions & 3 deletions examples/tf_mixture_gaussian.py
@@ -15,7 +15,7 @@

 from edward.models import Dirichlet, Normal, InverseGamma
 from edward.stats import dirichlet, invgamma, multivariate_normal_diag, norm
-from edward.util import get_dims, log_sum_exp
+from edward.util import get_dims

 plt.style.use('ggplot')

@@ -68,9 +68,9 @@ def log_prob(self, xs, zs):
                   sigmas[(k * self.D):((k + 1) * self.D)])]

     matrix = tf.stack(matrix)
-    # log_sum_exp() along the rows is a vector, whose nth
+    # log sum exp along the rows is a vector, whose nth
     # element is the log-likelihood of data point x_n.
-    vector = log_sum_exp(matrix, 0)
+    vector = tf.reduce_logsumexp(matrix, 0)
     # Sum over data points to get the full log-likelihood.
     log_lik = tf.reduce_sum(vector)
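The pattern in this hunk (and in the two mixture examples below) is the standard mixture log-likelihood: with matrix[k, n] holding log pi_k + log N(x_n | mu_k, sigma_k), reducing over axis 0 gives log p(x_n) = logsumexp over components. A toy sketch of the same reduction (values are illustrative only):

import tensorflow as tf

# 2 components x 3 data points of per-component log-densities.
matrix = tf.constant([[-1.2, -4.0, -2.5],
                      [-3.1, -0.9, -2.6]])
vector = tf.reduce_logsumexp(matrix, 0)  # shape [3]: log-likelihood per point
log_lik = tf.reduce_sum(vector)          # full log-likelihood

with tf.Session() as sess:
  print(sess.run(log_lik))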
6 changes: 3 additions & 3 deletions examples/tf_mixture_gaussian_laplace.py
@@ -13,7 +13,7 @@

 from edward.models import PointMass
 from edward.stats import dirichlet, invgamma, multivariate_normal_diag, norm
-from edward.util import get_dims, log_sum_exp
+from edward.util import get_dims


 class MixtureGaussian:
@@ -64,9 +64,9 @@ def log_prob(self, xs, zs):
                   sigmas[(k * self.D):((k + 1) * self.D)])]

     matrix = tf.stack(matrix)
-    # log_sum_exp() along the rows is a vector, whose nth
+    # log sum exp along the rows is a vector, whose nth
     # element is the log-likelihood of data point x_n.
-    vector = log_sum_exp(matrix, 0)
+    vector = tf.reduce_logsumexp(matrix, 0)
     # Sum over data points to get the full log-likelihood.
     log_lik = tf.reduce_sum(vector)
6 changes: 3 additions & 3 deletions examples/tf_mixture_gaussian_map.py
@@ -13,7 +13,7 @@

 from edward.models import PointMass
 from edward.stats import dirichlet, invgamma, multivariate_normal_diag, norm
-from edward.util import get_dims, log_sum_exp
+from edward.util import get_dims


 class MixtureGaussian:
@@ -64,9 +64,9 @@ def log_prob(self, xs, zs):
                   sigmas[(k * self.D):((k + 1) * self.D)])]

     matrix = tf.stack(matrix)
-    # log_sum_exp() along the rows is a vector, whose nth
+    # log sum exp along the rows is a vector, whose nth
     # element is the log-likelihood of data point x_n.
-    vector = log_sum_exp(matrix, 0)
+    vector = tf.reduce_logsumexp(matrix, 0)
     # Sum over data points to get the full log-likelihood.
     log_lik = tf.reduce_sum(vector)
35 changes: 0 additions & 35 deletions tests/test-util/test_log_sum_exp.py

This file was deleted.
