From cedf2edd5a7e3ed7adade93741910fe896ce0160 Mon Sep 17 00:00:00 2001
From: Dustin Tran
Date: Sat, 14 May 2016 00:05:56 -0400
Subject: [PATCH] vectorize calls to log densities in examples; #73

---
 examples/beta_bernoulli_map.py   |  2 +-
 examples/beta_bernoulli_tf.py    |  2 +-
 examples/convolutional_vae.py    |  2 +-
 examples/mixture_gaussian.py     | 44 ++++++++++++---------------
 examples/mixture_gaussian_map.py | 52 +++++++++++++++-----------------
 examples/normal.py               |  3 +-
 examples/normal_idiomatic_tf.py  |  3 +-
 examples/normal_map.py           |  3 +-
 examples/normal_two.py           |  3 +-
 9 files changed, 51 insertions(+), 63 deletions(-)

diff --git a/examples/beta_bernoulli_map.py b/examples/beta_bernoulli_map.py
index a0e1195d8..48000a4ac 100644
--- a/examples/beta_bernoulli_map.py
+++ b/examples/beta_bernoulli_map.py
@@ -21,7 +21,7 @@ def __init__(self):
 
     def log_prob(self, xs, zs):
         log_prior = beta.logpdf(zs, a=1.0, b=1.0)
-        log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(xs, z)) \
+        log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(xs, z))
                            for z in tf.unpack(zs)])
         return log_lik + log_prior
 
diff --git a/examples/beta_bernoulli_tf.py b/examples/beta_bernoulli_tf.py
index ddf9eaef3..0ab69ffbc 100644
--- a/examples/beta_bernoulli_tf.py
+++ b/examples/beta_bernoulli_tf.py
@@ -23,7 +23,7 @@ def __init__(self):
 
     def log_prob(self, xs, zs):
         log_prior = beta.logpdf(zs, a=1.0, b=1.0)
-        log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(xs, z)) \
+        log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(xs, z))
                            for z in tf.unpack(zs)])
         return log_lik + log_prior
 
diff --git a/examples/convolutional_vae.py b/examples/convolutional_vae.py
index e55225745..41ad32920 100644
--- a/examples/convolutional_vae.py
+++ b/examples/convolutional_vae.py
@@ -16,7 +16,7 @@
 import tensorflow as tf
 
 from convolutional_vae_util import deconv2d
-from edward import Variational, Normal
+from edward.variationals import Variational, Normal
 from progressbar import ETA, Bar, Percentage, ProgressBar
 from scipy.misc import imsave
 from tensorflow.examples.tutorials.mnist import input_data
diff --git a/examples/mixture_gaussian.py b/examples/mixture_gaussian.py
index 5513ba825..fa4439a29 100644
--- a/examples/mixture_gaussian.py
+++ b/examples/mixture_gaussian.py
@@ -57,38 +57,34 @@ def __init__(self, K, D):
         self.c = 10
         self.alpha = tf.ones([K])
 
-    def unpack_params(self, z):
-        """Unpack parameters from a flattened vector."""
-        pi = z[0:self.K]
-        mus = z[self.K:(self.K+self.K*self.D)]
-        sigmas = z[(self.K+self.K*self.D):(self.K+2*self.K*self.D)]
+    def unpack_params(self, zs):
+        """Unpack sets of parameters from a flattened matrix."""
+        pi = zs[:, 0:self.K]
+        mus = zs[:, self.K:(self.K+self.K*self.D)]
+        sigmas = zs[:, (self.K+self.K*self.D):(self.K+2*self.K*self.D)]
         return pi, mus, sigmas
 
     def log_prob(self, xs, zs):
         """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
         N = get_dims(xs)[0]
-        # Loop over each mini-batch zs[b,:]
-        log_prob = []
-        for z in tf.unpack(zs):
-            pi, mus, sigmas = self.unpack_params(z)
-            log_prior = dirichlet.logpdf(pi, self.alpha)
+        pi, mus, sigmas = self.unpack_params(zs)
+        log_prior = dirichlet.logpdf(pi, self.alpha)
+        log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)), 1)
+        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b), 1)
+
+        # Loop over each sample zs[s, :].
+        log_lik = []
+        n_minibatch = get_dims(zs)[0]
+        for s in xrange(n_minibatch):
+            log_lik_z = N*tf.reduce_sum(tf.log(pi[s, :]))
             for k in xrange(self.K):
-                log_prior += norm.logpdf(mus[k*self.D], 0, np.sqrt(self.c))
-                log_prior += norm.logpdf(mus[k*self.D+1], 0, np.sqrt(self.c))
-                log_prior += invgamma.logpdf(sigmas[k*self.D], self.a, self.b)
-                log_prior += invgamma.logpdf(sigmas[k*self.D+1], self.a, self.b)
+                log_lik_z += tf.reduce_sum(multivariate_normal.logpdf(xs,
+                    mus[s, (k*self.D):((k+1)*self.D)],
+                    sigmas[s, (k*self.D):((k+1)*self.D)]))
 
-            log_lik = tf.constant(0.0, dtype=tf.float32)
-            for x in tf.unpack(xs):
-                for k in xrange(self.K):
-                    log_lik += tf.log(pi[k])
-                    log_lik += multivariate_normal.logpdf(x,
-                        mus[(k*self.D):((k+1)*self.D)],
-                        sigmas[(k*self.D):((k+1)*self.D)])
+            log_lik += [log_lik_z]
 
-            log_prob += [log_prior + log_lik]
-
-        return tf.pack(log_prob)
+        return log_prior + tf.pack(log_lik)
 
 ed.set_seed(42)
 x = np.loadtxt('data/mixture_data.txt', dtype='float32', delimiter=',')
@@ -101,4 +97,4 @@ def log_prob(self, xs, zs):
 variational.add(InvGamma(model.K*model.D))
 
 inference = ed.MFVI(model, variational, data)
-inference.run(n_iter=10000, n_minibatch=5, n_data=5)
+inference.run(n_iter=500, n_minibatch=5, n_data=5)
diff --git a/examples/mixture_gaussian_map.py b/examples/mixture_gaussian_map.py
index 5b8a6e60e..9ad3a7e3a 100644
--- a/examples/mixture_gaussian_map.py
+++ b/examples/mixture_gaussian_map.py
@@ -56,43 +56,39 @@ def __init__(self, K, D):
         self.c = 10
         self.alpha = tf.ones([K])
 
-    def unpack_params(self, z):
-        """Unpack parameters from a flattened vector."""
-        pi = z[0:self.K]
-        mus = z[self.K:(self.K+self.K*self.D)]
-        sigmas = z[(self.K+self.K*self.D):(self.K+2*self.K*self.D)]
+    def unpack_params(self, zs):
+        """Unpack sets of parameters from a flattened matrix."""
+        pi = zs[:, 0:self.K]
+        mus = zs[:, self.K:(self.K+self.K*self.D)]
+        sigmas = zs[:, (self.K+self.K*self.D):(self.K+2*self.K*self.D)]
+        # Do the unconstrained to constrained transformation for MAP here.
+        pi = tf.sigmoid(pi)
+        pi = tf.concat(1, [pi[:, 0:(self.K-1)],
+            tf.expand_dims(1.0 - tf.reduce_sum(pi[:, 0:(self.K-1)], 1), 1)])
+        sigmas = tf.nn.softplus(sigmas)
         return pi, mus, sigmas
 
     def log_prob(self, xs, zs):
         """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
         N = get_dims(xs)[0]
-        # Loop over each mini-batch zs[b,:]
-        log_prob = []
-        for z in tf.unpack(zs):
-            # Do the unconstrained to constrained transformation for MAP here.
-            pi, mus, sigmas = self.unpack_params(z)
-            pi = tf.sigmoid(pi)
-            pi = tf.concat(0, [pi[0:(self.K-1)],
-                tf.expand_dims(1.0 - tf.reduce_sum(pi[0:(self.K-1)]), 0)])
-            sigmas = tf.nn.softplus(sigmas)
-            log_prior = dirichlet.logpdf(pi, self.alpha)
-            for k in xrange(self.K):
-                log_prior += norm.logpdf(mus[k*self.D], 0, np.sqrt(self.c))
-                log_prior += norm.logpdf(mus[k*self.D+1], 0, np.sqrt(self.c))
-                log_prior += invgamma.logpdf(sigmas[k*self.D], self.a, self.b)
-                log_prior += invgamma.logpdf(sigmas[k*self.D+1], self.a, self.b)
+        pi, mus, sigmas = self.unpack_params(zs)
+        log_prior = dirichlet.logpdf(pi, self.alpha)
+        log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)), 1)
+        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b), 1)
 
-            log_lik = tf.constant(0.0, dtype=tf.float32)
-            for x in tf.unpack(xs):
-                for k in xrange(self.K):
-                    log_lik += tf.log(pi[k])
-                    log_lik += multivariate_normal.logpdf(x,
-                        mus[(k*self.D):((k+1)*self.D)],
-                        sigmas[(k*self.D):((k+1)*self.D)])
+        # Loop over each sample zs[s, :].
+        log_lik = []
+        n_minibatch = get_dims(zs)[0]
+        for s in xrange(n_minibatch):
+            log_lik_z = N*tf.reduce_sum(tf.log(pi[s, :]))
+            for k in xrange(self.K):
+                log_lik_z += tf.reduce_sum(multivariate_normal.logpdf(xs,
+                    mus[s, (k*self.D):((k+1)*self.D)],
+                    sigmas[s, (k*self.D):((k+1)*self.D)]))
 
-            log_prob += [log_prior + log_lik]
+            log_lik += [log_lik_z]
 
-        return tf.pack(log_prob)
+        return log_prior + tf.pack(log_lik)
 
 ed.set_seed(42)
 x = np.loadtxt('data/mixture_data.txt', dtype='float32', delimiter=',')
diff --git a/examples/normal.py b/examples/normal.py
index a0283dbc2..605e86caa 100644
--- a/examples/normal.py
+++ b/examples/normal.py
@@ -21,8 +21,7 @@ def __init__(self, mu, std):
         self.num_vars = 1
 
     def log_prob(self, xs, zs):
-        return tf.pack([norm.logpdf(z, self.mu, self.std)
-                        for z in tf.unpack(zs)])
+        return norm.logpdf(zs, self.mu, self.std)
 
 ed.set_seed(42)
 mu = tf.constant(1.0)
diff --git a/examples/normal_idiomatic_tf.py b/examples/normal_idiomatic_tf.py
index 7a618fd0e..c21b18a61 100644
--- a/examples/normal_idiomatic_tf.py
+++ b/examples/normal_idiomatic_tf.py
@@ -25,8 +25,7 @@ def __init__(self, mu, std):
         self.num_vars = 1
 
     def log_prob(self, xs, zs):
-        return tf.pack([norm.logpdf(z, self.mu, self.std)
-                        for z in tf.unpack(zs)])
+        return norm.logpdf(zs, self.mu, self.std)
 
 ed.set_seed(42)
 mu = tf.constant(1.0)
diff --git a/examples/normal_map.py b/examples/normal_map.py
index bb92f68ad..33b49e836 100644
--- a/examples/normal_map.py
+++ b/examples/normal_map.py
@@ -19,8 +19,7 @@ def __init__(self, mu, Sigma):
         self.num_vars = 1
 
     def log_prob(self, xs, zs):
-        log_prior = tf.pack([norm.logpdf(z, mu, Sigma)
-                             for z in tf.unpack(zs)])
+        log_prior = norm.logpdf(zs, mu, Sigma)
         log_lik = tf.pack([tf.reduce_sum(norm.logpdf(xs, z, Sigma))
                            for z in tf.unpack(zs)])
         return log_lik + log_prior
diff --git a/examples/normal_two.py b/examples/normal_two.py
index f024e5493..faea2fffd 100644
--- a/examples/normal_two.py
+++ b/examples/normal_two.py
@@ -22,8 +22,7 @@ def __init__(self, mu, Sigma):
         self.num_vars = get_dims(mu)[0]
 
     def log_prob(self, xs, zs):
-        return tf.pack([multivariate_normal.logpdf(z, self.mu, self.Sigma)
-                        for z in tf.unpack(zs)])
+        return multivariate_normal.logpdf(zs, self.mu, self.Sigma)
 
 ed.set_seed(42)
 mu = tf.constant([1.0, 1.0])
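
Note on the pattern this patch applies: a model's log_prob receives zs as an S x d matrix, one sample of the latent variables per row, and must return a vector of S log joint densities. The old examples built that vector with a Python loop, tf.pack([... for z in tf.unpack(zs)]), which adds one graph op per sample; the new code evaluates the density on the whole matrix at once. The sketch below illustrates the same before/after equivalence using NumPy/SciPy stand-ins for Edward's TensorFlow-backed stats wrappers; the shapes and the mu, std values are hypothetical and not part of the patch.

    # Minimal sketch of the vectorization, assuming scipy.stats in place of
    # Edward's tf-backed norm wrapper (hypothetical example, not from the patch).
    import numpy as np
    from scipy.stats import norm

    mu, std = 1.0, 1.0
    zs = np.random.randn(5, 1)  # S=5 samples of a 1-d latent variable, one per row

    # Before: evaluate the density once per sample and pack the results,
    # mirroring the tf.pack([... for z in tf.unpack(zs)]) idiom.
    log_prob_looped = np.array([norm.logpdf(z, mu, std).sum() for z in zs])

    # After: one vectorized call over the whole matrix of samples,
    # reducing over the latent dimension to get one density per row.
    log_prob_vectorized = norm.logpdf(zs, mu, std).sum(axis=1)

    assert np.allclose(log_prob_looped, log_prob_vectorized)

The mixture examples keep a loop over s only because multivariate_normal.logpdf is evaluated per sample; the prior terms are fully vectorized across rows of zs.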