Fix TF deprecation warnings #11

Open · wants to merge 1 commit into master
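The commit below mechanically routes deprecated TF1 symbols through the `tf.compat.v1` namespace. One caveat that is not part of the diff: under TensorFlow 2.x these graph-and-Session APIs generally also require v2 behaviors (eager execution, resource variables) to be switched off before any graph is built. A minimal sketch of that setup, assuming TF 2.x is installed:

```python
import tensorflow as tf

# Must run before any graph or Session is created; TF 2.x enables eager
# execution by default, which the compat.v1 graph APIs do not expect.
tf.compat.v1.disable_v2_behavior()

with tf.compat.v1.Session() as sess:
    x = tf.compat.v1.placeholder(tf.float32, shape=[1], name='x')
    y = x * 2.0
    print(sess.run(y, feed_dict={x: [3.0]}))  # -> [6.]
```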
12 changes: 6 additions & 6 deletions batch_norm.py
@@ -32,15 +32,15 @@ def _set_default_initializer(self, var_name):

     def _build_statistics_variance(self, input_batch,
                                    reduction_indices, use_batch_stats):
-        self._moving_mean = tf.get_variable(
+        self._moving_mean = tf.compat.v1.get_variable(
             "moving_mean",
             shape=self._mean_shape,
             collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
                          tf.GraphKeys.VARIABLES],
             initializer=tf.zeros_initializer,
             trainable=False)

-        self._moving_variance = tf.get_variable(
+        self._moving_variance = tf.compat.v1.get_variable(
             "moving_variance",
             shape=self._mean_shape,
             collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
@@ -81,15 +81,15 @@ def build_moving_stats():

     def _build_statistics_second_moment(self, input_batch,
                                         reduction_indices, use_batch_stats):
-        self._moving_mean = tf.get_variable(
+        self._moving_mean = tf.compat.v1.get_variable(
             "moving_mean",
             shape=self._mean_shape,
             collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
                          tf.GraphKeys.VARIABLES],
             initializer=tf.zeros_initializer,
             trainable=False)

-        self._moving_second_moment = tf.get_variable(
+        self._moving_second_moment = tf.compat.v1.get_variable(
             "moving_second_moment",
             shape=self._mean_shape,
             collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
@@ -252,7 +252,7 @@ def _build(self, input_batch, is_training=True, test_local_stats=True):
         # Set up optional scale and offset factors.
         if self._offset:
             self._set_default_initializer(self.BETA)
-            self._beta = tf.get_variable(
+            self._beta = tf.compat.v1.get_variable(
                 self.BETA,
                 shape=self._mean_shape,
                 initializer=self._initializers[self.BETA])
@@ -261,7 +261,7 @@ def _build(self, input_batch, is_training=True, test_local_stats=True):

         if self._scale:
             self._set_default_initializer(self.GAMMA)
-            self._gamma = tf.get_variable(
+            self._gamma = tf.compat.v1.get_variable(
                 self.GAMMA,
                 shape=self._mean_shape,
                 initializer=self._initializers[self.GAMMA])
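Note that the unchanged context lines still reference `tf.GraphKeys`, which is itself a v1-only symbol (`tf.compat.v1.GraphKeys` in TF 2.x, where the `VARIABLES` collection name is additionally deprecated in favor of `GLOBAL_VARIABLES`). A sketch of what a fully migrated variable definition might look like; `mean_shape` here is a hypothetical stand-in for the module's `self._mean_shape`:

```python
import tensorflow as tf

tf.compat.v1.disable_v2_behavior()

mean_shape = [1, 1, 64]  # hypothetical; batch_norm.py derives this from the input

moving_mean = tf.compat.v1.get_variable(
    "moving_mean",
    shape=mean_shape,
    collections=[tf.compat.v1.GraphKeys.MOVING_AVERAGE_VARIABLES,
                 tf.compat.v1.GraphKeys.GLOBAL_VARIABLES],
    initializer=tf.zeros_initializer(),
    trainable=False)
```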
2 changes: 1 addition & 1 deletion eval.py
@@ -23,7 +23,7 @@ def main():
     config = json.load(config_file,
                        object_hook=lambda d:namedtuple('x', d.keys())(*d.values()))
     num_unrolls = config.num_steps // config.unroll_length
-    with tf.Session() as sess:
+    with tf.compat.v1.Session() as sess:
         model = util.load_model(sess, config, logger)
         all_y = []
         for i in range(10):
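Aside from the Session rename, the unchanged `object_hook` line is worth a comment: it converts every JSON object into an anonymous namedtuple so config entries read as attributes. A standalone illustration (the values are invented):

```python
import json
from collections import namedtuple

raw = '{"num_steps": 100, "unroll_length": 20}'
config = json.loads(raw,
                    object_hook=lambda d: namedtuple('x', d.keys())(*d.values()))

# Attribute access instead of dict lookups, as eval.py relies on:
print(config.num_steps // config.unroll_length)  # -> 5
```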
4 changes: 2 additions & 2 deletions gmm.py
@@ -69,10 +69,10 @@ def test_tf():
     xr = list(np.arange(0, 1, 0.02))
     X = np.array(list(product(xr, repeat=2)))
     Y = []
-    with tf.Session() as sess:
+    with tf.compat.v1.Session() as sess:
         gmm = tf_GMM(batch_size=1, ncoef=6, num_dims=2, cov=0.5)
         y = gmm(tf.placeholder(tf.float32, shape=[1, 2], name='x'))
-        sess.run(tf.global_variables_initializer())
+        sess.run(tf.compat.v1.global_variables_initializer())
         for x in X:
             Y.append(sess.run(y, feed_dict={'x:0':x.reshape((1, 2))}))

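The unchanged `tf.placeholder` call in `test_tf()` is also v1-only (`tf.compat.v1.placeholder` in TF 2.x). A self-contained sketch of the feed pattern the test uses, where the string `'x:0'` addresses the placeholder's output tensor by name:

```python
import numpy as np
import tensorflow as tf

tf.compat.v1.disable_v2_behavior()

with tf.compat.v1.Session() as sess:
    x = tf.compat.v1.placeholder(tf.float32, shape=[1, 2], name='x')
    y = tf.reduce_sum(x)
    # Feeding by tensor name, mirroring gmm.py's feed_dict={'x:0': ...}:
    print(sess.run(y, feed_dict={'x:0': np.array([[0.25, 0.75]])}))  # -> 1.0
```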
6 changes: 3 additions & 3 deletions infer_model.py
@@ -28,7 +28,7 @@ def __init__(self, cell, func, ndim, nsteps, ckpt_path, logger, constraints):
         self.init_state = self.cell.get_initial_state(1, tf.float32)
         self.results = self.build_graph()

-        self.saver = tf.train.Saver(tf.global_variables())
+        self.saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables())

     def get_state_shapes(self):
         return [(s[0].get_shape().as_list(), s[1].get_shape().as_list())
@@ -78,7 +78,7 @@ def get_init(self):
         return x, y, init_state

     def run(self):
-        with tf.Session() as sess:
+        with tf.compat.v1.Session() as sess:
             self.load(sess, self.ckpt_path)
             x, y, state = self.get_init()
             x_array = np.zeros((self.nsteps + 1, self.ndim))
@@ -147,6 +147,6 @@ def main():
     ax2.plot(x_array[:, 0], x_array[:, 1], x_array[:, 2])
     fig2.show()
     plt.show()
-
+
 if __name__ == '__main__':
     main()
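`tf.train.Saver` and `tf.global_variables` moved wholesale under `tf.compat.v1`, so the Saver line is a mechanical rename. A minimal save/restore sketch under the compat API; the variable and checkpoint path are hypothetical, and `saver.restore` is presumably what the `self.load(sess, self.ckpt_path)` helper wraps:

```python
import tensorflow as tf

tf.compat.v1.disable_v2_behavior()

w = tf.compat.v1.get_variable('w', shape=[3], initializer=tf.zeros_initializer())
saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables())

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    path = saver.save(sess, '/tmp/demo.ckpt')  # hypothetical path
    saver.restore(sess, path)
```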
2 changes: 1 addition & 1 deletion lets_start.py
@@ -22,7 +22,7 @@ def main():
     logger.info(str(json.load(config_file)))
     config_file.close()
     num_unrolls = config.num_steps // config.unroll_length
-    with tf.Session() as sess:
+    with tf.compat.v1.Session() as sess:
         # tf.get_default_graph().finalize()
         model = util.create_model(sess, config, logger)
         step, loss, reset, fx_array, x_array = model.step()
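If the commented-out `tf.get_default_graph().finalize()` line is ever revived, it needs the same prefix: the function lives at `tf.compat.v1.get_default_graph` in TF 2.x. A short sketch:

```python
import tensorflow as tf

tf.compat.v1.disable_v2_behavior()

graph = tf.compat.v1.get_default_graph()
graph.finalize()  # marks the graph read-only; any later op creation raises
```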
14 changes: 7 additions & 7 deletions model.py
@@ -17,29 +17,29 @@ def __init__(self, cell, logger, func, ndim, batch_size, unroll_len,
         self.make_loss(func, ndim, batch_size, unroll_len)
         loss_func = self.get_loss_func(loss_type, direction)
         self.loss = loss_func(self.fx_array)
-        optimizer = getattr(tf.train, optimizer + 'Optimizer')(lr)
+        optimizer = getattr(tf.compat.v1.train, optimizer + 'Optimizer')(lr)
         gvs = optimizer.compute_gradients(self.loss)
         capped_gvs = [(tf.clip_by_value(grad, -0.1, 0.1), var) for grad, var in gvs]
         self.opt = optimizer.apply_gradients(capped_gvs)

         # self.opt = optimizer.minimize(self.loss)
-        self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)
+        self.saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables(), max_to_keep=3)
         logger.info('model variable:')
-        logger.info(str([var.name for var in tf.global_variables()]))
+        logger.info(str([var.name for var in tf.compat.v1.global_variables()]))
         logger.info('trainable variables:')
-        logger.info(str([var.name for var in tf.trainable_variables()]))
+        logger.info(str([var.name for var in tf.compat.v1.trainable_variables()]))
         self.fx_array = self.fx_array.stack()
         self.x_array = self.x_array.stack()


     def make_discount(self, gamma, unroll_len):
         df = [(gamma ** (unroll_len - i)) for i in range(unroll_len + 1)]
         return tf.constant(df, shape=[unroll_len + 1, 1], dtype=tf.float32)


     def make_loss(self, func, ndim, batch_size, unroll_len):
         self.unroll_len = unroll_len
-        x = tf.get_variable('x', shape=[batch_size, ndim],
+        x = tf.compat.v1.get_variable('x', shape=[batch_size, ndim],
                             initializer=tf.truncated_normal_initializer(mean=0.5, stddev=0.2),
                             trainable=self.trainable_init)
         constants = func.get_parameters()
@@ -81,7 +81,7 @@ def step(t, x, state, fx_array, x_array):

         variables = [x,] + constants
         # Empty array as part of the reset process.
-        self.reset = [tf.variables_initializer(variables),
+        self.reset = [tf.compat.v1.variables_initializer(variables),
                       self.fx_array.close(), self.x_array.close()]

         return self.fx_array, self.x_array
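Because the optimizer class is resolved by name with `getattr`, the namespace swap happens in the lookup root rather than on a direct attribute. A usage sketch, assuming the config supplies `'Adam'` (the actual config value is not shown in this diff). Note also that the unchanged `tf.truncated_normal_initializer` context line likely needs the same prefix under TF 2.x (`tf.compat.v1.truncated_normal_initializer`):

```python
import tensorflow as tf

optimizer_name = 'Adam'  # hypothetical config value
lr = 0.001               # hypothetical learning rate

# Resolves to tf.compat.v1.train.AdamOptimizer, mirroring model.py's lookup:
optimizer = getattr(tf.compat.v1.train, optimizer_name + 'Optimizer')(lr)
print(type(optimizer).__name__)  # -> AdamOptimizer
```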
2 changes: 1 addition & 1 deletion ops.py
@@ -5,7 +5,7 @@

 def wrap_variable_creation(func, custom_getter):
     """Provides a custom getter for all variable creations."""
-    original_get_variable = tf.get_variable
+    original_get_variable = tf.compat.v1.get_variable
     def custom_get_variable(*args, **kwargs):
         if hasattr(kwargs, 'custom_getter'):
             raise AttributeError('Custom getters are not supported for '
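`wrap_variable_creation` monkey-patches `tf.compat.v1.get_variable` to thread a getter through every variable creation. (Incidentally, the unchanged `hasattr(kwargs, 'custom_getter')` check can never fire, since `kwargs` is a plain dict; `'custom_getter' in kwargs` is presumably what was meant.) For reference, a sketch of the built-in `custom_getter` mechanism this wrapper emulates, with a toy getter:

```python
import tensorflow as tf

tf.compat.v1.disable_v2_behavior()

def logging_getter(getter, name, *args, **kwargs):
    # Toy custom getter: report each variable creation, then delegate.
    print('creating variable:', name)
    return getter(name, *args, **kwargs)

with tf.compat.v1.variable_scope('demo', custom_getter=logging_getter):
    w = tf.compat.v1.get_variable('w', shape=[2, 2])  # prints "creating variable: demo/w"
```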
31 changes: 16 additions & 15 deletions reactions.py
@@ -1,18 +1,19 @@
 import os
 os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
 import tensorflow as tf
+import tensorflow_probability as tfp
 import numpy as np

 class ConstraintQuadratic:
     """Quadratic problem: f(x) = ||Wx - y||."""
     def __init__(self, batch_size=128, num_dims=3, ptype='convex',
                  random=0.05, dtype=tf.float32):
         self.ptype = ptype
-        self.w = tf.get_variable('w', shape=[batch_size, num_dims, num_dims],
+        self.w = tf.compat.v1.get_variable('w', shape=[batch_size, num_dims, num_dims],
                                  dtype=dtype, initializer=tf.random_normal_initializer(),
                                  trainable=False)

-        self.a = tf.get_variable('y', shape=[batch_size, num_dims],
+        self.a = tf.compat.v1.get_variable('y', shape=[batch_size, num_dims],
                                  dtype=dtype, initializer=tf.random_uniform_initializer(minval=0.01, maxval=0.99),
                                  trainable=False)

@@ -41,7 +42,7 @@ def _barrier(self, var):

     def __call__(self, x):
         '''
-        x = tf.get_variable('x', shape=[batch_size, num_dims],
+        x = tf.compat.v1.get_variable('x', shape=[batch_size, num_dims],
             dtype=dtype, initializer=tf.random_normal_initializer(stddev=stdev))
         '''
         res = (self._func(x) / self.normalizer + self.e + self._barrier(x))
@@ -56,21 +57,21 @@ def __init__(self, batch_size=128, ncoef=6, num_dims=3, random=None,
         self.num_dim = num_dims
         self.batch_size = batch_size
         self.dtype = dtype
-        with tf.variable_scope('func_gmm'):
-            self.m = [tf.get_variable('mu_{}'.format(i), shape=[batch_size, num_dims],
+        with tf.compat.v1.variable_scope('func_gmm'):
+            self.m = [tf.compat.v1.get_variable('mu_{}'.format(i), shape=[batch_size, num_dims],
                       dtype=dtype,
                       initializer=tf.random_uniform_initializer(minval=0.01, maxval=0.99),
                       trainable=False)
                       for i in range(ncoef)]

-            self.cov = [tf.get_variable('cov_{}'.format(i), shape=[batch_size, num_dims],
+            self.cov = [tf.compat.v1.get_variable('cov_{}'.format(i), shape=[batch_size, num_dims],
                         dtype=dtype,
                         initializer=tf.truncated_normal_initializer(
                             mean=cov, stddev=cov/5),
                         trainable=False)
                         for i in range(ncoef)]

-            self.coef = tf.get_variable('coef', shape=[ncoef, 1], dtype=dtype,
+            self.coef = tf.compat.v1.get_variable('coef', shape=[ncoef, 1], dtype=dtype,
                         initializer=tf.random_normal_initializer(stddev=0.2),
                         trainable=False)

@@ -91,7 +92,7 @@ def get_parameters(self):
         return self.m + self.cov + [self.coef]

     def __call__(self, x):
-        dist = [tf.contrib.distributions.MultivariateNormalDiag(
+        dist = [tfp.distributions.MultivariateNormalDiag(
             self.m[i], self.cov[i], name='MultVarNorm_{}'.format(i))
             for i in range(self.ncoef)]
         p = tf.concat([tf.reshape(dist[i].prob(x), [-1, 1])
@@ -101,7 +102,7 @@ def __call__(self, x):
         result = (fx / self.cst - self.bots) / (self.tops - self.bots)
         # import pdb; pdb.set_trace()
         if self.random:
-            result = result + tf.random_normal(shape=[self.batch_size, 1],
+            result = result + tf.random_normal(shape=[self.batch_size, 1],
                                                stddev=self.random,
                                                dtype=self.dtype, name='error')
         return result
@@ -113,11 +114,11 @@ class Quadratic:
     def __init__(self, batch_size=128, num_dims=3, ptype='convex',
                  random=0.05, dtype=tf.float32):
         self.ptype = ptype
-        self.w = tf.get_variable('w', shape=[batch_size, num_dims, num_dims],
+        self.w = tf.compat.v1.get_variable('w', shape=[batch_size, num_dims, num_dims],
                                  dtype=dtype, initializer=tf.random_normal_initializer(),
                                  trainable=False)

-        self.a = tf.get_variable('y', shape=[batch_size, num_dims],
+        self.a = tf.compat.v1.get_variable('y', shape=[batch_size, num_dims],
                                  dtype=dtype, initializer=tf.truncated_normal_initializer(mean=0.5, stddev=0.2),
                                  trainable=False)

@@ -144,7 +145,7 @@ def _func(self, var):

     def __call__(self, x):
         '''
-        x = tf.get_variable('x', shape=[batch_size, num_dims],
+        x = tf.compat.v1.get_variable('x', shape=[batch_size, num_dims],
             dtype=dtype, initializer=tf.random_normal_initializer(stddev=stdev))
         '''
         res = (self._func(x) / self.normalizer + self.e)
@@ -195,7 +196,7 @@ def __call__(self, x):
         if self.record:
             self.history['x'].append(x)
             self.history['y'].append(res)
-        return res
+        return res

 class ConstraintQuadraticEval:
     def __init__(self, num_dim=3, random=0.5, ptype='convex',
@@ -233,7 +234,7 @@ def __call__(self, x):
         res = 1 - res
         print('Output:')
         print(res)
-        return res
+        return res


 class RealReaction:
@@ -264,4 +265,4 @@ def __call__(self, x):
         result = float(input('Input the reaction yield:'))
         return self.y_convert(result)
-
+
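`tf.contrib` was removed outright in TF 2.x, so the distribution comes from the separately installed `tensorflow_probability` package (its release must match the installed TF version). The first two positional arguments keep their meaning, `loc` and `scale_diag`. A standalone sketch with invented numbers; note the unchanged `tf.random_normal` context line is also v1-only (`tf.random.normal` in TF 2.x):

```python
import tensorflow as tf
import tensorflow_probability as tfp  # assumes a TF-compatible tfp release

mu = tf.constant([[0.5, 0.5]])      # hypothetical batch of one 2-D mean
sigma = tf.constant([[0.1, 0.1]])   # hypothetical per-dimension scales
dist = tfp.distributions.MultivariateNormalDiag(loc=mu, scale_diag=sigma)

print(dist.prob([[0.5, 0.5]]))  # density peaks at the mean
```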
8 changes: 4 additions & 4 deletions realreaction.py
@@ -25,7 +25,7 @@ def __init__(self, cell, func, ndim, nsteps, ckpt_path, logger, constraints):
         self.init_state = self.cell.get_initial_state(1, tf.float32)
         self.results = self.build_graph()

-        self.saver = tf.train.Saver(tf.global_variables())
+        self.saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables())

     def get_state_shapes(self):
         return [(s[0].get_shape().as_list(), s[1].get_shape().as_list())
@@ -75,7 +75,7 @@ def get_init(self):
         return x, y, init_state

     def run(self):
-        with tf.Session() as sess:
+        with tf.compat.v1.Session() as sess:
             self.load(sess, self.ckpt_path)
             x, y, state = self.get_init()
             x_array = np.zeros((self.nsteps + 1, self.ndim))
@@ -110,10 +110,10 @@ def main():
                           constraints=config.constraints)
     x_array, y_array = optimizer.run()
-
+
     # plt.figure(1)
     # plt.plot(y_array)
     # plt.show()

 if __name__ == '__main__':
     main()
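Same two renames as in infer_model.py. If `ckpt_path` is a directory rather than a checkpoint prefix, the usual companion call is `latest_checkpoint`; a sketch with a hypothetical directory:

```python
import tensorflow as tf

ckpt_dir = './checkpoints'  # hypothetical directory
ckpt = tf.train.latest_checkpoint(ckpt_dir)  # returns None if nothing is saved
if ckpt is not None:
    print('would restore from:', ckpt)
```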