This repository was archived by the owner on Nov 10, 2022. It is now read-only.
2 changes: 1 addition & 1 deletion SCGExperiment.ipynb
@@ -269,7 +269,7 @@
" ], {x: samples})\n",
"\n",
" if t % 100 == 0:\n",
" print 'Step: %d / %d, Loss: %.2e, Acceptance sample: %.2f, LR: %.5f' % (t, n_steps, loss_, np.mean(px_), lr_)"
" print('Step: {0} / {1}, Loss: {2:.2e}, Acceptance sample: {3:.2f}, LR: {4:.5f}'.format(t, n_steps, loss_, np.mean(px_), lr_))"
]
},
{
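The notebook cell above replaces the Python 2 print statement with a Python 3 `print()` call using `str.format`. For reference only, a minimal sketch of the same logging line written as an f-string (Python 3.6+); the variables `t`, `n_steps`, `loss_`, `px_`, and `lr_` are the ones from the notebook cell, and the values below are placeholders:

```python
import numpy as np

# Placeholder values standing in for the notebook's training-loop variables.
t, n_steps, loss_, px_, lr_ = 100, 5000, 1.23e-02, np.array([0.61, 0.78]), 1e-3

print(f'Step: {t} / {n_steps}, Loss: {loss_:.2e}, '
      f'Acceptance sample: {np.mean(px_):.2f}, LR: {lr_:.5f}')
```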
4 changes: 2 additions & 2 deletions utils/notebook_utils.py
@@ -1,7 +1,7 @@
import tensorflow as tf
import numpy as np
from dynamics import Dynamics
from sampler import propose
from utils.dynamics import Dynamics
from utils.sampler import propose
import matplotlib.pyplot as plt

def plot_grid(S, width=8):
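The import change above qualifies `dynamics` and `sampler` with the `utils` package. A minimal sketch of how the notebook might pick these up, assuming it is launched from the repository root and that `utils/` is importable as a package (the `sys.path` line is only needed when the working directory is elsewhere):

```python
import os, sys

# Assumption: the repository root (the directory containing utils/) should be
# on sys.path; add it explicitly if running from another working directory.
sys.path.insert(0, os.path.abspath('.'))

from utils.dynamics import Dynamics   # package-qualified imports, as in this change
from utils.sampler import propose
```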
41 changes: 21 additions & 20 deletions utils/sampler.py
@@ -26,27 +26,27 @@
TF_FLOAT = tf.float32

def propose(x, dynamics, init_v=None, aux=None, do_mh_step=False, log_jac=False):
if dynamics.hmc:
Lx, Lv, px = dynamics.forward(x, init_v=init_v, aux=aux)
return Lx, Lv, px, [tf_accept(x, Lx, px)]
else:
# sample mask for forward/backward
mask = tf.cast(tf.random_uniform((tf.shape(x)[0], 1), maxval=2, dtype=tf.int32), TF_FLOAT)
Lx1, Lv1, px1 = dynamics.forward(x, aux=aux, log_jac=log_jac)
Lx2, Lv2, px2 = dynamics.backward(x, aux=aux, log_jac=log_jac)
if dynamics.hmc:
Lx, Lv, px = dynamics.forward(x, init_v=init_v, aux=aux)
return Lx, Lv, px, [tf_accept(x, Lx, px)]
else:
# sample mask for forward/backward
mask = tf.cast(tf.random_uniform((tf.shape(x)[0], 1), maxval=2, dtype=tf.int32), TF_FLOAT)
Lx1, Lv1, px1 = dynamics.forward(x, aux=aux, log_jac=log_jac)
Lx2, Lv2, px2 = dynamics.backward(x, aux=aux, log_jac=log_jac)

Lx = mask * Lx1 + (1 - mask) * Lx2

Lv = None
if init_v is not None:
Lv = mask * Lv1 + (1 - mask) * Lv2
Lv = mask * Lv1 + (1 - mask) * Lv2

px = tf.squeeze(mask, axis=1) * px1 + tf.squeeze(1 - mask, axis=1) * px2

outputs = []

if do_mh_step:
outputs.append(tf_accept(x, Lx, px))
outputs.append(tf_accept(x, Lx, px))

return Lx, Lv, px, outputs
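For intuition, a toy NumPy sketch of the per-chain forward/backward mixing that `propose` performs above: `mask` is a column of 0/1 draws, so each chain keeps either its forward proposal (`Lx1`) or its backward proposal (`Lx2`). The array values below are invented purely for illustration:

```python
import numpy as np

batch, dim = 4, 2
mask = np.random.randint(0, 2, size=(batch, 1)).astype(np.float32)  # 1 -> forward, 0 -> backward
Lx1 = np.ones((batch, dim))    # stand-in for the forward proposals
Lx2 = -np.ones((batch, dim))   # stand-in for the backward proposals

Lx = mask * Lx1 + (1 - mask) * Lx2  # each row is copied from exactly one of Lx1/Lx2
print(Lx)
```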

@@ -66,15 +66,15 @@ def body(x, v, log_jac, t):
return Lx, Lv, log_jac+px, t+1

final_x, final_v, log_jac, _ = tf.while_loop(
cond=cond,
body=body,
loop_vars=[
init_x,
init_v,
tf.zeros((tf.shape(init_x)[0],)),
tf.constant(0.),
]
)
cond=cond,
body=body,
loop_vars=[
init_x,
init_v,
tf.zeros((tf.shape(init_x)[0],)),
tf.constant(0.),
]
)

p_accept = dynamics.p_accept(init_x, init_v, final_x, final_v, log_jac, aux=aux)

@@ -83,4 +83,5 @@ def body(x, v, log_jac, t):
outputs.append(tf_accept(init_x, final_x, p_accept))

return final_x, final_v, p_accept, outputs
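The reformatted `tf.while_loop` call above follows the usual cond/body/loop_vars pattern: both `cond` and `body` receive every loop variable, and `body` returns updated values for all of them. A minimal, self-contained TensorFlow 1.x sketch of that pattern (unrelated to the L2HMC dynamics, just the loop mechanics):

```python
import tensorflow as tf

def cond(x, t):
    return t < 5.0                       # keep looping while the counter is below 5

def body(x, t):
    return x + 2.0, t + 1.0              # update every loop variable each iteration

final_x, final_t = tf.while_loop(
    cond=cond,
    body=body,
    loop_vars=[tf.constant(0.0), tf.constant(0.0)],
)

with tf.Session() as sess:
    print(sess.run([final_x, final_t]))  # -> [10.0, 5.0]
```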

