Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion torchkit/flows.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ def sample(self, n=1, context=None, **kwargs):
if self.gpu:
spl = spl.cuda()
lgd = lgd.cuda()
context = context.gpu()
context = context.cuda()

return self.forward((spl, lgd, context))

Expand Down
14 changes: 8 additions & 6 deletions torchkit/model_iaf_toy.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ def __init__(self, target_energy):
# flows.IAF(2, 128, 1, 3),
# flows.FlipFlow(1),
# flows.IAF(2, 128, 1, 3))
self.mdl = flows.IAF_DSF(2, 64, 1, 4, realify=nn_.softplus,
self.mdl = flows.IAF_DSF(2, 64, 1, 4, #realify=nn_.softplus,
num_ds_dim=5, num_ds_layers=2)

self.optim = optim.Adam(self.mdl.parameters(), lr=0.0005,
Expand All @@ -41,7 +41,7 @@ def __init__(self, target_energy):

def train(self):

total = 10000
total = 1000

for it in range(total):

Expand All @@ -56,7 +56,7 @@ def train(self):

if ((it + 1) % 1000) == 0:
print 'Iteration: [%4d/%4d] loss: %.8f' % \
(it+1, total, loss.data[0])
(it+1, total, loss.item())



Expand All @@ -76,20 +76,22 @@ def train(self):
X = np.concatenate((xx.reshape(n**2,1),yy.reshape(n**2,1)),1)
X = X.astype('float32')
X = Variable(torch.from_numpy(X))
Z = ef(X).data.numpy().reshape(n,n)
Z = ef(X).data.cpu().numpy().reshape(n,n)
ax.pcolormesh(xx,yy,np.exp(Z))
ax.axis('off')
plt.xlim((-10,10))
plt.ylim((-10,10))

ax = fig.add_subplot(1,2,2)
data = mdl.mdl.sample(n**2)[0].data.numpy()
data = mdl.mdl.sample(n**2)[0].data.cpu().numpy()
XX = data[:,0]
YY = data[:,1]
plot = ax.hist2d(XX,YY,200,range=np.array([(-10, 10), (-10, 10)]))
#plot = ax.hist2d(XX,YY,200,range=np.array([(-10, 10), (-10, 10)]))
plt.scatter(XX, YY, s=1, c='r', alpha=0.01)
plt.xlim((-10,10))
plt.ylim((-10,10))
plt.axis('off')

plt.show()


64 changes: 43 additions & 21 deletions torchkit/model_maf_toy.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@

class model(object):

def __init__(self, sampler, n=64):
def __init__(self, sampler, n=64, cuda=False):
# self.mdl = nn_.SequentialFlow(
# flows.IAF(2, 64, 1, 2),
# flows.FlipFlow(1),
Expand All @@ -36,10 +36,10 @@ def __init__(self, sampler, n=64):
# flows.IAF(2, 64, 1, 2),
# flows.FlipFlow(1),
# flows.IAF(2, 64, 1, 2))
# self.mdl = flows.IAF_DDSF(2, 64, 1, 3,
# num_ds_dim=2, num_ds_layers=2)
self.mdl = flows.IAF_DSF(2, 64, 1, 3,
num_ds_dim=4)
self.mdl = flows.IAF_DDSF(2, 16, 1, 2,
num_ds_dim=16, num_ds_layers=2)
# self.mdl = flows.IAF_DSF(2, 64, 1, 3,
# num_ds_dim=4)

self.optim = optim.Adam(self.mdl.parameters(), lr=0.005,
betas=(0.9, 0.999))
Expand All @@ -50,12 +50,28 @@ def __init__(self, sampler, n=64):
self.context = Variable(torch.FloatTensor(n, 1).zero_()) + 2.0
self.lgd = Variable(torch.FloatTensor(n).zero_())
self.zeros = Variable(torch.FloatTensor(n, 2).zero_())




self.gpu = cuda
if self.gpu:
self.mdl.cuda()

self.context = self.context.cuda()
self.lgd = self.lgd.cuda()
self.zeros = self.zeros.cuda()


def density(self, spl, lgd=None, context=None, zeros=None):
    """Return the per-sample log-density of `spl` under the flow model.

    Args:
        spl: batch of sample points to evaluate (FloatTensor/Variable,
            shape (batch, 2) to match the pre-allocated defaults).
        lgd: running log-determinant accumulator; defaults to the
            zero-initialized `self.lgd`.
        context: conditioning context passed to the flow; defaults to
            `self.context`.
        zeros: zero tensor used as the base-distribution mean (and, via
            `zeros + 1.0`, its scale); defaults to `self.zeros`.

    Returns:
        The negated loss, i.e. log-density per sample (shape (batch,)).
    """
    # Fall back to the pre-allocated batch-sized tensors when not supplied.
    lgd = self.lgd if lgd is None else lgd
    context = self.context if context is None else context
    zeros = self.zeros if zeros is None else zeros

    # Move all inputs to GPU when the model was constructed with cuda=True,
    # so externally supplied CPU tensors also work.
    if self.gpu:
        spl = spl.cuda()
        lgd = lgd.cuda()
        context = context.cuda()
        zeros = zeros.cuda()

    # Run the flow: maps samples to base-space `z` and accumulates logdet.
    z, logdet, _ = self.mdl((spl, lgd, context))
    # Loss is negative log-prob under the base distribution (presumably a
    # standard normal, given mean=zeros, scale=zeros+1.0 — semantics of
    # utils.log_normal not visible here; TODO confirm) minus the
    # log|det Jacobian|; negate again to return log-density.
    losses = - utils.log_normal(z, zeros, zeros+1.0).sum(1) - logdet
    return - losses
Expand All @@ -80,7 +96,7 @@ def train(self, total=2000):

if ((it + 1) % 100) == 0:
print 'Iteration: [%4d/%4d] loss: %.8f' % \
(it+1, total, loss.data[0])
(it+1, total, loss.item())

#self.mdl.made.randomize()

Expand Down Expand Up @@ -119,21 +135,21 @@ def rvs(self, n):
grid = np.concatenate([grid[0].reshape(nmodesperdim**2,1),
grid[1].reshape(nmodesperdim**2,1)],1)

#mix = Mixture(
# np.ones(nmodesperdim**2) / float(nmodesperdim**2),
# [multivariate_normal(mean, 1/float(nmodesperdim*np.log(nmodesperdim))) for mean in grid] )
mix = Mixture(
np.ones(nmodesperdim**2) / float(nmodesperdim**2),
[multivariate_normal(mean, 1/float(nmodesperdim*np.log(nmodesperdim))) for mean in grid] )
#mix = Mixture(
# [0.6, 0.4],
# [multivariate_normal((2.0,2.0), 1.0), multivariate_normal((-3.0,-3.0), 0.5)])
mix = Mixture([0.1, 0.3, 0.4, 0.2], [
multivariate_normal([-5., 0]),
multivariate_normal([5., 0]),
multivariate_normal([0, 5.]),
multivariate_normal([0, -5.])])
# mix = Mixture([0.1, 0.3, 0.4, 0.2], [
# multivariate_normal([-5., 0]),
# multivariate_normal([5., 0]),
# multivariate_normal([0, 5.]),
# multivariate_normal([0, -5.])])

mdl = model(mix.rvs, n=64)
mdl = model(mix.rvs, n=256, cuda=True)
#input('x')
mdl.train()
mdl.train(2000)


# plot figure
Expand All @@ -160,7 +176,7 @@ def rvs(self, n):


ax = fig.add_subplot(1,2,2)
Z = mdl.density(X, lgd, context, zeros).data.numpy().reshape(n,n)
Z = mdl.density(X, lgd, context, zeros).data.cpu().numpy().reshape(n,n)
ax.pcolormesh(xx,yy,np.exp(Z))
ax.axis('off')
plt.xlim((-10,10))
Expand All @@ -185,14 +201,20 @@ def rvs(self, n):
zeros = Variable(torch.FloatTensor(n**2, 2).zero_())


Z = mdl.density(X, lgd, context, zeros).data.numpy().reshape(n,n)
Z = mdl.density(X, lgd, context, zeros).data.cpu().numpy().reshape(n,n)
ax.pcolormesh(xx,yy,np.exp(Z))

mdl.mdl.eval()
nspl, logdet, context = mdl.mdl.sample(10000)
nspl = nspl.detach().cpu().numpy()
ax.scatter(nspl[:,0], nspl[:,1], s=1, alpha=0.2, c='r')

ax.axis('off')
plt.xlim((-10,10))
plt.ylim((-10,10))



plt.show()



Loading