-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathnormal_models.py
More file actions
124 lines (106 loc) · 4.08 KB
/
normal_models.py
File metadata and controls
124 lines (106 loc) · 4.08 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
from torch import nn
# 1D Generator
class Normal_Generator(nn.Module):
    def __init__(self, latent_dim):
        """A generator for the normal distribution with variable latent dimensions.

        Args:
            latent_dim (int): size of the input noise vector
        """
        super(Normal_Generator, self).__init__()
        self.latent_dim = latent_dim
        # Fixed 64 -> 32 -> 1 fully-connected stack.
        self.map1 = nn.Linear(latent_dim, 64)
        self.map2 = nn.Linear(64, 32)
        self.map3 = nn.Linear(32, 1)
        self.a = nn.LeakyReLU()

    def forward(self, x):
        """Map a noise batch z to scalar samples y (no output activation)."""
        hidden = self.a(self.map1(x))
        hidden = self.a(self.map2(hidden))
        return self.map3(hidden)
# 1D Discriminator
class Normal_Discriminator(nn.Module):
    def __init__(self, input_dim):
        """A discriminator for discerning real from generated samples.

        Args:
            input_dim (int): width of the input (output of generator)
        """
        super(Normal_Discriminator, self).__init__()
        self.input_dim = input_dim
        # Fixed 64 -> 32 -> 1 fully-connected stack.
        self.map1 = nn.Linear(input_dim, 64)
        self.map2 = nn.Linear(64, 32)
        self.map3 = nn.Linear(32, 1)
        self.a = nn.LeakyReLU()
        self.f = nn.Sigmoid()

    def forward(self, input_tensor):
        """Forward pass; output confidence probability of sample being real.

        Returns a tensor in (0, 1) of shape (batch, 1).
        """
        # BUG FIX: the original body referenced an undefined name `x` on the
        # first line (`x = self.map1(x)`), raising NameError on every call.
        # The parameter `input_tensor` must feed the first layer.
        x = self.map1(input_tensor)
        x = self.a(x)
        x = self.map2(x)
        x = self.a(x)
        x = self.map3(x)
        x = self.f(x)
        return x
# 1D Variable Generator
class V_Generator(nn.Module):
    def __init__(self, latent_dim, layers, output_activation=None):
        """A generator for mapping a latent space to a sample space.

        Args:
            latent_dim (int): latent dimension ("noise vector")
            layers (List[int]): A list of layer widths including output width
            output_activation: torch activation function class or None
        """
        super(V_Generator, self).__init__()
        self.latent_dim = latent_dim
        self.output_activation = output_activation
        self._init_layers(layers)

    def _init_layers(self, layers):
        """Initialize the layers and store as self.module_list."""
        self.module_list = nn.ModuleList()
        in_width = self.latent_dim
        final = len(layers) - 1
        for idx, out_width in enumerate(layers):
            # Linear stage, followed by LeakyReLU on every hidden layer.
            self.module_list.append(nn.Linear(in_width, out_width))
            in_width = out_width
            if idx < final:
                self.module_list.append(nn.LeakyReLU())
            elif self.output_activation is not None:
                # Optional activation on the output layer only.
                self.module_list.append(self.output_activation())

    def forward(self, input_tensor):
        """Forward pass; map latent vectors to samples."""
        out = input_tensor
        for module in self.module_list:
            out = module(out)
        return out
# 1D Variable Discriminator
class V_Discriminator(nn.Module):
    def __init__(self, input_dim, layers):
        """A discriminator for discerning real from generated samples.

        params:
            input_dim (int): width of the input
            layers (List[int]): A list of layer widths including output width
        Output activation is Sigmoid.
        """
        super(V_Discriminator, self).__init__()
        self.input_dim = input_dim
        self._init_layers(layers)

    def _init_layers(self, layers):
        """Initialize the layers and store as self.module_list."""
        self.module_list = nn.ModuleList()
        in_width = self.input_dim
        final = len(layers) - 1
        for idx, out_width in enumerate(layers):
            self.module_list.append(nn.Linear(in_width, out_width))
            in_width = out_width
            # Sigmoid caps the final layer; LeakyReLU follows every hidden one.
            self.module_list.append(nn.Sigmoid() if idx == final else nn.LeakyReLU())

    def forward(self, input_tensor):
        """Forward pass; map samples to confidence they are real [0, 1]."""
        out = input_tensor
        for module in self.module_list:
            out = module(out)
        return out
# 2D Generator