forked from AlexTsai1618/VICEROY
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtorch_model.py
More file actions
68 lines (53 loc) · 2.29 KB
/
torch_model.py
File metadata and controls
68 lines (53 loc) · 2.29 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
import torch
import torch.nn as nn
import torch.nn.functional as F
class torch_organization_model(nn.Module):
    """Bottom (organization-side) MLP: maps raw features to a dense embedding.

    Architecture: Linear(input_dim -> hidden_units[0]) -> ReLU ->
    [Linear -> ReLU for each additional entry of hidden_units] ->
    Linear(hidden_units[-1] -> out_dim).

    Args:
        input_dim: size of the input feature vector (default 89).
        hidden_units: hidden-layer widths; must be non-empty.
            NOTE(review): mutable list default is only read, never mutated,
            so it is safe here.
        out_dim: size of the emitted embedding (default 64).
    """
    def __init__(self, input_dim=89, hidden_units=[128], out_dim = 64):
        super(torch_organization_model, self).__init__()
        self.input_layer = nn.Linear(input_dim, hidden_units[0])
        hidden_layers = []
        for i in range(1, len(hidden_units)):
            hidden_layers.append(nn.Linear(hidden_units[i-1], hidden_units[i]))
            hidden_layers.append(nn.ReLU())
        # Empty Sequential (identity) when hidden_units has a single entry.
        self.hidden_layers = nn.Sequential(*hidden_layers)
        self.output_layer = nn.Linear(hidden_units[-1], out_dim)

    def forward(self, x):
        """Return the (batch, out_dim) embedding for x of shape (batch, input_dim)."""
        # Bug fix: the original applied no activation after input_layer, so with
        # the default single-entry hidden_units the whole network collapsed to
        # one affine map. F.relu (the module imports F but never used it)
        # restores the intended non-linearity without renaming any parameters,
        # so existing state_dicts still load.
        x = F.relu(self.input_layer(x))
        x = self.hidden_layers(x)
        x = self.output_layer(x)
        return x
class torch_top_model(nn.Module):
    """Top (label-side) MLP: maps concatenated embeddings to class logits.

    Architecture: Linear(input_dim -> hidden_units[0]) -> ReLU ->
    [Linear -> ReLU for each additional entry of hidden_units] ->
    Linear(hidden_units[-1] -> num_classes).

    Args:
        input_dim: size of the (concatenated) input embedding (default 89).
        hidden_units: hidden-layer widths; must be non-empty.
            NOTE(review): mutable list default is only read, never mutated.
        num_classes: number of output classes (default 2).
    """
    def __init__(self, input_dim=89, hidden_units=[128], num_classes=2):
        super(torch_top_model, self).__init__()
        self.input_layer = nn.Linear(input_dim, hidden_units[0])
        hidden_layers = []
        for i in range(1, len(hidden_units)):
            hidden_layers.append(nn.Linear(hidden_units[i-1], hidden_units[i]))
            hidden_layers.append(nn.ReLU())
        # Empty Sequential (identity) when hidden_units has a single entry.
        self.hidden_layers = nn.Sequential(*hidden_layers)
        self.output_layer = nn.Linear(hidden_units[-1], num_classes)
        # Kept for interface compatibility; unused in forward (no parameters,
        # so state_dict is unaffected either way).
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Return raw (batch, num_classes) logits for x of shape (batch, input_dim).

        Softmax is deliberately NOT applied: callers are expected to use a loss
        such as CrossEntropyLoss that consumes logits directly.
        """
        # Bug fix: the original had no activation after input_layer, so with the
        # default single-entry hidden_units the model degenerated to one affine
        # map. F.relu restores the non-linearity without renaming parameters.
        x = F.relu(self.input_layer(x))
        x = self.hidden_layers(x)
        x = self.output_layer(x)
        return x
class MlpModel(nn.Module):
    """Standalone MLP classifier: maps features to class logits.

    Architecture: Linear(input_dim -> hidden_units[0]) -> ReLU ->
    [Linear -> ReLU for each additional entry of hidden_units] ->
    Linear(hidden_units[-1] -> num_classes).

    Args:
        input_dim: size of the input feature vector (default 89).
        hidden_units: hidden-layer widths; must be non-empty.
            NOTE(review): mutable list default is only read, never mutated.
        num_classes: number of output classes (default 2).
    """
    def __init__(self, input_dim=89, hidden_units=[128], num_classes=2):
        super(MlpModel, self).__init__()
        self.input_layer = nn.Linear(input_dim, hidden_units[0])
        hidden_layers = []
        for i in range(1, len(hidden_units)):
            hidden_layers.append(nn.Linear(hidden_units[i-1], hidden_units[i]))
            hidden_layers.append(nn.ReLU())
        # Empty Sequential (identity) when hidden_units has a single entry.
        self.hidden_layers = nn.Sequential(*hidden_layers)
        self.output_layer = nn.Linear(hidden_units[-1], num_classes)
        # Kept for interface compatibility; unused in forward (no parameters,
        # so state_dict is unaffected either way).
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Return raw (batch, num_classes) logits for x of shape (batch, input_dim).

        Neither softmax nor sigmoid is applied (the original had both commented
        out): callers are expected to pair this with a logits-based loss such as
        CrossEntropyLoss.
        """
        # Bug fix: the original had no activation after input_layer, so with the
        # default single-entry hidden_units the model degenerated to one affine
        # map. F.relu restores the non-linearity without renaming parameters.
        x = F.relu(self.input_layer(x))
        x = self.hidden_layers(x)
        x = self.output_layer(x)
        return x