-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmodels.py
More file actions
104 lines (96 loc) · 3.44 KB
/
models.py
File metadata and controls
104 lines (96 loc) · 3.44 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
# -*- coding: utf-8 -*-
"""models.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/11kWP4UUh-KZHaplClmGbxoguCCNQxkJy
"""
from keras import initializers
from keras.models import Model
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Embedding
from keras.layers import Input
from keras.layers import Conv1D
from keras.layers import MaxPooling1D
from keras.layers import GlobalMaxPooling1D
from keras.layers import Dropout
from keras.layers import concatenate
def create_mlp(dim, class_names):
    """Build a small MLP classifier.

    Architecture: one ReLU hidden layer of 40 units over a `dim`-wide
    input vector, followed by a softmax output with one unit per class.

    Args:
        dim: number of input features.
        class_names: sequence of class labels; only its length is used,
            to size the output layer.

    Returns:
        An uncompiled keras Sequential model.
    """
    # Sequential accepts the full layer stack up front; the first Dense
    # carries input_dim so the model knows its input shape.
    return Sequential([
        Dense(40, input_dim=dim, activation="relu"),
        Dense(len(class_names), activation="softmax"),
    ])
def create_mlp_multi(dim):
    """Build the MLP branch for a multi-input model.

    Identical to ``create_mlp`` but without the softmax head, so the
    40-unit hidden activation can be concatenated with another branch
    (see ``create_multi``).

    Args:
        dim: number of input features.

    Returns:
        An uncompiled keras Sequential model ending in the hidden layer.
    """
    # Headless branch: the classification layer is added later, after
    # this output is fused with the CNN branch.
    return Sequential([Dense(40, input_dim=dim, activation="relu")])
def create_cnn(num_tokens, embedding_dim, embedding_matrix, class_names, filter):
    """Build a 1D-CNN text classifier over a frozen pretrained embedding.

    Pipeline: int token ids -> frozen Embedding -> (Conv1D + MaxPool) x 2
    -> Conv1D -> GlobalMaxPool -> Dense+Dropout -> softmax.

    Args:
        num_tokens: vocabulary size for the embedding layer.
        embedding_dim: width of each embedding vector.
        embedding_matrix: pretrained weights, shape (num_tokens, embedding_dim).
        class_names: sequence of class labels; only its length is used.
        filter: number of filters per conv layer and width of the dense
            layer. NOTE(review): this name shadows the ``filter`` builtin;
            kept to preserve the keyword-argument interface for callers.

    Returns:
        An uncompiled keras functional Model (variable-length int input).
    """
    # Variable-length sequences of token ids.
    token_ids = Input(shape=(None,), dtype="int64")
    # Embedding is initialized from the pretrained matrix and frozen.
    features = Embedding(
        num_tokens,
        embedding_dim,
        embeddings_initializer=initializers.Constant(embedding_matrix),
        trainable=False,
    )(token_ids)
    # Two identical conv + downsampling stages.
    for _ in range(2):
        features = Conv1D(filter, 5, activation="relu")(features)
        features = MaxPooling1D(5)(features)
    # Final conv, then collapse the time axis to a fixed-size vector.
    features = Conv1D(filter, 5, activation="relu")(features)
    features = GlobalMaxPooling1D()(features)
    # Fully-connected head with dropout regularization.
    features = Dense(filter, activation="relu")(features)
    features = Dropout(0.5)(features)
    probabilities = Dense(len(class_names), activation="softmax")(features)
    return Model(token_ids, probabilities)
def create_cnn_multi(num_tokens, embedding_dim, embedding_matrix, class_names, filter):
    """Build the CNN branch for a multi-input model.

    Same trunk as ``create_cnn`` but without the softmax head: the model
    ends at the post-dropout dense activation so it can be concatenated
    with the MLP branch (see ``create_multi``).

    Args:
        num_tokens: vocabulary size for the embedding layer.
        embedding_dim: width of each embedding vector.
        embedding_matrix: pretrained weights, shape (num_tokens, embedding_dim).
        class_names: unused here; kept for signature parity with
            ``create_cnn``. NOTE(review): accepted but never read.
        filter: number of filters per conv layer and width of the dense
            layer. NOTE(review): shadows the ``filter`` builtin; kept to
            preserve the keyword-argument interface for callers.

    Returns:
        An uncompiled keras functional Model ending in the dropout layer.
    """
    # Variable-length sequences of token ids.
    token_ids = Input(shape=(None,), dtype="int64")
    # Embedding is initialized from the pretrained matrix and frozen.
    features = Embedding(
        num_tokens,
        embedding_dim,
        embeddings_initializer=initializers.Constant(embedding_matrix),
        trainable=False,
    )(token_ids)
    # Two identical conv + downsampling stages.
    for _ in range(2):
        features = Conv1D(filter, 5, activation="relu")(features)
        features = MaxPooling1D(5)(features)
    # Final conv, then collapse the time axis to a fixed-size vector.
    features = Conv1D(filter, 5, activation="relu")(features)
    features = GlobalMaxPooling1D()(features)
    # Headless dense + dropout: classification happens in create_multi.
    features = Dense(filter, activation="relu")(features)
    features = Dropout(0.5)(features)
    return Model(token_ids, features)
def create_multi(mlp, cnn, class_names):
    """Fuse the MLP and CNN branches into one classifier.

    Concatenates both branch outputs and adds a single softmax head, so
    the combined model takes [mlp_input, cnn_input] and predicts one of
    ``len(class_names)`` classes.

    Args:
        mlp: headless MLP branch (e.g. from ``create_mlp_multi``).
        cnn: headless CNN branch (e.g. from ``create_cnn_multi``).
        class_names: sequence of class labels; only its length is used.

    Returns:
        An uncompiled keras functional Model with two inputs.
    """
    # Join the two feature vectors along the feature axis.
    fused = concatenate([mlp.output, cnn.output])
    # Single classification head over the fused representation.
    scores = Dense(len(class_names), activation="softmax")(fused)
    # Both original branch inputs remain the model's inputs.
    return Model(inputs=[mlp.input, cnn.input], outputs=scores)