-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmodel_callbacks.py
More file actions
86 lines (76 loc) · 3.09 KB
/
model_callbacks.py
File metadata and controls
86 lines (76 loc) · 3.09 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
import numpy as np
from keras import callbacks
from sklearn import metrics
# local code:
from model_training import get_validation_data, make_training_data_generator
from model_architecture import my_model
class MyCallback(callbacks.Callback):
    """
    Custom Keras callback that computes and prints the validation ROC AUC
    at the end of each training epoch.

    Allowable over-writable methods:
    on_epoch_begin, on_epoch_end, on_batch_begin, on_batch_end,
    on_train_begin, on_train_end
    """
    def on_epoch_end(self, epoch, logs=None):
        # `logs=None` replaces the original mutable default `logs={}`:
        # a dict default is created once and shared across every call,
        # a classic Python pitfall. `logs` is accepted only for Keras
        # API compatibility and is not read here.
        #
        # NOTE(review): relies on `self.validation_data` being populated
        # by Keras — this only happens when validation data is passed to
        # fit, and newer Keras versions no longer set it. Confirm against
        # the Keras version in use.
        validation_labels = self.validation_data[1]
        validation_scores = self.model.predict(self.validation_data[0])
        # predict() returns shape (n, 1); flatten to a 1-D score vector
        # (np.ravel is equivalent to the per-row [el[0] ...] unpacking):
        validation_scores = np.ravel(validation_scores)
        fpr, tpr, thres = metrics.roc_curve(y_true=validation_labels,
                                            y_score=validation_scores)
        auc = metrics.auc(fpr, tpr)
        print('\n\tEpoch {}, Validation AUC = {}'.format(epoch,
                                                         np.round(auc, 6)))
if __name__ == '__main__':
    # first, let's use some code from previous sections to get our model
    # and data up and running:
    features_length = 1024
    num_obs_per_epoch = 5000
    batch_size = 128
    # Keras requires an *integer* number of steps per epoch; the original
    # `num_obs_per_epoch / batch_size` produced a float (5000 / 128 ==
    # 39.0625), which newer Keras versions reject. Compute it once with
    # floor division and reuse it for every fit call below.
    steps_per_epoch = num_obs_per_epoch // batch_size
    # create the model using the function from the model architecture section:
    model = my_model(input_length=features_length)
    # make the training data generator:
    training_generator = make_training_data_generator(
        batch_size=batch_size,
        features_length=features_length
    )
    # and the validation data (a (features, labels) pair):
    validation_data = get_validation_data(features_length=features_length,
                                          n_validation_files=1000)
    ##########################################################################
    # now for some new code:
    # first, use a built-in callback to save the best model over training.
    # ModelCheckpoint with save_best_only=True keeps only the weights from
    # the epoch with the lowest monitored value ('val_loss'):
    model.fit_generator(
        generator=training_generator,
        steps_per_epoch=steps_per_epoch,
        epochs=5,
        validation_data=validation_data,
        callbacks=[
            callbacks.ModelCheckpoint('best_model.h5',
                                      monitor='val_loss',
                                      save_best_only=True)
        ],
    )
    # next, use the built-in callback to record the model after every epoch;
    # the {epoch} placeholder in the filename is filled in by Keras:
    model.fit_generator(
        generator=training_generator,
        steps_per_epoch=steps_per_epoch,
        epochs=5,
        validation_data=validation_data,
        callbacks=[
            callbacks.ModelCheckpoint('model_epoch_{epoch}.h5',
                                      monitor='val_loss',
                                      save_best_only=False)
        ],
    )
    # now try using our custom callback with AUC logging!
    model.fit_generator(
        generator=training_generator,
        steps_per_epoch=steps_per_epoch,
        # making the training artificially fast to showcase the validation logs!
        epochs=5,
        validation_data=validation_data,
        callbacks=[
            MyCallback()
        ],
    )