DCASE_training_functions.py
import keras
from keras import backend as K
import numpy as np
import threading


class LR_WarmRestart(keras.callbacks.Callback):
    """Cosine-annealed learning rate with warm restarts (SGDR).

    The learning rate decays from initial_lr to min_lr along a cosine
    schedule; at each epoch listed in epochs_restart the schedule restarts
    and the period is doubled (via Tmult).
    """

    def __init__(self, nbatch, initial_lr, min_lr, epochs_restart, Tmult):
        self.initial_lr = initial_lr
        self.min_lr = min_lr
        self.epochs_restart = epochs_restart
        self.nbatch = nbatch  # number of batches per epoch
        self.currentEP = 0
        self.startEP = 0
        self.Tmult = Tmult

    def on_epoch_begin(self, epoch, logs=None):
        # Shift the epoch counter by one once the first restart is reached,
        # so restarts line up with the entries in epochs_restart.
        if epoch + 1 < self.epochs_restart[0]:
            self.currentEP = epoch
        else:
            self.currentEP = epoch + 1

        if np.isin(self.currentEP, self.epochs_restart):
            self.startEP = self.currentEP
            self.Tmult = 2 * self.Tmult  # double the period at each restart

    def on_epoch_end(self, epoch, logs=None):
        lr = K.get_value(self.model.optimizer.lr)
        print('\nLearningRate:{:.6f}'.format(lr))

    def on_batch_begin(self, batch, logs=None):
        # Fractional progress (in epochs) since the last restart.
        pts = self.currentEP + batch / self.nbatch - self.startEP
        decay = 1 + np.cos(pts / self.Tmult * np.pi)
        lr = self.min_lr + 0.5 * (self.initial_lr - self.min_lr) * decay
        K.set_value(self.model.optimizer.lr, lr)
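
# Usage sketch (an assumption, not part of the original file): wiring the
# callback into training. `model`, `X`, and `y` are hypothetical placeholders,
# and the restart epochs and batch size below are illustrative values only.
#
#   nbatch = int(np.ceil(len(X) / 64))
#   schedule = LR_WarmRestart(nbatch=nbatch, initial_lr=0.1, min_lr=1e-5,
#                             epochs_restart=[3.0, 7.0, 15.0, 31.0], Tmult=2)
#   model.fit(X, y, batch_size=64, epochs=31, callbacks=[schedule])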


class threadsafe_iter:
    """Takes an iterator/generator and makes it thread-safe by
    serializing calls to the `next` method of the given iterator/generator.
    """

    def __init__(self, it):
        self.it = it
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def __next__(self):
        with self.lock:
            return self.it.__next__()


def threadsafe_generator(f):
    """A decorator that takes a generator function and makes it thread-safe."""
    def g(*a, **kw):
        return threadsafe_iter(f(*a, **kw))
    return g
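
# Usage sketch (an assumption, not part of the original file): wrapping a
# hypothetical batch generator so that concurrent next() calls from multiple
# Keras workers are serialized rather than interleaved.
#
#   @threadsafe_generator
#   def batch_stream(X, y, batch_size=32):
#       while True:
#           ids = np.random.choice(len(X), batch_size, replace=False)
#           yield X[ids], y[ids]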


class MixupGenerator:
    """Yields mixup batches: random temporal crops of two disjoint halves of
    a shuffled index set are blended with Beta(alpha, alpha) weights.
    """

    def __init__(self, X_train, y_train, batch_size=32, alpha=0.2,
                 shuffle=True, crop_length=400):
        self.X_train = X_train
        self.y_train = y_train
        self.batch_size = batch_size
        self.alpha = alpha
        self.shuffle = shuffle
        self.sample_num = len(X_train)
        self.lock = threading.Lock()
        self.NewLength = crop_length
        # Channel permutation that swaps each left/right pair of a
        # 6-channel input.
        self.swap_inds = [1, 0, 3, 2, 5, 4]

    def __iter__(self):
        return self

    @threadsafe_generator
    def __call__(self):
        with self.lock:
            while True:
                indexes = self.__get_exploration_order()
                # Each mixup batch consumes 2 * batch_size samples.
                itr_num = len(indexes) // (self.batch_size * 2)

                for i in range(itr_num):
                    batch_ids = indexes[i * self.batch_size * 2:
                                        (i + 1) * self.batch_size * 2]
                    X, y = self.__data_generation(batch_ids)
                    yield X, y

    def __get_exploration_order(self):
        indexes = np.arange(self.sample_num)
        if self.shuffle:
            np.random.shuffle(indexes)
        return indexes

    def __data_generation(self, batch_ids):
        _, h, w, c = self.X_train.shape
        # One mixup weight per example in the batch.
        l = np.random.beta(self.alpha, self.alpha, self.batch_size)
        X_l = l.reshape(self.batch_size, 1, 1, 1)
        y_l = l.reshape(self.batch_size, 1)

        # Fancy indexing copies, so the crops below do not touch X_train.
        X1 = self.X_train[batch_ids[:self.batch_size]]
        X2 = self.X_train[batch_ids[self.batch_size:]]

        for j in range(X1.shape[0]):
            # Random crop of length NewLength along the time axis. The +1
            # makes the last valid start position reachable and keeps
            # randint from failing when the width equals NewLength.
            StartLoc1 = np.random.randint(0, X1.shape[2] - self.NewLength + 1)
            StartLoc2 = np.random.randint(0, X2.shape[2] - self.NewLength + 1)

            X1[j, :, 0:self.NewLength, :] = X1[j, :, StartLoc1:StartLoc1 + self.NewLength, :]
            X2[j, :, 0:self.NewLength, :] = X2[j, :, StartLoc2:StartLoc2 + self.NewLength, :]

            if X1.shape[-1] == 6:
                # Randomly swap left and right channels. The j:j+1 slice
                # keeps the channel axis last under numpy fancy indexing.
                if np.random.randint(2) == 1:
                    X1[j, :, :, :] = X1[j:j + 1, :, :, self.swap_inds]
                if np.random.randint(2) == 1:
                    X2[j, :, :, :] = X2[j:j + 1, :, :, self.swap_inds]

        X1 = X1[:, :, 0:self.NewLength, :]
        X2 = X2[:, :, 0:self.NewLength, :]

        # Mixup: per-example convex combination of the two half-batches.
        X = X1 * X_l + X2 * (1.0 - X_l)

        if isinstance(self.y_train, list):
            # Multi-output models: mix each target array separately.
            y = []
            for y_train_ in self.y_train:
                y1 = y_train_[batch_ids[:self.batch_size]]
                y2 = y_train_[batch_ids[self.batch_size:]]
                y.append(y1 * y_l + y2 * (1.0 - y_l))
        else:
            y1 = self.y_train[batch_ids[:self.batch_size]]
            y2 = self.y_train[batch_ids[self.batch_size:]]
            y = y1 * y_l + y2 * (1.0 - y_l)

        return X, y
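
# Usage sketch (an assumption, not part of the original file): `model`,
# `X_train`, and `y_train` are hypothetical. Calling the instance returns the
# thread-safe iterator; each step consumes 2 * batch_size training samples.
#
#   train_gen = MixupGenerator(X_train, y_train, batch_size=32,
#                              alpha=0.2, crop_length=400)()
#   model.fit_generator(train_gen,
#                       steps_per_epoch=len(X_train) // (2 * 32),
#                       epochs=100)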