# implementations.py
import numpy as np

# ------------------------------------------------------------------------------------------
# MANDATORY FUNCTIONS
def mean_squared_error_gd(y, tx, initial_w, max_iters, gamma):
    """
    The Gradient Descent (GD) algorithm.
    ------------
    Arguments:
        y : array, shape = (N, )
        tx : array, shape = (N, D)
        initial_w : array, shape = (D, ). The initial guess (or the initialization) for the model parameters
        max_iters : a scalar denoting the total number of iterations of GD
        gamma : a scalar denoting the stepsize
    ------------
    Return:
        w : array, shape = (D, ). The vector of model parameters after the last iteration of GD
        loss : scalar, loss value for the last iteration of GD
    """
    w = initial_w
    loss = compute_loss(y, tx, w)
    for n_iter in range(max_iters):
        gradient = compute_gradient(y, tx, w)
        # Scalar stepsize times gradient vector; np.dot is unnecessary here.
        w = w - gamma * gradient
        loss = compute_loss(y, tx, w)
    return w, loss
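
# A minimal usage sketch on hypothetical toy data (not part of the assignment
# spec): y = x can be fit exactly, so GD should drive w toward [0, 1] and the
# loss toward 0.
#     y = np.array([0.0, 1.0, 2.0])
#     tx = np.c_[np.ones(3), np.arange(3.0)]
#     w, loss = mean_squared_error_gd(y, tx, np.zeros(2), max_iters=1000, gamma=0.1)
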
def mean_squared_error_sgd(y, tx, initial_w, max_iters, gamma, batch_size=1):
    """
    Stochastic Gradient Descent algorithm (SGD).
    ------------
    Arguments:
        y : array, shape = (N, )
        tx : array, shape = (N, D)
        initial_w : array, shape = (D, ). The initial guess (or the initialization) for the model parameters
        max_iters : a scalar denoting the total number of iterations of SGD
        gamma : a scalar denoting the stepsize
        batch_size : a scalar denoting the number of data points in a mini-batch used for computing the stochastic gradient
    ------------
    Return:
        w : array, shape = (D, ). The vector of model parameters after the last iteration of SGD
        loss : scalar, loss value at the final w, evaluated on the full dataset
    """
    w = initial_w
    for n_iter in range(max_iters):
        for minibatch_y, minibatch_tx in batch_iter(y, tx, batch_size):
            w = w - gamma * compute_stoch_gradient(minibatch_y, minibatch_tx, w)
    # Evaluate the final loss on the full dataset rather than the last
    # mini-batch; this also keeps `loss` defined when max_iters == 0.
    loss = compute_loss(y, tx, w)
    return w, loss
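
# Usage sketch (hypothetical data): same call signature as GD, but each update
# uses a random mini-batch, so w only tends toward the least-squares solution.
#     w, loss = mean_squared_error_sgd(y, tx, np.zeros(2), max_iters=1000, gamma=0.01)
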
def least_squares(y, tx):
    """
    Calculate the least squares solution
    ------------
    Arguments:
        y : array, shape = (N, ), N is the number of samples.
        tx : array, shape = (N, D), D is the number of features.
    ------------
    Return:
        w : optimal weights, numpy array of shape (D, ), D is the number of features.
        loss : scalar, loss value computed with MSE
    """
    # Solve the normal equations (tx^T tx) w = tx^T y.
    a = tx.T.dot(tx)
    b = tx.T.dot(y)
    w = np.linalg.solve(a, b)
    loss = compute_loss(y, tx, w)
    return w, loss

def ridge_regression(y, tx, lambda_):
    """
    Implement the ridge regression algorithm
    ------------
    Arguments:
        y : array, shape = (N, ), N is the number of samples.
        tx : array, shape = (N, D), D is the number of features.
        lambda_ : scalar, the regularization parameter.
    ------------
    Return:
        w : array, shape = (D, ), optimal weights
        loss : scalar, loss value computed with MSE (without the penalty term)
    """
    # Closed-form solution of the penalized normal equations,
    # with lambda' = 2 * N * lambda_ absorbing the MSE normalization.
    lambda_prime = 2 * tx.shape[0] * lambda_
    w = np.linalg.solve(tx.T.dot(tx) + lambda_prime * np.identity(tx.shape[1]), tx.T.dot(y))
    loss = compute_loss(y, tx, w)
    return w, loss
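
# Usage sketch (hypothetical data): lambda_ = 0 reproduces least_squares, and
# increasing lambda_ shrinks the weights toward zero.
#     w0, _ = ridge_regression(y, tx, lambda_=0.0)  # same w as least_squares(y, tx)
#     w1, _ = ridge_regression(y, tx, lambda_=1.0)  # ||w1|| <= ||w0||
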
def logistic_regression(y, tx, initial_w, max_iters, gamma):
    """
    Logistic regression using gradient descent.
    ------------
    Arguments:
        y : array, shape = (N, )
        tx : array, shape = (N, D)
        initial_w : array, shape = (D, )
        max_iters : a scalar denoting the total number of iterations of GD
        gamma : float, the stepsize
    ------------
    Return:
        w : array, shape = (D, ), final w after the logistic regression
        loss : scalar, loss value computed with the logistic loss
    """
    w = initial_w
    loss = calculate_logistic_loss(y, tx, w)
    for step in range(max_iters):
        gradient = calculate_logistic_gradient(y, tx, w)
        w = w - gamma * gradient
        loss = calculate_logistic_loss(y, tx, w)
    return w, loss
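
# Usage sketch (hypothetical data): labels must be in {0, 1} for this loss.
#     y = np.array([0.0, 0.0, 1.0, 1.0])
#     tx = np.c_[np.ones(4), np.array([-2.0, -1.0, 1.0, 2.0])]
#     w, loss = logistic_regression(y, tx, np.zeros(2), max_iters=1000, gamma=0.5)
# On this linearly separable toy set the loss should decrease toward 0.
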
def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma):
    """
    Regularized logistic regression using gradient descent, with L2 penalty
    lambda_ * ||w||^2. The returned loss does not include the penalty term.
    ------------
    Arguments:
        y : array, shape = (N, )
        tx : array, shape = (N, D)
        lambda_ : scalar, the regularization parameter
        initial_w : array, shape = (D, )
        max_iters : a scalar denoting the total number of iterations of GD
        gamma : scalar, the stepsize
    ------------
    Return:
        w : array, shape = (D, )
        loss : scalar, loss value computed with the (unpenalized) logistic loss
    """
    w = initial_w
    loss = calculate_logistic_loss(y, tx, w)
    for step in range(max_iters):
        # Gradient of the penalized objective: logistic gradient + 2 * lambda_ * w.
        gradient = calculate_logistic_gradient(y, tx, w) + 2 * lambda_ * w
        w = w - gamma * gradient
        loss = calculate_logistic_loss(y, tx, w)
    return w, loss
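
# Usage sketch: same call as logistic_regression plus lambda_. The penalty
# enters only the gradient, so a nonzero lambda_ keeps ||w|| bounded even on
# separable data while the returned loss stays the plain logistic loss.
#     w, loss = reg_logistic_regression(y, tx, 0.1, np.zeros(2), 1000, 0.5)
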
# ------------------------------------------------------------------------------------------
# OTHER USEFUL FUNCTIONS
def compute_loss(y, tx, w):
    """
    Calculate the loss using Mean Square Error
    ------------
    Arguments:
        y : array, shape = (N, )
        tx : array, shape = (N, D)
        w : array, shape = (D, ). The vector of model parameters.
    ------------
    Return:
        loss : value of the loss (a scalar), corresponding to the input parameter w.
    """
    loss = (1 / 2) * np.mean((y - tx.dot(w)) ** 2)
    return loss

def compute_loss_pred(y, y_pred):
    """
    Calculate the loss between the predicted value and the true value
    ------------
    Arguments:
        y : array, shape = (N, )
        y_pred : array, shape = (N, ), predicted values from a model
    ------------
    Return:
        loss : value of the loss (a scalar)
    """
    loss = (1 / 2) * np.mean((y - y_pred) ** 2)
    return loss

def compute_gradient(y, tx, w):
    """
    Computes the gradient at w
    ------------
    Arguments:
        y : array, shape = (N, )
        tx : array, shape = (N, D)
        w : array, shape = (D, ). The vector of model parameters.
    ------------
    Return:
        gradient : an array of shape (D, ) (same shape as w), containing the gradient of the loss at w.
    """
    error = y - tx.dot(w)
    gradient = (-1 / y.shape[0]) * tx.T.dot(error)
    return gradient

## MEAN SQUARED ERROR STOCHASTIC GRADIENT
def batch_iter(y, tx, batch_size, num_batches=1, shuffle=True):
    """
    Generate a minibatch iterator for a dataset.
    Takes as input two iterables (here the output desired values 'y' and the input data 'tx').
    Outputs an iterator which gives mini-batches of `batch_size` matching elements from `y` and `tx`.
    Data can be randomly shuffled to avoid ordering in the original data messing with the randomness of the minibatches.
    ------------
    Arguments:
        y : array, shape = (N, )
        tx : array, shape = (N, D)
        batch_size : scalar, the size of each batch
        num_batches : int, number of batches
        shuffle : bool, shuffle the dataset if True
    ------------
    Yields:
        minibatch_y, minibatch_tx : aligned slices of y and tx with at most batch_size elements
    """
    data_size = len(y)
    if shuffle:
        shuffle_indices = np.random.permutation(np.arange(data_size))
        shuffled_y = y[shuffle_indices]
        shuffled_tx = tx[shuffle_indices]
    else:
        shuffled_y = y
        shuffled_tx = tx
    for batch_num in range(num_batches):
        start_index = batch_num * batch_size
        end_index = min((batch_num + 1) * batch_size, data_size)
        if start_index != end_index:
            yield shuffled_y[start_index:end_index], shuffled_tx[start_index:end_index]
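
# Usage sketch: with batch_size=2 and num_batches=2, this yields up to two
# aligned (minibatch_y, minibatch_tx) pairs of two samples each.
#     for minibatch_y, minibatch_tx in batch_iter(y, tx, batch_size=2, num_batches=2):
#         ...
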
def compute_stoch_gradient(y, tx, w):
    """
    Compute a stochastic gradient
    ------------
    Arguments:
        y : array, shape = (N, )
        tx : array, shape = (N, D)
        w : array, shape = (D, ). The vector of model parameters.
    ------------
    Return:
        stoch_gradient : array, shape = (D, ) (same shape as w), containing the stochastic gradient of the loss at w.
    """
    error = y - tx.dot(w)
    stoch_gradient = (-1 / y.shape[0]) * tx.T.dot(error)
    return stoch_gradient

## LOGISTIC REGRESSION
def sigmoid(t):
    """
    Apply sigmoid function on t.
    ------------
    Arguments:
        t : scalar or numpy array
    ------------
    Return:
        scalar or numpy array
    """
    return 1 / (1 + np.exp(-t))

def calculate_logistic_loss(y, tx, w):
    """
    Compute the cost by negative log likelihood
    ------------
    Arguments:
        y : array, shape = (N, )
        tx : array, shape = (N, D)
        w : array, shape = (D, )
    ------------
    Return:
        loss : scalar, non-negative loss value
    """
    assert y.shape[0] == tx.shape[0]
    assert tx.shape[1] == w.shape[0]
    y_predicted = sigmoid(tx.dot(w))
    # Negative mean log-likelihood; equivalent to the original expression,
    # which wrapped the same non-negative scalar in np.linalg.norm.
    loss = -np.mean(y * np.log(y_predicted) + (1 - y) * np.log(1 - y_predicted))
    return loss

def calculate_logistic_gradient(y, tx, w):
    """
    Compute the gradient of loss.
    ------------
    Arguments:
        y : array, shape = (N, )
        tx : array, shape = (N, D)
        w : array, shape = (D, )
    ------------
    Returns:
        grad : array, shape = (D, ), logistic gradient
    """
    y_predicted = sigmoid(tx.dot(w))
    grad = (1 / tx.shape[0]) * tx.T.dot(y_predicted - y)
    return grad

## REGULARIZED LOGISTIC REGRESSION
def compute_reg_logistic_regression(y, tx, w, lambda_):
    """
    Return the loss and gradient for regularized logistic regression.
    ------------
    Arguments:
        y : array, shape = (N, )
        tx : array, shape = (N, D)
        w : array, shape = (D, )
        lambda_ : scalar, the regularization parameter
    ------------
    Return:
        loss : scalar, logistic loss plus the L2 penalty lambda_ * ||w||^2
        gradient : array, shape = (D, )
    """
    # Use the logistic loss and gradient (the original version reused the MSE
    # helpers, which does not match the logistic objective this helper is for).
    loss = calculate_logistic_loss(y, tx, w) + lambda_ * np.squeeze(w.T.dot(w))
    gradient = calculate_logistic_gradient(y, tx, w) + 2 * lambda_ * w
    return loss, gradient
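

if __name__ == "__main__":
    # A small smoke test on synthetic data (illustrative only, not part of the
    # assignment spec): the iterative GD solver should roughly agree with the
    # closed-form least-squares solution.
    rng = np.random.default_rng(0)
    x = rng.normal(size=100)
    tx = np.c_[np.ones(100), x]
    y = tx.dot(np.array([0.5, -1.5])) + 0.01 * rng.normal(size=100)
    w_ls, loss_ls = least_squares(y, tx)
    w_gd, loss_gd = mean_squared_error_gd(y, tx, np.zeros(2), max_iters=1000, gamma=0.1)
    print("least squares   :", w_ls, loss_ls)
    print("gradient descent:", w_gd, loss_gd)
    assert np.allclose(w_ls, w_gd, atol=1e-2)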