-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtrainLinearModel.py
More file actions
107 lines (73 loc) · 2.84 KB
/
trainLinearModel.py
File metadata and controls
107 lines (73 loc) · 2.84 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
'''
A simple experiment on the robustness of linear classifiers during training, used to verify the implementation of the Lipschitz constant estimation.
'''
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import torch.utils.data.dataset as Dataset
from utils import *
import argparse
import os
from model import MNISTLR
from model import CIFARLR
from estimateLipschitzBound import estimateLipschitzBound
def calculateLinearDistance(model, dataset, q):
    '''
    Compute the exact Lp distance to the decision boundary for every
    sample in `dataset`, given a linear model.

    model: a linear classifier whose parameters are exactly (W, b).
    dataset: indexable collection of (image, label) pairs; labels are
        ignored here (distance to the nearest boundary, not the margin).
    q: the dual of p (we are computing Lp distance); forwarded to
        `distance` from utils.

    Returns a list of distances, one per sample, in dataset order.
    '''
    W, b = list(model.parameters())
    W = W.detach().numpy()
    # reshape((-1, 1)) instead of a hard-coded class count, so the
    # function works for any number of output classes.
    b = b.detach().numpy().reshape((-1, 1))
    dist = []
    for img, _ in dataset:
        x0 = img.reshape((-1, 1))
        dist.append(distance(W, b, x0, q = q))
    return dist
def calculateLinearMargin(model, dataset, q):
    '''
    Compute the exact margin (distance to the boundary of the true
    class) for every sample in `dataset`, given a linear model.

    model: a linear classifier whose parameters are exactly (W, b).
    dataset: indexable collection of (image, label) pairs; the label is
        needed to select the true-class boundary.
    q: the dual of p (we are computing Lp distance); forwarded to
        `margin` from utils.

    Returns a list of margins, one per sample, in dataset order.
    '''
    W, b = list(model.parameters())
    W = W.detach().numpy()
    # reshape((-1, 1)) instead of a hard-coded class count, so the
    # function works for any number of output classes.
    b = b.detach().numpy().reshape((-1, 1))
    dist = []
    for img, label in dataset:
        x0 = img.reshape((-1, 1))
        dist.append(margin(W, b, x0, label, q = q))
    return dist
def main():
    '''
    Sanity check: the margin estimated via the Lipschitz bound must
    agree (to 1e-5) with the exact margin of a linear classifier on the
    first 100 test samples.
    '''
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('modelname')
    arg_parser.add_argument('dataset')
    arg_parser.add_argument('-p', type = int, default = 1)
    args = arg_parser.parse_args()

    model = modelname2model(args.modelname)
    trainingset, testset = makeDataset(args.dataset)
    trainloader, testloader = makeLoader((trainingset, testset), batch_size = 32)

    # Very large p is treated as infinity; q is its dual exponent.
    p = np.inf if not args.p < 1e10 else args.p
    q = dual(p)
    print('p = %f, q = %f' % (p, q))

    device = torch.device('cpu')
    optimizer = optim.SGD(model.parameters(), lr = 0.01, momentum = 0.9, nesterov = True)
    # epochs = 0: the model is only evaluated, so the comparison below
    # runs against the classifier at initialization.
    train(model, device, trainloader, testloader, F.cross_entropy, optimizer, epochs = 0, verbose = 2)

    subset = [testset[i] for i in range(100)]
    margin_list = calculateLinearMargin(model, subset, q = q)
    # Be cautious about the radius parameter R: it must be large enough
    # that the distances for linear classifiers are not truncated.
    estimated_margin_list, _, _ = estimateLipschitzBound(model, device, subset, Nb = 1, Ns = 1, p = p, q = q, R = 100)

    for exact, estimated in zip(margin_list, estimated_margin_list):
        assert(np.abs(exact - estimated) < 1e-5)
    print(np.mean(margin_list))
    print(np.mean(estimated_margin_list))
if __name__ == '__main__':
    main()