# pruner.py
import torch
import numpy as np
class Pruner:
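    r"""Base class for pruning methods.

    Holds a list of (mask, parameter) pairs and a dict of per-parameter
    saliency scores keyed by id(parameter); subclasses implement ``score``.
    """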
def __init__(self, masked_parameters):
self.masked_parameters = list(masked_parameters)
self.scores = {}
def score(self, model, loss, dataloader, device):
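        r"""Assigns a saliency score to every masked parameter; implemented by subclasses."""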
raise NotImplementedError
def _global_mask(self, sparsity):
r"""Updates masks of model with scores by sparsity level globally."""
# # Set score for masked parameters to -inf
# for mask, param in self.masked_parameters:
# score = self.scores[id(param)]
# score[mask == 0.0] = -np.inf
# Threshold scores
global_scores = torch.cat([torch.flatten(v) for v in self.scores.values()])
k = int((1.0 - sparsity) * global_scores.numel())
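        # `sparsity` is the fraction of weights to keep: the k lowest-scoring
        # weights, k = (1 - sparsity) * total, are zeroed out below.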
        if k >= 1:
threshold, _ = torch.kthvalue(global_scores, k)
for mask, param in self.masked_parameters:
score = self.scores[id(param)]
zero = torch.tensor([0.0]).to(mask.device)
one = torch.tensor([1.0]).to(mask.device)
mask.copy_(torch.where(score <= threshold, zero, one))
def _local_mask(self, sparsity):
r"""Updates masks of model with scores by sparsity level parameter-wise."""
for mask, param in self.masked_parameters:
score = self.scores[id(param)]
k = int((1.0 - sparsity) * score.numel())
            if k >= 1:
threshold, _ = torch.kthvalue(torch.flatten(score), k)
zero = torch.tensor([0.0]).to(mask.device)
one = torch.tensor([1.0]).to(mask.device)
mask.copy_(torch.where(score <= threshold, zero, one))
def mask(self, sparsity, scope):
r"""Updates masks of model with scores by sparsity according to scope."""
if scope == "global":
self._global_mask(sparsity)
        elif scope == "local":
self._local_mask(sparsity)
@torch.no_grad()
def apply_mask(self):
r"""Applies mask to prunable parameters."""
for mask, param in self.masked_parameters:
param.mul_(mask)
def alpha_mask(self, alpha):
r"""Set all masks to alpha in model."""
for mask, _ in self.masked_parameters:
mask.fill_(alpha)
# Based on https://github.com/facebookresearch/open_lth/blob/master/utils/tensor_utils.py#L43
    def shuffle(self):
        r"""Randomly permutes each mask in place (preserves per-layer sparsity)."""
        for mask, _ in self.masked_parameters:
            shape = mask.shape
            perm = torch.randperm(mask.nelement())
            # copy_ mutates the mask tensor; plain reassignment would only
            # rebind the local variable and leave the mask unchanged
            mask.copy_(mask.reshape(-1)[perm].reshape(shape))
    def invert(self):
        r"""Inverts scores in place (v -> 1/v), reversing the pruning order."""
        for v in self.scores.values():
            v.div_(v**2)
def stats(self):
r"""Returns remaining and total number of prunable parameters."""
remaining_params, total_params = 0, 0
for mask, _ in self.masked_parameters:
            remaining_params += mask.detach().sum().item()
total_params += mask.numel()
return remaining_params, total_params
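
# The scoring methods below implement different saliency criteria; each
# populates self.scores keyed by id(parameter) so Pruner.mask can threshold them.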
class Rand(Pruner):
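    r"""Random Gaussian scores: a random-pruning baseline."""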
def __init__(self, masked_parameters):
super(Rand, self).__init__(masked_parameters)
def score(self, model, loss, dataloader, device):
for _, p in self.masked_parameters:
self.scores[id(p)] = torch.randn_like(p)
class Mag(Pruner):
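    r"""Magnitude scores |theta|: prunes the smallest-magnitude weights."""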
def __init__(self, masked_parameters):
super(Mag, self).__init__(masked_parameters)
def score(self, model, loss, dataloader, device):
for _, p in self.masked_parameters:
self.scores[id(p)] = torch.clone(p.data).detach().abs_()
# Based on https://github.com/mi-lad/snip/blob/master/snip.py#L18
class SNIP(Pruner):
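    r"""SNIP (Lee et al., 2019): connection sensitivity |dL/dm| = |g * theta|,
    accumulated over one pass through the dataloader and normalized to sum to 1."""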
def __init__(self, masked_parameters):
super(SNIP, self).__init__(masked_parameters)
def score(self, model, loss, dataloader, device):
# allow masks to have gradient
for m, p in self.masked_parameters:
m.requires_grad = True
# compute gradient
for batch_idx, (data, target) in enumerate(dataloader):
data, target = data.to(device), target.to(device)
output = model(data)
loss(output, target).backward()
# calculate score |g * theta|
for m, p in self.masked_parameters:
self.scores[id(p)] = torch.clone(m.grad).detach().abs_()
p.grad.data.zero_()
m.grad.data.zero_()
m.requires_grad = False
# normalize score
all_scores = torch.cat([torch.flatten(v) for v in self.scores.values()])
norm = torch.sum(all_scores)
for _, p in self.masked_parameters:
self.scores[id(p)].div_(norm)
# Based on https://github.com/alecwangcq/GraSP/blob/master/pruner/GraSP.py#L49
class GraSP(Pruner):
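    r"""GraSP (Wang et al., 2020): scores weights by theta * Hg, where Hg is a
    Hessian-gradient product estimated with two passes over the data."""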
def __init__(self, masked_parameters):
super(GraSP, self).__init__(masked_parameters)
self.temp = 200
self.eps = 1e-10
def score(self, model, loss, dataloader, device):
# first gradient vector without computational graph
stopped_grads = 0
for batch_idx, (data, target) in enumerate(dataloader):
data, target = data.to(device), target.to(device)
output = model(data) / self.temp
L = loss(output, target)
grads = torch.autograd.grad(
L, [p for (_, p) in self.masked_parameters], create_graph=False
)
flatten_grads = torch.cat([g.reshape(-1) for g in grads if g is not None])
stopped_grads += flatten_grads
# second gradient vector with computational graph
for batch_idx, (data, target) in enumerate(dataloader):
data, target = data.to(device), target.to(device)
output = model(data) / self.temp
L = loss(output, target)
grads = torch.autograd.grad(
L, [p for (_, p) in self.masked_parameters], create_graph=True
)
flatten_grads = torch.cat([g.reshape(-1) for g in grads if g is not None])
gnorm = (stopped_grads * flatten_grads).sum()
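            # differentiating (g_stop . g) w.r.t. the parameters puts the
            # Hessian-gradient product Hg into each p.grad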
gnorm.backward()
# calculate score Hg * theta (negate to remove top percent)
for _, p in self.masked_parameters:
self.scores[id(p)] = torch.clone(p.grad * p.data).detach()
p.grad.data.zero_()
# normalize score
all_scores = torch.cat([torch.flatten(v) for v in self.scores.values()])
norm = torch.abs(torch.sum(all_scores)) + self.eps
for _, p in self.masked_parameters:
self.scores[id(p)].div_(norm)
class SynFlow(Pruner):
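    r"""SynFlow (Tanaka et al., 2020): scores |theta * dR/dtheta|, where R is the
    summed output of the network with all parameters replaced by their absolute values."""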
def __init__(self, masked_parameters):
super(SynFlow, self).__init__(masked_parameters)
def score(self, model, loss, dataloader, device):
@torch.no_grad()
def linearize(model):
# model.double()
signs = {}
for name, param in model.state_dict().items():
signs[name] = torch.sign(param)
param.abs_()
return signs
@torch.no_grad()
def nonlinearize(model, signs):
# model.float()
for name, param in model.state_dict().items():
param.mul_(signs[name])
signs = linearize(model)
        (data, _) = next(iter(dataloader))
        input_dim = list(data[0, :].shape)
        # The reference SynFlow implementation feeds an all-ones input of shape
        # [1] + input_dim (e.g. torch.ones([1] + input_dim).to(device));
        # this variant scores on a real data batch instead.
        input = data.to(device)
        output = model(input)
        torch.sum(output).backward()
for _, p in self.masked_parameters:
self.scores[id(p)] = torch.clone(p.grad * p).detach().abs_()
p.grad.data.zero_()
nonlinearize(model, signs)
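

# A minimal usage sketch (not part of the original module), assuming magnitude
# pruning of a toy model. The real repository builds the (mask, parameter) pairs
# from layers that carry a registered `weight_mask` buffer; here the pairs are
# constructed ad hoc, so masks only take effect through `apply_mask`.
if __name__ == "__main__":
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(10, 32), nn.ReLU(), nn.Linear(32, 2))
    # pair every weight matrix (biases excluded) with a fresh all-ones mask
    pairs = [(torch.ones_like(p), p) for p in model.parameters() if p.dim() > 1]

    pruner = Mag(pairs)
    pruner.score(model, None, None, None)  # Mag ignores loss, dataloader, and device
    pruner.mask(sparsity=0.5, scope="global")  # keep the highest-magnitude 50%
    pruner.apply_mask()

    remaining, total = pruner.stats()
    print(f"kept {remaining:.0f}/{total} prunable weights")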