proxy_model_attack_model.py (forked from twosixlabs/armory-example)
"""Proxy-model PGD attack: CustomAttack copies the classifier built by
get_art_model and runs PGD against that copy, so the attack does not
overwrite loss_gradient_framework on the original estimator."""

from typing import Optional

import torch
import torch.nn as nn
from art.attacks.evasion import ProjectedGradientDescent
from art.classifiers import PyTorchClassifier
from armory.baseline_models.pytorch.cifar import Net

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class ModifiedNet(nn.Module):
    """Thin wrapper around the Armory baseline CIFAR-10 Net."""

    def __init__(self):
        super().__init__()
        self.net = Net()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.net.forward(x)


def make_modified_model(**kwargs) -> ModifiedNet:
    return ModifiedNet()


def get_art_model(
    model_kwargs: dict, wrapper_kwargs: dict, weights_path: Optional[str] = None
) -> PyTorchClassifier:
    model = make_modified_model(**model_kwargs)
    model.to(DEVICE)

    if weights_path:
        checkpoint = torch.load(weights_path, map_location=DEVICE)
        model.load_state_dict(checkpoint)

    wrapped_model = PyTorchClassifier(
        model,
        loss=nn.CrossEntropyLoss(),
        optimizer=torch.optim.Adam(model.parameters(), lr=0.003),
        input_shape=(32, 32, 3),
        nb_classes=10,
        clip_values=(0.0, 1.0),
        **wrapper_kwargs,
    )
    return wrapped_model


class CustomAttack(ProjectedGradientDescent):
    def __init__(self, estimator, **kwargs):
        # Create copy of the model (to avoid overwriting loss_gradient_framework of original model)
        new_estimator = get_art_model(model_kwargs={}, wrapper_kwargs={})
        new_estimator.model.load_state_dict(estimator.model.state_dict())

        # Point attack to copy of model
        super().__init__(new_estimator, **kwargs)
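

# Minimal smoke-test sketch of how the pieces above fit together. The PGD
# parameters (eps, eps_step, max_iter) and the random NHWC batch are
# illustrative assumptions, not values taken from an Armory scenario config.
if __name__ == "__main__":
    import numpy as np

    classifier = get_art_model(model_kwargs={}, wrapper_kwargs={})
    attack = CustomAttack(classifier, eps=8 / 255, eps_step=1 / 255, max_iter=10)

    # Random batch matching input_shape=(32, 32, 3) and clip_values=(0.0, 1.0).
    x = np.random.rand(4, 32, 32, 3).astype(np.float32)
    x_adv = attack.generate(x=x)
    print("adversarial batch shape:", x_adv.shape)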