Changes from all commits
414 commits
d6605a1
Add GradientModifier to LitModular
dxoigmn Apr 14, 2023
b5d5442
Adversary consumes a OptimizerFactory
dxoigmn Apr 14, 2023
d2b4483
Better Composer and Projector type logic
dxoigmn Apr 14, 2023
f99d1ea
Merge branch 'adversary_as_lightningmodule' into shapeshifter
dxoigmn Apr 14, 2023
857d8f6
spelling
dxoigmn Apr 14, 2023
34c24f8
Merge branch 'adversary_as_lightningmodule' into shapeshifter
dxoigmn Apr 14, 2023
9420a00
bugfix
dxoigmn Apr 14, 2023
69b6575
Merge branch 'adversary_as_lightningmodule' into shapeshifter
dxoigmn Apr 14, 2023
30f0945
Remove UniversalAdversary
dxoigmn Apr 14, 2023
86bff99
Remove Universal attack
dxoigmn Apr 14, 2023
56014e6
Replace tuple with Iterable[torch.Tensor]
dxoigmn Apr 14, 2023
280609a
Merge branch 'iterable_instead_of_tuple' into adversary_as_lightningm…
dxoigmn Apr 14, 2023
ef0cea1
cleanup
dxoigmn Apr 14, 2023
1c47cc0
Fix tests
dxoigmn Apr 14, 2023
a009fd7
Merge branch 'iterable_instead_of_tuple' into adversary_as_lightningm…
dxoigmn Apr 14, 2023
b89802e
Merge branch 'adversary_as_lightningmodule' into shapeshifter
dxoigmn Apr 14, 2023
e8f249e
better type
dxoigmn Apr 14, 2023
70cc36a
Cleanup
dxoigmn Apr 14, 2023
8e632e0
Merge branch 'iterable_instead_of_tuple' into adversary_as_lightningm…
dxoigmn Apr 14, 2023
7507c38
Merge branch 'adversary_as_lightningmodule' into shapeshifter
dxoigmn Apr 14, 2023
53ee7f4
Make GradientModifier accept Iterable[torch.Tensor]
dxoigmn Apr 14, 2023
89a6ce2
Merge branch 'iterable_instead_of_tuple' into adversary_as_lightningm…
dxoigmn Apr 14, 2023
3de9d58
Merge branch 'adversary_as_lightningmodule' into shapeshifter
dxoigmn Apr 14, 2023
1e9526a
Revert changes to LitModular
dxoigmn Apr 14, 2023
80edd83
Merge branch 'adversary_as_lightningmodule' into shapeshifter
dxoigmn Apr 14, 2023
f7345da
Revert Adversary consumes a OptimizerFactory
dxoigmn Apr 14, 2023
a85b987
Merge branch 'adversary_as_lightningmodule' into shapeshifter
dxoigmn Apr 14, 2023
5a49f82
Remove Callback base
dxoigmn Apr 14, 2023
1fbae00
Fix tests
dxoigmn Apr 17, 2023
8493da1
bugfix
dxoigmn Apr 17, 2023
3068bc3
bugfix
dxoigmn Apr 17, 2023
af0bb9b
Merge branch 'adversary_as_lightningmodule' into shapeshifter
dxoigmn Apr 17, 2023
0bc36af
bugfix
dxoigmn Apr 17, 2023
b6bf10c
update docs
dxoigmn Apr 17, 2023
841f813
style
dxoigmn Apr 17, 2023
802b45b
style
dxoigmn Apr 17, 2023
58e9d9f
Move mart.attack.callbacks to mart.callbacks
dxoigmn Apr 18, 2023
d3b2f95
Disable default EarlyStopping
dxoigmn Apr 18, 2023
b58f9a9
Add PerturbationVisualizer callback
dxoigmn Apr 18, 2023
874a9da
Add train end perturbation
dxoigmn Apr 18, 2023
f916f21
style
dxoigmn Apr 18, 2023
ef13803
bugfix
dxoigmn Apr 18, 2023
36ef23d
Update ShapeShifter
dxoigmn Apr 18, 2023
d017a62
Generalize RandomAffine to Warp
dxoigmn Apr 19, 2023
4285097
Add drop block
dxoigmn Apr 19, 2023
72f4e45
Use pre-multiplied alpha in WarpOverlay
dxoigmn Apr 19, 2023
f1cb0f2
Remove drop block in favor of RandomErasing
dxoigmn Apr 19, 2023
c4e5bb7
Dont make weights_fpath and num_classes mandatory since they're usual…
dxoigmn Apr 20, 2023
ec41697
cleanup
dxoigmn Apr 20, 2023
61a2155
style
dxoigmn Apr 20, 2023
316582b
Add GradientMonitor
dxoigmn Apr 20, 2023
e32f94a
Turn off BatchNorm buffer updating in freeze
dxoigmn Apr 20, 2023
def63f9
Add TV loss
dxoigmn Apr 21, 2023
fc92076
Add Adam loss
dxoigmn Apr 21, 2023
09619ea
Add weights to mart.nn.Sum
dxoigmn Apr 21, 2023
3bae94f
style
dxoigmn Apr 21, 2023
14a362b
Add callback that freezes specified module
dxoigmn May 5, 2023
07179e7
Make Composer a Module and cleanup Composers
dxoigmn May 5, 2023
9e6939a
Add composer configs
dxoigmn May 5, 2023
ae2acca
Add mask-aware ColorJitter transform
dxoigmn May 5, 2023
8af2118
Remove AttackInEvalMode in favor of FreezeCallback
dxoigmn May 5, 2023
557bcd2
bugfix
dxoigmn May 5, 2023
5a3b6b4
Remove freeze from LitModular
dxoigmn May 5, 2023
5774b69
bugfix
dxoigmn May 5, 2023
fff368d
Make attack callbacks normal callbacks
dxoigmn May 5, 2023
01f4caa
Merge branch 'cleanup_callbacks' into shapeshifter
dxoigmn May 5, 2023
afc9f75
Merge branch 'cleanup_callbacks' into freeze_callback
dxoigmn May 5, 2023
715a73d
Remove config
dxoigmn May 5, 2023
b6a0e45
Add gradient monitor callback
dxoigmn May 5, 2023
557d878
Remove NoGradMode callback
dxoigmn May 5, 2023
ee4a367
Move load_state_dict into LitModular
dxoigmn May 5, 2023
b82fac7
Fix configs
dxoigmn May 5, 2023
af2af42
Undo changes to mart/nn/nn.py and friends
dxoigmn May 5, 2023
977711d
bugfix
dxoigmn May 5, 2023
ac0bb0d
Merge branch 'cleanup_callbacks' into shapeshifter
dxoigmn May 5, 2023
e08423a
bugfix
dxoigmn May 5, 2023
7f04242
bugfix
dxoigmn May 5, 2023
10106df
Fix annotations
dxoigmn May 5, 2023
5cd900e
Make Perturber more flexible
dxoigmn May 5, 2023
1953938
bugfix
dxoigmn May 5, 2023
c6dc5a4
Add GradientModifier and fix tests
dxoigmn May 5, 2023
0055b10
Fix configs
dxoigmn May 5, 2023
e492e70
Get adversary tests from adversary_as_lightningmodule
dxoigmn May 5, 2023
c9c8429
Merge branch 'better_perturber' into adversary_as_lightningmodule
dxoigmn May 5, 2023
e8dadcb
Make attack callbacks normal callbacks
dxoigmn May 5, 2023
80fe8b1
style
dxoigmn May 5, 2023
92b3545
Merge branch 'adversary_as_lightningmodule' into shapeshifter
dxoigmn May 5, 2023
d801fe2
Merge branch 'adversary_as_lightningmodule' into shapeshifter
dxoigmn May 5, 2023
27a3f80
Move attack optimizers to optimizers
dxoigmn May 5, 2023
b60057e
Merge branch 'better_optimizer' into adversary_as_lightningmodule
dxoigmn May 5, 2023
c48f410
bugfix
dxoigmn May 5, 2023
025b28f
Merge branch 'adversary_as_lightningmodule' into shapeshifter
dxoigmn May 5, 2023
754570d
style
dxoigmn May 5, 2023
b7c14b1
comment
dxoigmn May 5, 2023
559bcfe
Merge branch 'main' into better_perturber
dxoigmn May 16, 2023
6fd4943
fix test
dxoigmn May 16, 2023
c71cba6
style
dxoigmn May 16, 2023
43f4520
Merge branch 'better_perturber' into better_optimizer
dxoigmn May 16, 2023
58f91dc
style
dxoigmn May 16, 2023
bc03a87
Perturber is no longer a callback
dxoigmn May 16, 2023
b9af839
fix tests
dxoigmn May 16, 2023
a8d7201
fix tests
dxoigmn May 16, 2023
7826e39
fix tests
dxoigmn May 16, 2023
fa3545b
fix tests
dxoigmn May 16, 2023
57edc03
fix tests
dxoigmn May 16, 2023
83938a9
fix tests
dxoigmn May 16, 2023
0556e35
Merge branch 'better_perturber' into better_optimizer
dxoigmn May 16, 2023
f5ee114
fix tests
dxoigmn May 16, 2023
a631fa1
bugfix
dxoigmn May 16, 2023
2115597
bugfix
dxoigmn May 16, 2023
1679feb
Merge branch 'better_optimizer' into adversary_as_lightningmodule
dxoigmn May 16, 2023
4edbdfa
fix tests
dxoigmn May 16, 2023
ef15c53
add missing tests
dxoigmn May 16, 2023
dcf7599
return tests to original tests
dxoigmn May 16, 2023
46ed57f
style
dxoigmn May 16, 2023
41eb387
Set optimizer to maximize in attacks
dxoigmn May 16, 2023
5932223
Revert "Set optimizer to maximize in attacks"
dxoigmn May 16, 2023
3bf7353
Adversary optimizer maximizes gain
dxoigmn May 16, 2023
bd438b5
Merge branch 'better_optimizer' into adversary_as_lightningmodule
dxoigmn May 16, 2023
8eb2809
Merge branch 'adversary_as_lightningmodule' into shapeshifter
dxoigmn May 16, 2023
e32477f
Merge remote-tracking branch 'origin/add_load_state_dict_to_litmodula…
dxoigmn May 16, 2023
ca911a2
bugfix
dxoigmn May 16, 2023
e91327a
Merge remote-tracking branch 'origin/freeze_callback' into shapeshifter
dxoigmn May 16, 2023
d9ca00a
style
dxoigmn May 16, 2023
537928a
bugfix
dxoigmn May 16, 2023
19e12cb
Use freeze callback
dxoigmn May 16, 2023
982ad63
bugfix and comment
dxoigmn May 16, 2023
1489e7a
Merge branch 'adversary_as_lightningmodule' into freeze_callback
dxoigmn May 17, 2023
9972b99
Merge branch 'freeze_callback' into shapeshifter
dxoigmn May 17, 2023
f76a9b7
Merge branch 'adversary_as_lightningmodule' into gradient_monitor_cal…
dxoigmn May 17, 2023
bee5221
comments
dxoigmn May 17, 2023
ea22015
remove yaml files
dxoigmn May 17, 2023
08b3d00
Only set eval mode for BatchNorm and Dropout modules
dxoigmn May 17, 2023
64858c4
Merge branch 'freeze_callback' into shapeshifter
dxoigmn May 17, 2023
823863e
cleanup
dxoigmn May 18, 2023
a1f301f
Move Composer from Perturber and into Attacker
dxoigmn May 22, 2023
f6b367b
cleanup
dxoigmn May 22, 2023
6a7673c
bugfix
dxoigmn May 22, 2023
c998de7
Merge branch 'better_perturber' into better_optimizer
dxoigmn May 22, 2023
2d4366e
Merge branch 'better_optimizer' into adversary_as_lightningmodule
dxoigmn May 22, 2023
98fdcc7
Add Composer to Adversary
dxoigmn May 22, 2023
2db13a0
cleanup
dxoigmn May 22, 2023
6d8473f
Merge branch 'adversary_as_lightningmodule' into shapeshifter
dxoigmn May 22, 2023
25a89d9
Add Composer to Adversary
dxoigmn May 22, 2023
8455236
Better module names
dxoigmn May 23, 2023
e2458cd
Generalize PerturbationVisualizer as ImageVisualizer
dxoigmn May 23, 2023
de40c0c
fix config
dxoigmn May 23, 2023
803098f
style
dxoigmn May 23, 2023
5f6a47b
Fix Composer to always use gs_coords when present in target
dxoigmn May 23, 2023
fcee388
Remove ColorJitterWarpComposite
dxoigmn May 23, 2023
35e7a1d
Remove MaskAdditive
dxoigmn May 23, 2023
c2f1f77
projector -> projector_
dxoigmn May 31, 2023
48520cf
Hide adversarial parameters from model checkpoint. (#150)
mzweilin Jun 1, 2023
8a1ca12
Merge branch 'better_perturber' into better_optimizer
mzweilin Jun 1, 2023
3ff5796
Merge branch 'better_perturber' into adversary_as_lightningmodule
mzweilin Jun 1, 2023
1c0eb27
Merge branch 'better_optimizer' into adversary_as_lightningmodule
mzweilin Jun 1, 2023
773b2cb
Merge branch 'main' into adversary_as_lightningmodule
dxoigmn Jun 1, 2023
1293a89
Fix merge error
dxoigmn Jun 1, 2023
2a58f5c
Merge branch 'adversary_as_lightningmodule' into shapeshifter
dxoigmn Jun 1, 2023
e8d0852
Merge branch 'adversary_as_lightningmodule' into freeze_callback
dxoigmn Jun 1, 2023
3cf8282
Merge branch 'adversary_as_lightningmodule' into gradient_monitor_cal…
dxoigmn Jun 1, 2023
ed645ee
Merge branch 'main' into add_load_state_dict_to_litmodular
dxoigmn Jun 1, 2023
f7b3e90
Merge branch 'add_load_state_dict_to_litmodular' into shapeshifter
dxoigmn Jun 1, 2023
53dff65
Merge branch 'main' into freeze_callback
dxoigmn Jun 2, 2023
535b012
Merge branch 'main' into gradient_monitor_callback
dxoigmn Jun 2, 2023
ac54402
Merge branch 'freeze_callback' into shapeshifter
dxoigmn Jun 2, 2023
5e26406
Merge branch 'gradient_monitor_callback' into shapeshifter
dxoigmn Jun 2, 2023
c8e9db8
Remove MaskAdditive test
dxoigmn Jun 2, 2023
0c1d7a2
Disable PerturbedImageVisualizer
dxoigmn Jun 2, 2023
50abed8
Merge branch 'better_composer' into shapeshifter
dxoigmn Jun 5, 2023
a84f7d8
Make PerturbedImageVisualizer more generic
dxoigmn Jun 5, 2023
5ff68b7
Merge branch 'general_visualizer' into shapeshifter
dxoigmn Jun 5, 2023
25259c3
Disable test
dxoigmn Jun 5, 2023
014d7b5
Merge branch 'general_visualizer' into shapeshifter
dxoigmn Jun 5, 2023
fad0f15
Merge branch 'better_composer' into shapeshifter
dxoigmn Jun 8, 2023
8c55b47
Merge branch 'main' into freeze_callback
dxoigmn Jun 8, 2023
f006b55
Fix merge error
dxoigmn Jun 8, 2023
99a7669
Use attrgetter
dxoigmn Jun 9, 2023
588068c
Better implementation of ModelParamsNoGrad
dxoigmn Jun 9, 2023
3832d22
Better implementation of AttackInEvalMode
dxoigmn Jun 9, 2023
a9348df
Log which params will have gradients disabled
dxoigmn Jun 9, 2023
9c955df
Remove Freeze callback
dxoigmn Jun 9, 2023
d278aba
bugfix
dxoigmn Jun 9, 2023
55a6161
comments
dxoigmn Jun 9, 2023
830e765
comments
dxoigmn Jun 9, 2023
be8ae5d
comments
dxoigmn Jun 9, 2023
04069b9
Even better AttackInEvalMode
dxoigmn Jun 9, 2023
113d483
Fix type
dxoigmn Jun 9, 2023
3dbdfd4
Even better ModelParamsNoGrad
dxoigmn Jun 9, 2023
48577ad
more lenient
dxoigmn Jun 9, 2023
accdc0f
Revert changes to callbacks
dxoigmn Jun 9, 2023
b858207
Revert changes to utils
dxoigmn Jun 9, 2023
0d3b461
Revert changes to visualizer tests
dxoigmn Jun 9, 2023
5065a02
Revert configs
dxoigmn Jun 9, 2023
c9fdf41
Revert changes to adversary tests
dxoigmn Jun 9, 2023
b2ea1a4
Remove adam attack optimizer
dxoigmn Jun 9, 2023
3d04ef7
Update example modules to run in eval mode
dxoigmn Jun 12, 2023
77c2350
Only log and run in fit stage
dxoigmn Jun 12, 2023
b6f8ca3
CallWith passes non-str arguments directly to module
dxoigmn Jun 13, 2023
52f0501
_return_as_dict -> _return_as_dict_
dxoigmn Jun 14, 2023
a0761ee
style
dxoigmn Jun 14, 2023
b2009a8
Revert "_return_as_dict -> _return_as_dict_"
dxoigmn Jun 15, 2023
cef11f3
fix imports
dxoigmn Jun 15, 2023
9522075
Move _call_with_args_ and _return_as_dict_ functionality into CallWith
dxoigmn Jun 15, 2023
ba58264
Allow overwriting _call_with_args_ and _return_as_dict_ in CallWith.f…
dxoigmn Jun 15, 2023
60fc2ad
Add _train_mode_ and _inference_mode_ to CallWith
dxoigmn Jun 15, 2023
2e51c66
Revert "Add _train_mode_ and _inference_mode_ to CallWith"
dxoigmn Jun 15, 2023
1671755
Revert "Revert "Add _train_mode_ and _inference_mode_ to CallWith""
dxoigmn Jun 15, 2023
731b23d
cleanup
dxoigmn Jun 15, 2023
ac942c7
Merge branch 'better_sequentialdict3' into better_sequentialdict4
dxoigmn Jun 15, 2023
8e664f1
cleanup
dxoigmn Jun 15, 2023
ae1b836
Fix configs
dxoigmn Jun 15, 2023
39f9aaa
cleanup
dxoigmn Jun 15, 2023
0973933
bugfix
dxoigmn Jun 15, 2023
6d3f42a
Merge branch 'better_sequentialdict3' into better_sequentialdict4
dxoigmn Jun 15, 2023
2ec4e49
Only set train mode and inference mode on Modules
dxoigmn Jun 15, 2023
741d282
bugfix
dxoigmn Jun 15, 2023
15c5a5f
CallWith is not a Module
dxoigmn Jun 15, 2023
cb55a31
Merge branch 'better_sequentialdict3' into better_sequentialdict4
dxoigmn Jun 15, 2023
d2742bf
Merge branch 'better_sequentialdict2' into shapeshifter
dxoigmn Jun 15, 2023
fcd1d1a
Add weights to forward
dxoigmn Jun 15, 2023
40dd262
cleanup
dxoigmn Jun 22, 2023
a218bb2
Merge branch 'main' into better_sequentialdict3
dxoigmn Jun 22, 2023
0716be3
Merge branch 'main' into better_sequentialdict3
dxoigmn Jun 22, 2023
ceaa744
bugfix
dxoigmn Jun 22, 2023
e55563d
Merge branch 'better_composer' into shapeshifter
dxoigmn Jun 22, 2023
1972919
Merge branch 'main' into better_sequentialdict2
dxoigmn Jun 22, 2023
29a41f6
Merge branch 'better_sequentialdict2' into shapeshifter
dxoigmn Jun 22, 2023
b48f626
Merge branch 'better_sequentialdict3' into shapeshifter
dxoigmn Jun 22, 2023
d607fdf
Merge branch 'main' into better_sequentialdict3
dxoigmn Jun 22, 2023
b8d473b
fix merge error
dxoigmn Jun 22, 2023
e9cf67b
Change call special arg names
dxoigmn Jun 22, 2023
22cd058
Merge branch 'better_sequentialdict3' into shapeshifter
dxoigmn Jun 22, 2023
3f3e302
Merge branch 'better_sequentialdict3' into better_sequentialdict4
dxoigmn Jun 22, 2023
9c7b05e
bugfix
dxoigmn Jun 23, 2023
82e66f5
Merge branch 'main' into better_sequentialdict3
dxoigmn Jun 23, 2023
14bde3e
Merge branch 'better_sequentialdict3' into better_sequentialdict4
dxoigmn Jun 23, 2023
fab9763
fix merge error
dxoigmn Jun 23, 2023
f9b43eb
cleanup
dxoigmn Jun 23, 2023
471757d
cleanup
dxoigmn Jun 23, 2023
268843a
Merge branch 'better_sequentialdict3' into shapeshifter
dxoigmn Jun 23, 2023
47e92b0
Merge branch 'better_sequentialdict4' into shapeshifter
dxoigmn Jun 23, 2023
43d343b
style
dxoigmn Jun 23, 2023
368b87e
style
dxoigmn Jun 23, 2023
20bb57b
Merge branch 'better_sequentialdict3' into shapeshifter
dxoigmn Jun 23, 2023
ff81d41
Merge remote-tracking branch 'origin/freeze_callback' into shapeshifter
dxoigmn Jun 27, 2023
c5c734a
Merge remote-tracking branch 'origin/better_sequentialdict3' into bet…
dxoigmn Jun 27, 2023
3ca90c9
Enable dot-syntax to call module methods
dxoigmn Jun 27, 2023
085e4c4
Merge branch 'better_sequentialdict5' into shapeshifter
dxoigmn Jun 27, 2023
ceb1b43
Merge branch 'better_composer' into shapeshifter
dxoigmn Jun 28, 2023
115 changes: 115 additions & 0 deletions mart/attack/composer.py
@@ -91,3 +91,118 @@ def compose(self, perturbation, *, input, target):
perturbation = perturbation * mask

return input * (1 - mask) + perturbation


# FIXME: It would be really nice if we could compose composers just like we can compose everything else...
class WarpComposite(Composite):
def __init__(
self,
warp,
*args,
clamp=(0, 255),
premultiplied_alpha=True,
**kwargs,
):
super().__init__(*args, premultiplied_alpha=premultiplied_alpha, **kwargs)

self._warp = warp
self.clamp = clamp

# FIXME: This looks an awful lot like warp below. We should be able to get rid of this function.
def fixed_warp(self, perturbation, *, input, target):
# Use gs_coords to do fixed perspective warp
assert "gs_coords" in target

if len(input.shape) == 4 and len(perturbation.shape) == 3:
return torch.stack(
[
self.warp(perturbation, input=inp, target={"gs_coords": endpoints})
for inp, endpoints in zip(input, target["gs_coords"])
]
)
else:
# coordinates are [[left, top], [right, top], [right, bottom], [left, bottom]]
# perturbation is CHW
startpoints = [
[0, 0],
[perturbation.shape[2], 0],
[perturbation.shape[2], perturbation.shape[1]],
[0, perturbation.shape[1]],
]
endpoints = target["gs_coords"]

pert_w, pert_h = F.get_image_size(perturbation)
image_w, image_h = F.get_image_size(input)

# Pad perturbation to image size
if pert_w < image_w or pert_h < image_h:
# left, top, right and bottom
padding = [0, 0, max(image_w - pert_w, 0), max(image_h - pert_h, 0)]
perturbation = F.pad(perturbation, padding)

perturbation = F.perspective(perturbation, startpoints, endpoints)

# Crop perturbation to image size
if pert_w != image_w or pert_h != image_h:
perturbation = F.crop(perturbation, 0, 0, image_h, image_w)
return perturbation

def warp(self, perturbation, *, input, target):
# Always use gs_coords if present in target
if "gs_coords" in target:
return self.fixed_warp(perturbation, input=input, target=target)

# Otherwise, warp the perturbation onto the input
if len(input.shape) == 4 and len(perturbation.shape) == 3: # support for batch warping
return torch.stack(
[self.warp(perturbation, input=inp, target=target) for inp in input]
)
else:
pert_w, pert_h = F.get_image_size(perturbation)
image_w, image_h = F.get_image_size(input)

# Pad perturbation to image size
if pert_w < image_w or pert_h < image_h:
# left, top, right and bottom
padding = [0, 0, max(image_w - pert_w, 0), max(image_h - pert_h, 0)]
perturbation = F.pad(perturbation, padding)

perturbation = self._warp(perturbation)

# Crop perturbation to image size
if pert_w != image_w or pert_h != image_h:
perturbation = F.crop(perturbation, 0, 0, image_h, image_w)
return perturbation

def compose(self, perturbation, *, input, target):
# Create mask of ones to keep track of filled in pixels
mask = torch.ones_like(perturbation[:1])

# Add mask to perturbation so we can keep track of warping.
perturbation = torch.cat((perturbation, mask))

# Apply warp transform
perturbation = self.warp(perturbation, input=input, target=target)

# Extract mask from perturbation. The use of channels first forces this hack.
if len(perturbation.shape) == 4:
mask = perturbation[:, 3:, ...]
perturbation = perturbation[:, :3, ...]
else:
mask = perturbation[3:, ...]
perturbation = perturbation[:3, ...]

# Set/update perturbable mask
perturbable_mask = 1
if "perturbable_mask" in target:
perturbable_mask = target["perturbable_mask"]
perturbable_mask = perturbable_mask * mask

# Pre-multiply perturbation by the perturbable mask and clamp it to the input min/max
perturbation = perturbation * perturbable_mask
perturbation.clamp_(*self.clamp)

# Set mask for super().compose
target["perturbable_mask"] = perturbable_mask

return super().compose(perturbation, input=input, target=target)
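For orientation, a minimal usage sketch of the new WarpComposite (not part of the diff). The import path and the compose() call come from the change above and the warp_composite.yaml config below; the warp parameters mirror the ShapeShifter experiment config further down. The tensor sizes and the assumption that the Composite base needs no further constructor arguments are illustrative.

```python
import torch
import torchvision.transforms as T

from mart.attack.composer import WarpComposite

# Any torchvision-style transform can serve as the warp; this one mirrors the
# RandomAffine used in the experiment config further down.
warp = T.RandomAffine(degrees=(-5, 5), translate=(0.1, 0.25), scale=(0.4, 0.6), shear=(-3, 3, -3, 3))
composer = WarpComposite(warp=warp, clamp=(0, 255))

perturbation = torch.full((3, 416, 416), 128.0)        # patch-sized perturbation
input = torch.randint(0, 256, (3, 800, 800)).float()   # larger scene image
target = {}                                            # no gs_coords -> random warp branch

adv_input = composer.compose(perturbation, input=input, target=target)
```

When the target contains gs_coords, fixed_warp performs an annotation-driven perspective warp instead, so the same composer covers both randomized and fixed placement.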
5 changes: 5 additions & 0 deletions mart/attack/perturber.py
@@ -25,6 +25,7 @@ def __init__(
*,
initializer: Initializer,
projector: Projector | None = None,
size: Iterable[int] | None = None,
):
"""_summary_

@@ -39,6 +40,10 @@

self.perturbation = None

# FIXME: Should this be in UniversalAdversary?
if size is not None:
self.configure_perturbation(torch.empty(size))

def configure_perturbation(self, input: torch.Tensor | Iterable[torch.Tensor]):
def matches(input, perturbation):
if perturbation is None:
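A hedged sketch of the new size argument in Perturber (not from the diff): it lets the perturbation be allocated up front, e.g. for a fixed-size patch, instead of lazily from the first input. The Perturber target path and the size value come from the configs in this PR; the initializer class name and signature are assumptions for illustration.

```python
from mart.attack import Perturber
from mart.attack.initializer import Uniform  # assumed import path for the "uniform" initializer config

perturber = Perturber(
    initializer=Uniform(min=127, max=129),  # assumed signature, mirroring the experiment config
    projector=None,
    size=[3, 416, 416],  # perturbation is created immediately via configure_perturbation(torch.empty(size))
)
```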
8 changes: 8 additions & 0 deletions mart/attack/projector.py
@@ -26,6 +26,14 @@ def __call__(
if isinstance(perturbation, torch.Tensor) and isinstance(input, torch.Tensor):
self.project_(perturbation, input=input, target=target)

elif (
isinstance(perturbation, torch.Tensor)
and isinstance(input, Iterable) # noqa: W503
and isinstance(target, Iterable) # noqa: W503
):
for input_i, target_i in zip(input, target):
self.project_(perturbation, input=input_i, target=target_i)

elif (
isinstance(perturbation, Iterable)
and isinstance(input, Iterable) # noqa: W503
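A small sketch of the new dispatch branch (illustrative, not from the diff): a single shared perturbation tensor paired with iterable inputs and targets is now projected in place once per example. Only the Projector base class and its project_ hook come from the file above; the ClampProjector subclass and tensor shapes are made up for illustration.

```python
import torch

from mart.attack.projector import Projector


class ClampProjector(Projector):
    """Illustrative projector: clamp the shared perturbation into a fixed range."""

    def project_(self, perturbation, *, input, target):
        perturbation.clamp_(0, 255)


projector = ClampProjector()

perturbation = torch.full((3, 416, 416), 300.0)              # one shared patch
inputs = [torch.rand(3, 800, 800), torch.rand(3, 640, 480)]  # iterable of images
targets = [{}, {}]                                           # iterable of targets

projector(perturbation, input=inputs, target=targets)        # hits the new tensor + Iterable branch
assert perturbation.max().item() <= 255
```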
48 changes: 36 additions & 12 deletions mart/callbacks/eval_mode.py
@@ -4,23 +4,47 @@
# SPDX-License-Identifier: BSD-3-Clause
#

from __future__ import annotations

from pytorch_lightning.callbacks import Callback

from mart import utils

logger = utils.get_pylogger(__name__)

__all__ = ["AttackInEvalMode"]


class AttackInEvalMode(Callback):
"""Switch the model into eval mode during attack."""

def __init__(self):
self.training_mode_status = None

def on_train_start(self, trainer, model):
self.training_mode_status = model.training
model.train(False)

def on_train_end(self, trainer, model):
assert self.training_mode_status is not None

# Resume the previous training status of the model.
model.train(self.training_mode_status)
def __init__(self, module_classes: type | list[type]):
# FIXME: convert strings to classes using hydra.utils.get_class? This will clean up some verbosity in configuration but will require importing hydra in this callback.
if isinstance(module_classes, type):
module_classes = [module_classes]

self.module_classes = tuple(module_classes)

def setup(self, trainer, pl_module, stage):
if stage != "fit":
return

# Log to the console so the user can visually see which modules will be in eval mode during training.
for name, module in pl_module.named_modules():
if isinstance(module, self.module_classes):
logger.info(
f"Setting eval mode for {name} ({module.__class__.__module__}.{module.__class__.__name__})"
)

def on_train_epoch_start(self, trainer, pl_module):
# We must use on_train_epoch_start because PL will set pl_module to train mode right before this callback.
# See: https://lightning.ai/docs/pytorch/stable/common/lightning_module.html#hooks
for name, module in pl_module.named_modules():
if isinstance(module, self.module_classes):
module.eval()

def on_train_epoch_end(self, trainer, pl_module):
# FIXME: Why is this necessary?
for name, module in pl_module.named_modules():
if isinstance(module, self.module_classes):
module.train()
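A hedged usage sketch (not part of the diff): pinning normalization and dropout layers of the victim model to eval mode while the attack trains. The callback import path and constructor come from this change and its config; the class list mirrors the commented example in attack_in_eval_mode.yaml below, and the Trainer arguments are illustrative.

```python
import torch.nn as nn
from pytorch_lightning import Trainer

from mart.callbacks import AttackInEvalMode

callback = AttackInEvalMode(
    module_classes=[nn.BatchNorm2d, nn.SyncBatchNorm, nn.Dropout],  # mirrors the config comments
)
trainer = Trainer(callbacks=[callback], max_steps=100)  # illustrative settings
# trainer.fit(pl_module, datamodule=datamodule)         # supplied by the experiment
```

Using on_train_epoch_start matters because Lightning switches the module back to train mode right before each epoch, as the comment in the hook notes.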
34 changes: 28 additions & 6 deletions mart/callbacks/no_grad_mode.py
@@ -4,8 +4,15 @@
# SPDX-License-Identifier: BSD-3-Clause
#

from __future__ import annotations

import torch
from pytorch_lightning.callbacks import Callback

from mart import utils

logger = utils.get_pylogger(__name__)

__all__ = ["ModelParamsNoGrad"]


@@ -15,10 +22,25 @@ class ModelParamsNoGrad(Callback):
This callback should not change the result. Don't use unless an attack runs faster.
"""

def on_train_start(self, trainer, model):
for param in model.parameters():
param.requires_grad_(False)
def __init__(self, module_names: str | list[str] = None):
if isinstance(module_names, str):
module_names = [module_names]

self.module_names = module_names

def setup(self, trainer, pl_module, stage):
if stage != "fit":
return

# We use setup, and not on_train_start, so that mart.optim.OptimizerFactory can ignore parameters with no gradients.
# See: https://lightning.ai/docs/pytorch/stable/common/lightning_module.html#hooks
for name, param in pl_module.named_parameters():
if any(name.startswith(module_name) for module_name in self.module_names):
logger.info(f"Disabling gradient for {name}")
param.requires_grad_(False)

def on_train_end(self, trainer, model):
for param in model.parameters():
param.requires_grad_(True)
def teardown(self, trainer, pl_module, stage):
for name, param in pl_module.named_parameters():
if any(name.startswith(module_name) for module_name in self.module_names):
# FIXME: Why is this necessary?
param.requires_grad_(True)
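A hedged sketch of the reworked callback (not from the diff): gradients are disabled for parameters whose names start with the given prefixes, so an optimizer factory that filters on requires_grad never picks up the frozen victim model. The import path and constructor come from this change and its config; the module name is illustrative.

```python
from pytorch_lightning import Trainer

from mart.callbacks import ModelParamsNoGrad

callback = ModelParamsNoGrad(
    module_names=["losses_and_detections"],  # illustrative prefix, borrowed from the experiment config
)
trainer = Trainer(callbacks=[callback])
# trainer.fit(pl_module, datamodule=datamodule)  # matching parameters get requires_grad_(False) during setup("fit")
```

As the code comment explains, doing this in setup rather than on_train_start is what lets mart.optim.OptimizerFactory ignore the frozen parameters.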
2 changes: 2 additions & 0 deletions mart/configs/attack/composer/warp_composite.yaml
@@ -0,0 +1,2 @@
_target_: mart.attack.composer.WarpComposite
warp: ???
1 change: 1 addition & 0 deletions mart/configs/attack/perturber/default.yaml
@@ -1,3 +1,4 @@
_target_: mart.attack.Perturber
initializer: ???
projector: null
size: null
9 changes: 9 additions & 0 deletions mart/configs/callbacks/attack_in_eval_mode.yaml
@@ -1,2 +1,11 @@
attack_in_eval_mode:
_target_: mart.callbacks.AttackInEvalMode
module_classes: ???
# - _target_: hydra.utils.get_class
# path: mart.models.LitModular
# - _target_: hydra.utils.get_class
# path: torch.nn.BatchNorm2d
# - _target_: hydra.utils.get_class
# path: torch.nn.Dropout
# - _target_: hydra.utils.get_class
# path: torch.nn.SyncBatchNorm
3 changes: 2 additions & 1 deletion mart/configs/callbacks/no_grad_mode.yaml
@@ -1,2 +1,3 @@
attack_in_eval_mode:
no_grad_mode:
_target_: mart.callbacks.ModelParamsNoGrad
module_names: ???
@@ -0,0 +1,86 @@
# @package _global_

defaults:
- /attack/perturber@model.modules.perturbation: default
- /attack/perturber/initializer@model.modules.perturbation.initializer: uniform
- /attack/perturber/projector@model.modules.perturbation.projector: range
- /attack/composer@model.modules.input_adv: warp_composite
- /attack/gradient_modifier@model.gradient_modifier: lp_normalizer
- override /datamodule: coco
- override /model: torchvision_faster_rcnn
- override /metric: average_precision
- override /optimization: super_convergence
- override /callbacks:
[model_checkpoint, lr_monitor, perturbation_visualizer, gradient_monitor]

task_name: "COCO_TorchvisionFasterRCNN_ShapeShifter"
tags: ["adv"]

optimized_metric: "test_metrics/map"

callbacks:
model_checkpoint:
monitor: "validation_metrics/map"
mode: "min"

perturbation_visualizer:
perturbation: "model.perturbation.perturbation"

trainer:
# 117,266 training images * 6 epochs / batch_size 2 = 351,798 steps
max_steps: 351798
# FIXME: "nms_kernel" not implemented for 'BFloat16', torch.ops.torchvision.nms().
precision: 32

datamodule:
num_workers: 8
ims_per_batch: 2

model:
modules:
perturbation:
size: [3, 416, 416]

initializer:
min: 127
max: 129

input_adv:
warp:
_target_: torchvision.transforms.Compose
transforms:
- _target_: mart.transforms.ColorJitter
brightness: [0.5, 1.5]
contrast: [0.5, 1.5]
saturation: [0.5, 1.0]
hue: [-0.05, 0.05]
- _target_: torchvision.transforms.RandomAffine
degrees: [-5, 5]
translate: [0.1, 0.25]
scale: [0.4, 0.6]
shear: [-3, 3, -3, 3]
interpolation: 2 # BILINEAR
clamp: [0, 255]

losses_and_detections:
model:
num_classes: null # inferred by torchvision
weights: COCO_V1

optimizer:
lr: 25.5
momentum: 0.9
maximize: True

gradient_modifier:
p: inf

training_sequence:
seq005: perturbation
seq006: input_adv
seq010:
preprocessor: ["input_adv"]

validation_sequence: ${.training_sequence}

test_sequence: ${.validation_sequence}
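As a quick sanity check on the max_steps comment in the trainer block above, a short sketch of the arithmetic (the image count is taken from that comment):

```python
# 117,266 COCO training images, 6 epochs, batch size 2 -> 351,798 optimizer steps
num_images, epochs, batch_size = 117_266, 6, 2
max_steps = num_images * epochs // batch_size
assert max_steps == 351_798
```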
12 changes: 2 additions & 10 deletions mart/configs/metric/average_precision.yaml
@@ -9,13 +9,5 @@ validation_metrics:
compute_on_step: false

test_metrics:
_target_: torchmetrics.collections.MetricCollection
_convert_: partial
metrics:
map:
_target_: torchmetrics.detection.MAP
compute_on_step: false
json:
_target_: mart.utils.export.CocoPredictionJSON
prediction_file_name: ${paths.output_dir}/test_prediction.json
groundtruth_file_name: ${paths.output_dir}/test_groundtruth.json
_target_: torchmetrics.detection.MAP
compute_on_step: false