From d699b14235887e2bed00d77edf7413a61687f998 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 17 Apr 2023 14:04:49 -0700 Subject: [PATCH 001/106] Add YOLOv3 with ShapeShifter --- mart/attack/composer.py | 4 +- mart/configs/datamodule/coco_yolov3.yaml | 24 ++++++ mart/configs/experiment/COCO_YOLOv3.yaml | 31 ++++++++ .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 64 ++++++++++++++++ mart/configs/metric/average_precision.yaml | 12 +-- mart/configs/model/yolov3.yaml | 63 ++++++++++++++++ mart/datamodules/coco.py | 8 ++ mart/models/yolov3.py | 74 +++++++++++++++++++ 8 files changed, 269 insertions(+), 11 deletions(-) create mode 100644 mart/configs/datamodule/coco_yolov3.yaml create mode 100644 mart/configs/experiment/COCO_YOLOv3.yaml create mode 100644 mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml create mode 100644 mart/configs/model/yolov3.yaml create mode 100644 mart/models/yolov3.py diff --git a/mart/attack/composer.py b/mart/attack/composer.py index 728a8f15..65fbaf14 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -143,14 +143,16 @@ def __init__( contrast=0, saturation=0, hue=0, + pixel_scale=255, **kwargs, ): super().__init__(*args, **kwargs) self.color_jitter = T.ColorJitter(brightness, contrast, saturation, hue) + self.pixel_scale = pixel_scale def compose(self, perturbation, *, input, target): # ColorJitter and friends assume floating point tensors are between [0, 1]... 
- perturbation = self.color_jitter(perturbation / 255) * 255 + perturbation = self.color_jitter(perturbation / self.pixel_scale) * self.pixel_scale return super().compose(perturbation, input=input, target=target) diff --git a/mart/configs/datamodule/coco_yolov3.yaml b/mart/configs/datamodule/coco_yolov3.yaml new file mode 100644 index 00000000..e7acef38 --- /dev/null +++ b/mart/configs/datamodule/coco_yolov3.yaml @@ -0,0 +1,24 @@ +defaults: + - default.yaml + +num_workers: 2 + +train_dataset: + _target_: yolov3.datasets.coco.CocoDetectionBoundingBox + img_root: ${paths.data_dir}/coco/train2017 + ann_file_name: ${paths.data_dir}/coco/annotations/instances_train2017.json + img_size: ??? + transform: random + +val_dataset: + _target_: yolov3.datasets.coco.CocoDetectionBoundingBox + img_root: ${paths.data_dir}/coco/val2017 + ann_file_name: ${paths.data_dir}/coco/annotations/instances_val2017.json + img_size: ${..train_dataset.img_size} + transform: default + +test_dataset: ${.val_dataset} + +collate_fn: + _target_: hydra.utils.get_method + path: mart.datamodules.coco.collate_img_label_fn diff --git a/mart/configs/experiment/COCO_YOLOv3.yaml b/mart/configs/experiment/COCO_YOLOv3.yaml new file mode 100644 index 00000000..db5e991a --- /dev/null +++ b/mart/configs/experiment/COCO_YOLOv3.yaml @@ -0,0 +1,31 @@ +# @package _global_ + +defaults: + - override /datamodule: coco_yolov3 + - override /model: yolov3 + - override /metric: average_precision + - override /optimization: super_convergence + +task_name: "COCO_YOLOv3" +tags: ["evaluation"] + +optimized_metric: "test_metrics/map" + +trainer: + # 117,266 training images, 6 epochs, batch_size=16, 43,974.75 + max_steps: 43975 + # FIXME: "nms_kernel" not implemented for 'BFloat16', torch.ops.torchvision.nms(). 
+ precision: 32 + +datamodule: + num_workers: 32 + ims_per_batch: 16 + + train_dataset: + img_size: 416 + +model: + optimizer: + lr: 0.001 + momentum: 0.9 + weight_decay: 0.0005 diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml new file mode 100644 index 00000000..1413214e --- /dev/null +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -0,0 +1,64 @@ +# @package _global_ + +defaults: + - COCO_YOLOv3 + - /attack/perturber@model.modules.perturber: default + - /attack/perturber/initializer@model.modules.perturber.initializer: uniform + - /attack/perturber/composer@model.modules.perturber.composer: color_jitter_random_affine_overlay + - /attack/perturber/projector@model.modules.perturber.projector: range + - /attack/optimizer@model.optimizer: sgd + - /attack/gradient_modifier@model.gradient_modifier: lp_normalizer + +task_name: "COCO_YOLOv3_ShapeShifter" +tags: ["adv"] + +trainer: + # 117,266 training images, 1 epochs, batch_size=16, 7,329.125 + max_steps: 7330 + +datamodule: + num_workers: 32 + ims_per_batch: 16 + +model: + modules: + perturber: + size: [3, 416, 234] + + initializer: + min: 0.49 + max: 0.51 + + projector: + min: 0.0 + max: 1.0 + + composer: + degrees: [-15, 15] + translate: [0.25, 0.25] + scale: [0.25, 0.75] + shear: [-5, 5, -5, 5] + brightness: 0.3 + contrast: 0.3 + saturation: 1.0 + hue: 0.1 + pixel_scale: 1.0 + clamp: [0, 1] + + freeze: "logits" + + optimizer: + lr: 0.1 + + gradient_modifier: + p: inf + + training_sequence: + seq005: perturber + + seq010: + logits: ["perturber"] + + validation_sequence: ${.training_sequence} + + test_sequence: ${.validation_sequence} diff --git a/mart/configs/metric/average_precision.yaml b/mart/configs/metric/average_precision.yaml index d41f9743..3438a090 100644 --- a/mart/configs/metric/average_precision.yaml +++ b/mart/configs/metric/average_precision.yaml @@ -9,13 +9,5 @@ validation_metrics: compute_on_step: false 
test_metrics: - _target_: torchmetrics.collections.MetricCollection - _convert_: partial - metrics: - map: - _target_: torchmetrics.detection.MAP - compute_on_step: false - json: - _target_: mart.utils.export.CocoPredictionJSON - prediction_file_name: ${paths.output_dir}/test_prediction.json - groundtruth_file_name: ${paths.output_dir}/test_groundtruth.json + _target_: torchmetrics.detection.MAP + compute_on_step: false diff --git a/mart/configs/model/yolov3.yaml b/mart/configs/model/yolov3.yaml new file mode 100644 index 00000000..ff05e2ce --- /dev/null +++ b/mart/configs/model/yolov3.yaml @@ -0,0 +1,63 @@ +defaults: + - modular + +modules: + logits: + _target_: yolov3.main.load_yolov3_model + device: cpu + ckpt: false + mode: eval + weight_path: ${paths.data_dir}/yolov3_original.pt + + loss: + _target_: mart.models.yolov3.Loss + image_size: 416 # FIXME: use ${training_data.transform.image_size}? + average: True + + detections: + _target_: mart.models.yolov3.Detections + nms: true + conf_thres: 0.1 + nms_thres: 0.4 + + output: + _target_: mart.nn.ReturnKwargs + +training_sequence: + seq010: + logits: ["input"] + + seq020: + loss: + logits: logits + targets: target.target + target_lengths: target.lengths + + seq030: + detections: + logits: logits + targets: target.target + target_lengths: target.lengths + + seq040: + output: + preds: detections.preds + target: detections.target + loss: loss.total_loss + coord_loss: loss.coord_loss + obj_loss: loss.obj_loss + noobj_loss: loss.noobj_loss + class_loss: loss.class_loss + +validation_sequence: ${.training_sequence} +test_sequence: ${.validation_sequence} + +training_step_log: + - loss + - coord_loss + - obj_loss + - noobj_loss + - class_loss + +validation_step_log: ${.training_step_log} +test_step_log: ${.validation_step_log} diff --git a/mart/datamodules/coco.py b/mart/datamodules/coco.py index 42ddcebb..dc250112 100644 --- a/mart/datamodules/coco.py +++ b/mart/datamodules/coco.py @@ -10,6 +10,7 @@ import numpy as np 
from torchvision.datasets.coco import CocoDetection as CocoDetection_ from torchvision.datasets.folder import default_loader +from yolov3.datasets.utils import collate_img_label_fn as collate_img_label_fn_ __all__ = ["CocoDetection"] @@ -89,3 +90,10 @@ def __getitem__(self, index: int): # Source: https://github.com/pytorch/vision/blob/dc07ac2add8285e16a716564867d0b4b953f6735/references/detection/utils.py#L203 def collate_fn(batch): return tuple(zip(*batch)) + + +def collate_img_label_fn(batch): + image, target, lengths = collate_img_label_fn_(batch) + + # Collate into tuple of (input, target) where target is a dict. + return image, {"target": target, "lengths": lengths} diff --git a/mart/models/yolov3.py b/mart/models/yolov3.py new file mode 100644 index 00000000..0ade13fb --- /dev/null +++ b/mart/models/yolov3.py @@ -0,0 +1,74 @@ +# +# Copyright (C) 2023 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +import torch +from yolov3.inference import post_process +from yolov3.training import yolo_loss_fn +from yolov3.utils import cxcywh_to_xywh + + +class Loss(torch.nn.Module): + def __init__(self, image_size, average=True): + super().__init__() + + self.image_size = image_size + self.average = average + + def forward(self, logits, targets, target_lengths): + losses = yolo_loss_fn(logits, targets, target_lengths, self.image_size, self.average) + total_loss, coord_loss, obj_loss, noobj_loss, class_loss = losses + + return { + "total_loss": total_loss, + "coord_loss": coord_loss, + "obj_loss": obj_loss, + "noobj_loss": noobj_loss, + "class_loss": class_loss, + } + + +class Detections(torch.nn.Module): + def __init__(self, nms=True, conf_thres=0.8, nms_thres=0.4): + super().__init__() + + self.nms = nms + self.conf_thres = conf_thres + self.nms_thres = nms_thres + + @staticmethod + def xywh_to_xyxy(boxes): + boxes[:, 2] = boxes[:, 0] + boxes[:, 2] + boxes[:, 3] = boxes[:, 1] + boxes[:, 3] + + return boxes + + @staticmethod + def 
tensor_to_dict(detection): + boxes = detection[:, 0:4] + scores = detection[:, 4] + labels = detection[:, 5:] + + boxes = cxcywh_to_xywh(boxes) + boxes = Detections.xywh_to_xyxy(boxes) + + if labels.shape[1] == 1: # index + labels = labels[:, 0].to(int) + else: # one-hot + labels = labels.argmax(dim=1) + + return {"boxes": boxes, "labels": labels, "scores": scores} + + @torch.no_grad() + def forward(self, logits, targets, target_lengths): + detections = post_process(logits, self.nms, self.conf_thres, self.nms_thres) + + # Convert detections and targets to List[dict[str, torch.Tensor]]. This is the format + # torchmetrics wants. + preds = [Detections.tensor_to_dict(det) for det in detections] + targets = [target[:length] for target, length in zip(targets, target_lengths)] + targets = [Detections.tensor_to_dict(target) for target in targets] + + return {"preds": preds, "target": targets} From 44b3be5d28e28d900496a3616db06ba98ebfb9ad Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 17 Apr 2023 17:06:53 -0700 Subject: [PATCH 002/106] Move mart.attack.callbacks to mart.callbacks --- mart/{attack => }/callbacks/__init__.py | 0 mart/{attack => }/callbacks/eval_mode.py | 0 mart/{attack => }/callbacks/no_grad_mode.py | 0 mart/{attack => }/callbacks/progress_bar.py | 0 mart/{attack => }/callbacks/visualizer.py | 0 mart/configs/attack/callbacks/attack_in_eval_mode.yaml | 2 -- mart/configs/attack/callbacks/no_grad_mode.yaml | 2 -- mart/configs/attack/callbacks/progress_bar.yaml | 3 --- mart/configs/callbacks/attack_in_eval_mode.yaml | 2 ++ mart/configs/{attack => }/callbacks/image_visualizer.yaml | 2 +- mart/configs/callbacks/no_grad_mode.yaml | 2 ++ mart/configs/callbacks/progress_bar.yaml | 3 +++ 12 files changed, 8 insertions(+), 8 deletions(-) rename mart/{attack => }/callbacks/__init__.py (100%) rename mart/{attack => }/callbacks/eval_mode.py (100%) rename mart/{attack => }/callbacks/no_grad_mode.py (100%) rename mart/{attack => }/callbacks/progress_bar.py (100%) 
rename mart/{attack => }/callbacks/visualizer.py (100%) delete mode 100644 mart/configs/attack/callbacks/attack_in_eval_mode.yaml delete mode 100644 mart/configs/attack/callbacks/no_grad_mode.yaml delete mode 100644 mart/configs/attack/callbacks/progress_bar.yaml create mode 100644 mart/configs/callbacks/attack_in_eval_mode.yaml rename mart/configs/{attack => }/callbacks/image_visualizer.yaml (53%) create mode 100644 mart/configs/callbacks/no_grad_mode.yaml create mode 100644 mart/configs/callbacks/progress_bar.yaml diff --git a/mart/attack/callbacks/__init__.py b/mart/callbacks/__init__.py similarity index 100% rename from mart/attack/callbacks/__init__.py rename to mart/callbacks/__init__.py diff --git a/mart/attack/callbacks/eval_mode.py b/mart/callbacks/eval_mode.py similarity index 100% rename from mart/attack/callbacks/eval_mode.py rename to mart/callbacks/eval_mode.py diff --git a/mart/attack/callbacks/no_grad_mode.py b/mart/callbacks/no_grad_mode.py similarity index 100% rename from mart/attack/callbacks/no_grad_mode.py rename to mart/callbacks/no_grad_mode.py diff --git a/mart/attack/callbacks/progress_bar.py b/mart/callbacks/progress_bar.py similarity index 100% rename from mart/attack/callbacks/progress_bar.py rename to mart/callbacks/progress_bar.py diff --git a/mart/attack/callbacks/visualizer.py b/mart/callbacks/visualizer.py similarity index 100% rename from mart/attack/callbacks/visualizer.py rename to mart/callbacks/visualizer.py diff --git a/mart/configs/attack/callbacks/attack_in_eval_mode.yaml b/mart/configs/attack/callbacks/attack_in_eval_mode.yaml deleted file mode 100644 index 15768e22..00000000 --- a/mart/configs/attack/callbacks/attack_in_eval_mode.yaml +++ /dev/null @@ -1,2 +0,0 @@ -attack_in_eval_mode: - _target_: mart.attack.callbacks.AttackInEvalMode diff --git a/mart/configs/attack/callbacks/no_grad_mode.yaml b/mart/configs/attack/callbacks/no_grad_mode.yaml deleted file mode 100644 index c94b9597..00000000 --- 
a/mart/configs/attack/callbacks/no_grad_mode.yaml +++ /dev/null @@ -1,2 +0,0 @@ -attack_in_eval_mode: - _target_: mart.attack.callbacks.ModelParamsNoGrad diff --git a/mart/configs/attack/callbacks/progress_bar.yaml b/mart/configs/attack/callbacks/progress_bar.yaml deleted file mode 100644 index e528c714..00000000 --- a/mart/configs/attack/callbacks/progress_bar.yaml +++ /dev/null @@ -1,3 +0,0 @@ -progress_bar: - _target_: mart.attack.callbacks.ProgressBar - process_position: 1 diff --git a/mart/configs/callbacks/attack_in_eval_mode.yaml b/mart/configs/callbacks/attack_in_eval_mode.yaml new file mode 100644 index 00000000..2acdc953 --- /dev/null +++ b/mart/configs/callbacks/attack_in_eval_mode.yaml @@ -0,0 +1,2 @@ +attack_in_eval_mode: + _target_: mart.callbacks.AttackInEvalMode diff --git a/mart/configs/attack/callbacks/image_visualizer.yaml b/mart/configs/callbacks/image_visualizer.yaml similarity index 53% rename from mart/configs/attack/callbacks/image_visualizer.yaml rename to mart/configs/callbacks/image_visualizer.yaml index a75b6db2..65b9f8dd 100644 --- a/mart/configs/attack/callbacks/image_visualizer.yaml +++ b/mart/configs/callbacks/image_visualizer.yaml @@ -1,3 +1,3 @@ image_visualizer: - _target_: mart.attack.callbacks.PerturbedImageVisualizer + _target_: mart.callbacks.PerturbedImageVisualizer folder: ${paths.output_dir}/adversarial_examples diff --git a/mart/configs/callbacks/no_grad_mode.yaml b/mart/configs/callbacks/no_grad_mode.yaml new file mode 100644 index 00000000..6b4312fd --- /dev/null +++ b/mart/configs/callbacks/no_grad_mode.yaml @@ -0,0 +1,2 @@ +attack_in_eval_mode: + _target_: mart.callbacks.ModelParamsNoGrad diff --git a/mart/configs/callbacks/progress_bar.yaml b/mart/configs/callbacks/progress_bar.yaml new file mode 100644 index 00000000..4298c7d7 --- /dev/null +++ b/mart/configs/callbacks/progress_bar.yaml @@ -0,0 +1,3 @@ +progress_bar: + _target_: mart.callbacks.ProgressBar + process_position: 1 From 
25a81efc38e0b2aeede92491e2e960bf64ced11a Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 17 Apr 2023 17:07:20 -0700 Subject: [PATCH 003/106] Disable default EarlyStopping --- mart/configs/callbacks/default.yaml | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/mart/configs/callbacks/default.yaml b/mart/configs/callbacks/default.yaml index 5df27bfd..abdfa8b2 100644 --- a/mart/configs/callbacks/default.yaml +++ b/mart/configs/callbacks/default.yaml @@ -1,8 +1,7 @@ defaults: - - model_checkpoint.yaml - - early_stopping.yaml - - model_summary.yaml - - rich_progress_bar.yaml + - model_checkpoint + - model_summary + - rich_progress_bar - _self_ model_checkpoint: @@ -13,10 +12,5 @@ model_checkpoint: save_last: True auto_insert_metric_name: False -early_stopping: - monitor: "val/acc" - patience: 100 - mode: "max" - model_summary: max_depth: -1 From 0c6bad4b4772fb4b8b404147947177d3a7f4eeca Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 17 Apr 2023 17:07:47 -0700 Subject: [PATCH 004/106] Add PerturbationVisualizer callback --- mart/callbacks/visualizer.py | 22 ++++++++++++++++++- .../callbacks/perturbation_visualizer.yaml | 2 ++ .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 1 + 3 files changed, 24 insertions(+), 1 deletion(-) create mode 100644 mart/configs/callbacks/perturbation_visualizer.yaml diff --git a/mart/callbacks/visualizer.py b/mart/callbacks/visualizer.py index 3354321e..cd822311 100644 --- a/mart/callbacks/visualizer.py +++ b/mart/callbacks/visualizer.py @@ -9,7 +9,7 @@ from pytorch_lightning.callbacks import Callback from torchvision.transforms import ToPILImage -__all__ = ["PerturbedImageVisualizer"] +__all__ = ["PerturbedImageVisualizer", "PerturbationVisualizer"] class PerturbedImageVisualizer(Callback): @@ -39,3 +39,23 @@ def on_train_end(self, trainer, model): fpath = os.path.join(self.folder, fname) im = self.convert(img / 255) im.save(fpath) + + +class PerturbationVisualizer(Callback): + def __init__(self, 
frequency: int = 100): + self.frequency = 100 + + def on_train_batch_end(self, trainer, module, outputs, batch, batch_idx): + if batch_idx % self.frequency != 0: + return + + # FIXME: Generalize this by using DotDict? + perturbation = module.model.perturber.perturbation + + # Add image to each logger + for logger in trainer.loggers: + # FIXME: Should we just use isinstance(logger.experiment, SummaryWriter)? + if not hasattr(logger.experiment, "add_image"): + continue + + logger.experiment.add_image("perturbation", perturbation, global_step=trainer.global_step) diff --git a/mart/configs/callbacks/perturbation_visualizer.yaml b/mart/configs/callbacks/perturbation_visualizer.yaml new file mode 100644 index 00000000..71071af9 --- /dev/null +++ b/mart/configs/callbacks/perturbation_visualizer.yaml @@ -0,0 +1,2 @@ +perturbation_visualizer: + _target_: mart.callbacks.PerturbationVisualizer diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 1413214e..292d34a2 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -8,6 +8,7 @@ defaults: - /attack/perturber/projector@model.modules.perturber.projector: range - /attack/optimizer@model.optimizer: sgd - /attack/gradient_modifier@model.gradient_modifier: lp_normalizer + - override /callbacks: [perturbation_visualizer] task_name: "COCO_YOLOv3_ShapeShifter" tags: ["adv"] From 150e862e8963d1623ee00d58af39bcbeb18c4c9c Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 17 Apr 2023 17:33:33 -0700 Subject: [PATCH 005/106] Add train end perturbation --- mart/callbacks/visualizer.py | 16 +++++++++++----- .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 4 ++++ 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/mart/callbacks/visualizer.py b/mart/callbacks/visualizer.py index cd822311..835c3fe3 100644 --- a/mart/callbacks/visualizer.py +++ b/mart/callbacks/visualizer.py @@ 
-45,12 +45,9 @@ class PerturbationVisualizer(Callback): def __init__(self, frequency: int = 100): self.frequency = 100 - def on_train_batch_end(self, trainer, module, outputs, batch, batch_idx): - if batch_idx % self.frequency != 0: - return - + def log_perturbation(self, trainer, pl_module) # FIXME: Generalize this by using DotDict? - perturbation = module.model.perturber.perturbation + perturbation = pl_module.model.perturber.perturbation # Add image to each logger for logger in trainer.loggers: @@ -59,3 +56,12 @@ def on_train_batch_end(self, trainer, module, outputs, batch, batch_idx): continue logger.experiment.add_image("perturbation", perturbation, global_step=trainer.global_step) + + def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): + if batch_idx % self.frequency != 0: + return + + self.log_perturbation(trainer, pl_module) + + def on_train_end(self, trainer, pl_module): + self.log_perturbation(trainer, pl_module) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 292d34a2..3788754a 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -17,6 +17,10 @@ trainer: # 117,266 training images, 1 epochs, batch_size=16, 7,329.125 max_steps: 7330 +callbacks: + perturbation_visualizer: + frequency: 500 + datamodule: num_workers: 32 ims_per_batch: 16 From def4095ab30350169a4832636b49d802c27a8b3b Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 17 Apr 2023 17:33:49 -0700 Subject: [PATCH 006/106] Log learning rate --- mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 3788754a..ecb0accc 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ 
b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -8,7 +8,7 @@ defaults: - /attack/perturber/projector@model.modules.perturber.projector: range - /attack/optimizer@model.optimizer: sgd - /attack/gradient_modifier@model.gradient_modifier: lp_normalizer - - override /callbacks: [perturbation_visualizer] + - override /callbacks: [perturbation_visualizer, lr_monitor] task_name: "COCO_YOLOv3_ShapeShifter" tags: ["adv"] From f57edd08bc84e678ba7130216276538debb7ff23 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 17 Apr 2023 17:34:38 -0700 Subject: [PATCH 007/106] style --- mart/callbacks/visualizer.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mart/callbacks/visualizer.py b/mart/callbacks/visualizer.py index 835c3fe3..2dc01c2c 100644 --- a/mart/callbacks/visualizer.py +++ b/mart/callbacks/visualizer.py @@ -45,7 +45,7 @@ class PerturbationVisualizer(Callback): def __init__(self, frequency: int = 100): self.frequency = 100 - def log_perturbation(self, trainer, pl_module) + def log_perturbation(self, trainer, pl_module): # FIXME: Generalize this by using DotDict? 
perturbation = pl_module.model.perturber.perturbation @@ -55,7 +55,9 @@ def log_perturbation(self, trainer, pl_module) if not hasattr(logger.experiment, "add_image"): continue - logger.experiment.add_image("perturbation", perturbation, global_step=trainer.global_step) + logger.experiment.add_image( + "perturbation", perturbation, global_step=trainer.global_step + ) def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): if batch_idx % self.frequency != 0: From 0580214f1ed4a3ef4d873efbd24303553f13fe06 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 17 Apr 2023 17:41:25 -0700 Subject: [PATCH 008/106] Fix max_steps --- mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index ecb0accc..7729a8a9 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -14,8 +14,8 @@ task_name: "COCO_YOLOv3_ShapeShifter" tags: ["adv"] trainer: - # 117,266 training images, 1 epochs, batch_size=16, 7,329.125 - max_steps: 7330 + # 118287 training images, 1 epoch, batch_size=16: FLOOR(118287/16) = 7392 + max_steps: 7392 callbacks: perturbation_visualizer: From ae20b9826b2092b5fe54fac55f11b3da8a468e77 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 17 Apr 2023 17:44:51 -0700 Subject: [PATCH 009/106] bugfix --- mart/callbacks/visualizer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/callbacks/visualizer.py b/mart/callbacks/visualizer.py index 2dc01c2c..74490dc5 100644 --- a/mart/callbacks/visualizer.py +++ b/mart/callbacks/visualizer.py @@ -43,7 +43,7 @@ def on_train_end(self, trainer, model): class PerturbationVisualizer(Callback): def __init__(self, frequency: int = 100): - self.frequency = 100 + self.frequency = frequency def log_perturbation(self, trainer, pl_module): # 
FIXME: Generalize this by using DotDict? From 3fe80a8d7760dd16d290fd54e94ba46599a4fb3f Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 17 Apr 2023 18:05:33 -0700 Subject: [PATCH 010/106] Enable configuration of loss from command line --- mart/configs/model/yolov3.yaml | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/mart/configs/model/yolov3.yaml b/mart/configs/model/yolov3.yaml index ff05e2ce..4cd1cefa 100644 --- a/mart/configs/model/yolov3.yaml +++ b/mart/configs/model/yolov3.yaml @@ -9,11 +9,14 @@ modules: mode: eval weight_path: ${paths.data_dir}/yolov3_original.pt - loss: + losses: _target_: mart.models.yolov3.Loss image_size: 416 # FIXME: use ${training_data.transform.image_size}? average: True + loss: + _target_: mart.nn.Sum + detections: _target_: mart.models.yolov3.Detections nms: true @@ -28,26 +31,30 @@ training_sequence: logits: ["input"] seq020: - loss: + losses: logits: logits targets: target.target target_lengths: target.lengths seq030: + loss: + - losses.total_loss + + seq040: detections: logits: logits targets: target.target target_lengths: target.lengths - seq040: + seq050: output: preds: detections.preds target: detections.target - loss: loss.total_loss - coord_loss: loss.coord_loss - obj_loss: loss.obj_loss - noobj_loss: loss.noobj_loss - class_loss: loss.class_loss + loss: loss + coord_loss: losses.coord_loss + obj_loss: losses.obj_loss + noobj_loss: losses.noobj_loss + class_loss: losses.class_loss validation_sequence: ${.training_sequence} test_sequence: ${.validation_sequence} From 551e8d72840eb895297ed87e1e1c1d9e7c519258 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 18 Apr 2023 07:27:09 -0700 Subject: [PATCH 011/106] Limit val/test batches --- mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 
7729a8a9..7629c4fa 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -16,6 +16,9 @@ tags: ["adv"] trainer: # 118287 training images, 1 epoch, batch_size=16: FLOOR(118287/16) = 7392 max_steps: 7392 + # mAP can be slow to compute so limit number of images + limit_val_batches: 100 + limit_test_batches: 100 callbacks: perturbation_visualizer: From 8ca64fd91e1bbc43d3e18123efe31f45d6e72ac2 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 18 Apr 2023 09:00:29 -0700 Subject: [PATCH 012/106] Optimize composer parameters --- .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 7629c4fa..5069a8c1 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -42,14 +42,14 @@ model: max: 1.0 composer: - degrees: [-15, 15] - translate: [0.25, 0.25] - scale: [0.25, 0.75] - shear: [-5, 5, -5, 5] - brightness: 0.3 - contrast: 0.3 - saturation: 1.0 - hue: 0.1 + degrees: [-5, 5] + translate: null + scale: [0.3, 0.5] + shear: [-3, 3, -3, 3] + brightness: [0.5, 1.5] + contrast: [0.5, 1.5] + saturation: [0.5, 1.5] + hue: [-0.05, 0.05] pixel_scale: 1.0 clamp: [0, 1] From 40932c24ed06800c162c5d0d9c5bafe197d04870 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 18 Apr 2023 09:15:26 -0700 Subject: [PATCH 013/106] Don't use variable interpolation on YOLOv3 sequences --- .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 11 +++- mart/configs/model/yolov3.yaml | 61 ++++++++++++++++++- 2 files changed, 68 insertions(+), 4 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 5069a8c1..14bf481e 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ 
b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -63,10 +63,17 @@ model: training_sequence: seq005: perturber + seq010: + logits: ["perturber"] + validation_sequence: + seq005: perturber + seq010: + logits: ["perturber"] + + test_sequence: + seq005: perturber seq010: logits: ["perturber"] - validation_sequence: ${.training_sequence} - test_sequence: ${.validation_sequence} diff --git a/mart/configs/model/yolov3.yaml b/mart/configs/model/yolov3.yaml index 4cd1cefa..3f65e5fd 100644 --- a/mart/configs/model/yolov3.yaml +++ b/mart/configs/model/yolov3.yaml @@ -56,8 +56,65 @@ training_sequence: noobj_loss: losses.noobj_loss class_loss: losses.class_loss -validation_sequence: ${.training_sequence} -test_sequence: ${.validation_sequence} +validation_sequence: + seq010: + logits: ["input"] + + seq020: + losses: + logits: logits + targets: target.target + target_lengths: target.lengths + + seq030: + loss: + - losses.total_loss + + seq040: + detections: + logits: logits + targets: target.target + target_lengths: target.lengths + + seq050: + output: + preds: detections.preds + target: detections.target + loss: loss + coord_loss: losses.coord_loss + obj_loss: losses.obj_loss + noobj_loss: losses.noobj_loss + class_loss: losses.class_loss + +test_sequence: + seq010: + logits: ["input"] + + seq020: + losses: + logits: logits + targets: target.target + target_lengths: target.lengths + + seq030: + loss: + - losses.total_loss + + seq040: + detections: + logits: logits + targets: target.target + target_lengths: target.lengths + + seq050: + output: + preds: detections.preds + target: detections.target + loss: loss + coord_loss: losses.coord_loss + obj_loss: losses.obj_loss + noobj_loss: losses.noobj_loss + class_loss: losses.class_loss training_step_log: - loss From 407b80312e46866137dfbe472051f09d0e281e15 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 18 Apr 2023 09:21:47 -0700 Subject: [PATCH 014/106] Don't output training metrics in YOLOv3 model --- 
mart/configs/experiment/COCO_YOLOv3.yaml | 3 +++ mart/configs/model/yolov3.yaml | 11 +++-------- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3.yaml b/mart/configs/experiment/COCO_YOLOv3.yaml index db5e991a..bcc04185 100644 --- a/mart/configs/experiment/COCO_YOLOv3.yaml +++ b/mart/configs/experiment/COCO_YOLOv3.yaml @@ -25,6 +25,9 @@ datamodule: img_size: 416 model: + # yolov3 model does not produce preds/targets in training sequence + training_metrics: null + optimizer: lr: 0.001 momentum: 0.9 diff --git a/mart/configs/model/yolov3.yaml b/mart/configs/model/yolov3.yaml index 3f65e5fd..988ef79f 100644 --- a/mart/configs/model/yolov3.yaml +++ b/mart/configs/model/yolov3.yaml @@ -26,6 +26,9 @@ modules: output: _target_: mart.nn.ReturnKwargs +# training sequence does not produce preds/targets +training_metrics: null + training_sequence: seq010: logits: ["input"] @@ -41,15 +44,7 @@ training_sequence: - losses.total_loss seq040: - detections: - logits: logits - targets: target.target - target_lengths: target.lengths - - seq050: output: - preds: detections.preds - target: detections.target loss: loss coord_loss: losses.coord_loss obj_loss: losses.obj_loss From c19e81422a6de6d6c477f7f2f6e22c0d9f21ead5 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 18 Apr 2023 09:33:37 -0700 Subject: [PATCH 015/106] Update optimizer parameters --- mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 14bf481e..0b68772f 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -15,7 +15,7 @@ tags: ["adv"] trainer: # 118287 training images, 1 epoch, batch_size=16: FLOOR(118287/16) = 7392 - max_steps: 7392 + max_steps: 73920 # mAP can be slow to compute so limit number of images 
limit_val_batches: 100 limit_test_batches: 100 @@ -56,7 +56,8 @@ model: freeze: "logits" optimizer: - lr: 0.1 + lr: 0.01 + momentum: 0.9 gradient_modifier: p: inf From b5cc6e931efa9c491f57d5cde7c1d2e5918509ec Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 18 Apr 2023 12:31:54 -0700 Subject: [PATCH 016/106] style --- mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 0b68772f..37106ba0 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -76,5 +76,3 @@ model: seq005: perturber seq010: logits: ["perturber"] - - From 07ef40e9fc002480e7b9b1305669d1edaf1250b1 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 18 Apr 2023 17:32:58 -0700 Subject: [PATCH 017/106] Add TV loss --- mart/attack/perturber.py | 12 +++++++++- .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 22 ++++++++++++++++--- mart/configs/model/yolov3.yaml | 21 ++++++++++++++++-- 3 files changed, 49 insertions(+), 6 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 9c3c4a24..9ee82941 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -108,4 +108,14 @@ def forward(self, **batch): self.projector(self.perturbation, **batch) input_adv = self.composer(self.perturbation, **batch) - return input_adv + # FIXME: This is a hack + total_variation = torch.mean( + torch.sum( + torch.square(self.perturbation[:, 1:, :] - self.perturbation[:, :-1, :]) + ) + + torch.sum( # noqa: W503 + torch.square(self.perturbation[:, :, 1:] - self.perturbation[:, :, :-1]) + ) + ) + + return {"input_adv": input_adv, "total_variation": 1e-5*total_variation} diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 37106ba0..03b2d8af 100644 --- 
a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -65,14 +65,30 @@ model: training_sequence: seq005: perturber seq010: - logits: ["perturber"] + logits: ["perturber.input_adv"] + seq030: + loss: + - losses.total_loss + - perturber.total_variation + seq040: + output: + total_variation: perturber.total_variation + + training_step_log: + - loss + - total_loss + - coord_loss + - obj_loss + - noobj_loss + - class_loss + - total_variation validation_sequence: seq005: perturber seq010: - logits: ["perturber"] + logits: ["perturber.input_adv"] test_sequence: seq005: perturber seq010: - logits: ["perturber"] + logits: ["perturber.input_adv"] diff --git a/mart/configs/model/yolov3.yaml b/mart/configs/model/yolov3.yaml index 988ef79f..2e85211e 100644 --- a/mart/configs/model/yolov3.yaml +++ b/mart/configs/model/yolov3.yaml @@ -46,6 +46,7 @@ training_sequence: seq040: output: loss: loss + total_loss: losses.total_loss coord_loss: losses.coord_loss obj_loss: losses.obj_loss noobj_loss: losses.noobj_loss @@ -76,6 +77,7 @@ validation_sequence: preds: detections.preds target: detections.target loss: loss + total_loss: losses.total_loss coord_loss: losses.coord_loss obj_loss: losses.obj_loss noobj_loss: losses.noobj_loss @@ -106,6 +108,7 @@ test_sequence: preds: detections.preds target: detections.target loss: loss + total_loss: losses.total_loss coord_loss: losses.coord_loss obj_loss: losses.obj_loss noobj_loss: losses.noobj_loss @@ -113,10 +116,24 @@ test_sequence: training_step_log: - loss + - total_loss - coord_loss - obj_loss - noobj_loss - class_loss -validation_step_log: ${.training_step_log} -test_step_log: ${.validation_step_log} +validation_step_log: + - loss + - total_loss + - coord_loss + - obj_loss + - noobj_loss + - class_loss + +test_step_log: + - loss + - total_loss + - coord_loss + - obj_loss + - noobj_loss + - class_loss From a686a05e323a7c8d2b74e3138ac1d54e5b48883f Mon Sep 17 00:00:00 2001 From: 
Cory Cornelius Date: Tue, 18 Apr 2023 21:54:07 -0700 Subject: [PATCH 018/106] Always minimize noobj loss --- mart/models/yolov3.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/mart/models/yolov3.py b/mart/models/yolov3.py index 0ade13fb..e988574d 100644 --- a/mart/models/yolov3.py +++ b/mart/models/yolov3.py @@ -21,6 +21,11 @@ def forward(self, logits, targets, target_lengths): losses = yolo_loss_fn(logits, targets, target_lengths, self.image_size, self.average) total_loss, coord_loss, obj_loss, noobj_loss, class_loss = losses + # keep no objects no objects + # FIXME: Parameterize this + noobj_loss = -noobj_loss + total_loss = 0.2*noobj_loss + obj_loss + class_loss + 5*coord_loss + return { "total_loss": total_loss, "coord_loss": coord_loss, From ae34adc559760b7b207993c6193d79762c21f190 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 19 Apr 2023 08:10:54 -0700 Subject: [PATCH 019/106] Update warp --- .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 03b2d8af..a62455f5 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -4,7 +4,7 @@ defaults: - COCO_YOLOv3 - /attack/perturber@model.modules.perturber: default - /attack/perturber/initializer@model.modules.perturber.initializer: uniform - - /attack/perturber/composer@model.modules.perturber.composer: color_jitter_random_affine_overlay + - /attack/perturber/composer@model.modules.perturber.composer: color_jitter_warp_overlay - /attack/perturber/projector@model.modules.perturber.projector: range - /attack/optimizer@model.optimizer: sgd - /attack/gradient_modifier@model.gradient_modifier: lp_normalizer @@ -42,16 +42,21 @@ model: max: 1.0 composer: - degrees: [-5, 5] - translate: null - scale: [0.3, 0.5] - shear: [-3, 3, -3, 3] + warp:
+ _target_: torchvision.transforms.Compose + transforms: + - _target_: torchvision.transforms.RandomAffine + degrees: 0 + scale: [0.3, 0.5] + - _target_: torchvision.transforms.RandomPerspective + distortion_scale: 0.2 + p: 0.5 + clamp: [0, 1] brightness: [0.5, 1.5] contrast: [0.5, 1.5] saturation: [0.5, 1.5] hue: [-0.05, 0.05] pixel_scale: 1.0 - clamp: [0, 1] freeze: "logits" From 763be220796736983fa6d0875ee382df39d2941d Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 19 Apr 2023 13:17:28 -0700 Subject: [PATCH 020/106] Add drop params --- mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index a62455f5..303e5a46 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -57,6 +57,8 @@ model: saturation: [0.5, 1.5] hue: [-0.05, 0.05] pixel_scale: 1.0 + drop_p: 0.75 + drop_range: [0.2, 0.7] freeze: "logits" From 025452d09fcb82f872a9fea4d0ddf2c7e7e49bbd Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 19 Apr 2023 16:23:12 -0700 Subject: [PATCH 021/106] bugfix --- mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 303e5a46..1b3a4c1c 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -45,6 +45,10 @@ model: warp: _target_: torchvision.transforms.Compose transforms: + - _target_: torchvision.transforms.RandomErasing + p: 0.75 + scale: [0.2, 0.7] + ratio: [0.3, 3.3] - _target_: torchvision.transforms.RandomAffine degrees: 0 scale: [0.3, 0.5] @@ -57,8 +61,6 @@ model: saturation: [0.5, 1.5] hue: [-0.05, 0.05] pixel_scale: 1.0 - drop_p: 0.75 - drop_range: [0.2, 
0.7] freeze: "logits" From bffb0b69f8da10ae01200617fca70cb4be0aad7b Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 20 Apr 2023 09:42:26 -0700 Subject: [PATCH 022/106] cleanup --- mart/configs/attack/optimizer/adam.yaml | 10 +++++++ .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 26 +++++++++++++------ 2 files changed, 28 insertions(+), 8 deletions(-) create mode 100644 mart/configs/attack/optimizer/adam.yaml diff --git a/mart/configs/attack/optimizer/adam.yaml b/mart/configs/attack/optimizer/adam.yaml new file mode 100644 index 00000000..816eadeb --- /dev/null +++ b/mart/configs/attack/optimizer/adam.yaml @@ -0,0 +1,10 @@ +_target_: mart.optim.OptimizerFactory +optimizer: + _target_: hydra.utils.get_method + path: torch.optim.Adam +lr: ??? +betas: [0.9, 0.999] +weight_decay: 0 +bias_decay: 0 +norm_decay: 0 +maximize: True diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 1b3a4c1c..4a758201 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -1,24 +1,29 @@ # @package _global_ defaults: - - COCO_YOLOv3 - /attack/perturber@model.modules.perturber: default - /attack/perturber/initializer@model.modules.perturber.initializer: uniform - /attack/perturber/composer@model.modules.perturber.composer: color_jitter_warp_overlay - /attack/perturber/projector@model.modules.perturber.projector: range - - /attack/optimizer@model.optimizer: sgd - /attack/gradient_modifier@model.gradient_modifier: lp_normalizer + - override /optimization: super_convergence + - override /datamodule: coco_yolov3 + - override /model: yolov3 + - override /metric: average_precision - override /callbacks: [perturbation_visualizer, lr_monitor] task_name: "COCO_YOLOv3_ShapeShifter" tags: ["adv"] +optimized_metric: "test_metrics/map" + trainer: - # 118287 training images, 1 epoch, batch_size=16: FLOOR(118287/16) = 7392 - max_steps: 73920 + # 
118287 training images, batch_size=16, FLOOR(118287/16) = 7392 + max_steps: 73920 # 10 epochs # mAP can be slow to compute so limit number of images limit_val_batches: 100 limit_test_batches: 100 + precision: 32 callbacks: perturbation_visualizer: @@ -28,6 +33,9 @@ datamodule: num_workers: 32 ims_per_batch: 16 + train_dataset: + img_size: 416 + model: modules: perturber: @@ -50,11 +58,10 @@ model: scale: [0.2, 0.7] ratio: [0.3, 3.3] - _target_: torchvision.transforms.RandomAffine - degrees: 0 + degrees: [-5, 5] scale: [0.3, 0.5] - - _target_: torchvision.transforms.RandomPerspective - distortion_scale: 0.2 - p: 0.5 + shear: [-3, 3, -3, 3] + interpolation: 2 # BILINEAR clamp: [0, 1] brightness: [0.5, 1.5] contrast: [0.5, 1.5] @@ -67,6 +74,7 @@ model: optimizer: lr: 0.01 momentum: 0.9 + maximize: True gradient_modifier: p: inf @@ -92,6 +100,8 @@ model: - class_loss - total_variation + training_metrics: null + validation_sequence: seq005: perturber seq010: From 3a5977dd763a8f28db5c15193943258d1400bb59 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 20 Apr 2023 12:11:24 -0700 Subject: [PATCH 023/106] style --- mart/attack/perturber.py | 6 ++---- mart/models/yolov3.py | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 9ee82941..879635a1 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -110,12 +110,10 @@ def forward(self, **batch): # FIXME: This is a hack total_variation = torch.mean( - torch.sum( - torch.square(self.perturbation[:, 1:, :] - self.perturbation[:, :-1, :]) - ) + torch.sum(torch.square(self.perturbation[:, 1:, :] - self.perturbation[:, :-1, :])) + torch.sum( # noqa: W503 torch.square(self.perturbation[:, :, 1:] - self.perturbation[:, :, :-1]) ) ) - return {"input_adv": input_adv, "total_variation": 1e-5*total_variation} + return {"input_adv": input_adv, "total_variation": 1e-5 * total_variation} diff --git a/mart/models/yolov3.py 
b/mart/models/yolov3.py index e988574d..5df22fb6 100644 --- a/mart/models/yolov3.py +++ b/mart/models/yolov3.py @@ -24,7 +24,7 @@ def forward(self, logits, targets, target_lengths): # keep no objects no objects # FIXME: Parameterize this noobj_loss = -noobj_loss - total_loss = 0.2*noobj_loss + obj_loss + class_loss + 5*coord_loss + total_loss = 0.2 * noobj_loss + obj_loss + class_loss + 5 * coord_loss return { "total_loss": total_loss, From f46cc19ce01e86ebf7767344b7bfdbe5935cee51 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 20 Apr 2023 14:23:37 -0700 Subject: [PATCH 024/106] Add adversarial losses --- .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 8 ++++--- mart/configs/model/yolov3.yaml | 12 +++++++++++ mart/models/yolov3.py | 21 +++++++++++++++---- 3 files changed, 34 insertions(+), 7 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 4a758201..ed6bdd9c 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -19,7 +19,7 @@ optimized_metric: "test_metrics/map" trainer: # 118287 training images, batch_size=16, FLOOR(118287/16) = 7392 - max_steps: 73920 # 10 epochs + max_steps: 73920 # 10 epochs # mAP can be slow to compute so limit number of images limit_val_batches: 100 limit_test_batches: 100 @@ -74,7 +74,6 @@ model: optimizer: lr: 0.01 momentum: 0.9 - maximize: True gradient_modifier: p: inf @@ -85,7 +84,8 @@ model: logits: ["perturber.input_adv"] seq030: loss: - - losses.total_loss + - losses.hide_objects_loss + - losses.target_class_loss - perturber.total_variation seq040: output: @@ -98,6 +98,8 @@ model: - obj_loss - noobj_loss - class_loss + - hide_objects_loss + - target_class_loss - total_variation training_metrics: null diff --git a/mart/configs/model/yolov3.yaml b/mart/configs/model/yolov3.yaml index 2e85211e..6c9198e2 100644 --- a/mart/configs/model/yolov3.yaml +++ 
b/mart/configs/model/yolov3.yaml @@ -51,6 +51,8 @@ training_sequence: obj_loss: losses.obj_loss noobj_loss: losses.noobj_loss class_loss: losses.class_loss + hide_objects_loss: losses.hide_objects_loss + target_class_loss: losses.target_class_loss validation_sequence: seq010: @@ -82,6 +84,8 @@ validation_sequence: obj_loss: losses.obj_loss noobj_loss: losses.noobj_loss class_loss: losses.class_loss + hide_objects_loss: losses.hide_objects_loss + target_class_loss: losses.target_class_loss test_sequence: seq010: @@ -113,6 +117,8 @@ test_sequence: obj_loss: losses.obj_loss noobj_loss: losses.noobj_loss class_loss: losses.class_loss + hide_objects_loss: losses.hide_objects_loss + target_class_loss: losses.target_class_loss training_step_log: - loss @@ -121,6 +127,8 @@ training_step_log: - obj_loss - noobj_loss - class_loss + - hide_objects_loss + - target_class_loss validation_step_log: - loss @@ -129,6 +137,8 @@ validation_step_log: - obj_loss - noobj_loss - class_loss + - hide_objects_loss + - target_class_loss test_step_log: - loss @@ -137,3 +147,5 @@ test_step_log: - obj_loss - noobj_loss - class_loss + - hide_objects_loss + - target_class_loss diff --git a/mart/models/yolov3.py b/mart/models/yolov3.py index 5df22fb6..34f5b0c1 100644 --- a/mart/models/yolov3.py +++ b/mart/models/yolov3.py @@ -5,6 +5,7 @@ # import torch +import torch.nn.functional as F from yolov3.inference import post_process from yolov3.training import yolo_loss_fn from yolov3.utils import cxcywh_to_xywh @@ -21,10 +22,20 @@ def forward(self, logits, targets, target_lengths): losses = yolo_loss_fn(logits, targets, target_lengths, self.image_size, self.average) total_loss, coord_loss, obj_loss, noobj_loss, class_loss = losses - # keep no objects no objects - # FIXME: Parameterize this - noobj_loss = -noobj_loss - total_loss = 0.2 * noobj_loss + obj_loss + class_loss + 5 * coord_loss + pred_conf_logit = logits[..., 4] + class_logits = logits[..., 5:] + + # make objectness go to zero + tgt_zero = 
torch.zeros(pred_conf_logit.size(), device=pred_conf_logit.device) + hide_objects_loss = F.binary_cross_entropy_with_logits( + pred_conf_logit, tgt_zero, reduction="sum" + ) + + # make target logit go to zero + target_class_logit = class_logits[..., 0] # 0 == person + target_class_loss = ( + F.binary_cross_entropy_with_logits(target_class_logit, tgt_zero, reduction="sum") / 80 + ) return { "total_loss": total_loss, @@ -32,6 +43,8 @@ def forward(self, logits, targets, target_lengths): "obj_loss": obj_loss, "noobj_loss": noobj_loss, "class_loss": class_loss, + "hide_objects_loss": hide_objects_loss, + "target_class_loss": target_class_loss, } From 37261d0c6a724ce9d77929d3f512a012381d0f58 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 20 Apr 2023 16:30:10 -0700 Subject: [PATCH 025/106] Turn off BatchNorm buffer updating in freeze --- mart/models/modular.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mart/models/modular.py b/mart/models/modular.py index d53006aa..7db46ac1 100644 --- a/mart/models/modular.py +++ b/mart/models/modular.py @@ -79,10 +79,16 @@ def __init__( self.test_metrics = test_metrics if freeze: + # Turn off gradients for parameters for name, param in self.model.named_parameters(): if re.match(freeze, name): param.requires_grad_(False) + # Turn off BatchNorm updating + for name, module in self.model.named_modules(): + if re.match(freeze, name) and "Norm" in module.__class__.__name__: + module.track_running_stats = False + self.gradient_modifier = gradient_modifier def configure_optimizers(self): From 55d7016a321198c53ff32371853b3bdfd2998803 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 20 Apr 2023 18:15:17 -0700 Subject: [PATCH 026/106] Add weights to mart.nn.Sum This is a terrible implementation but MART does not enable us to dynamically pass values into forward functions. As such, we are forced to choose the weights a priori even though we can dynamically choose what we pass to the forward function.
--- mart/attack/perturber.py | 2 +- .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 13 +++++++++++++ mart/nn/nn.py | 14 +++++++++++--- 3 files changed, 25 insertions(+), 4 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 879635a1..20072c64 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -116,4 +116,4 @@ def forward(self, **batch): ) ) - return {"input_adv": input_adv, "total_variation": 1e-5 * total_variation} + return {"input_adv": input_adv, "total_variation": total_variation} diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index ed6bdd9c..5f064208 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -69,6 +69,9 @@ model: hue: [-0.05, 0.05] pixel_scale: 1.0 + loss: + weights: [1, 1, 1e-5] + freeze: "logits" optimizer: @@ -108,8 +111,18 @@ model: seq005: perturber seq010: logits: ["perturber.input_adv"] + seq030: + loss: + - losses.hide_objects_loss + - losses.target_class_loss + - perturber.total_variation test_sequence: seq005: perturber seq010: logits: ["perturber.input_adv"] + seq030: + loss: + - losses.hide_objects_loss + - losses.target_class_loss + - perturber.total_variation diff --git a/mart/nn/nn.py b/mart/nn/nn.py index 1fae2368..cd574b98 100644 --- a/mart/nn/nn.py +++ b/mart/nn/nn.py @@ -237,11 +237,19 @@ def __init__(self, *args, **kwargs): # FIXME: This must exist already?! 
class Sum(torch.nn.Module): - def __init__(self): + def __init__(self, weights=None): super().__init__() - def forward(self, *args): - return sum(args) + self.weights = weights + + def forward(self, *values): + weights = self.weights + + if weights is None: + weights = [1 for _ in values] + + assert len(weights) == len(values) + return sum(value * weight for value, weight in zip(values, weights)) def load_state_dict(model, weights_fpath=None): From 3027caae81be7b130862aa4416232b61a431265c Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Sun, 23 Apr 2023 10:59:11 -0700 Subject: [PATCH 027/106] Add OverrideMode callback --- mart/callbacks/__init__.py | 1 + mart/callbacks/mode.py | 51 +++++++++++++++++++ mart/configs/callbacks/override_mode.yaml | 5 ++ .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 8 ++- 4 files changed, 64 insertions(+), 1 deletion(-) create mode 100644 mart/callbacks/mode.py create mode 100644 mart/configs/callbacks/override_mode.yaml diff --git a/mart/callbacks/__init__.py b/mart/callbacks/__init__.py index 8e117180..56bdc7af 100644 --- a/mart/callbacks/__init__.py +++ b/mart/callbacks/__init__.py @@ -1,5 +1,6 @@ from .eval_mode import * from .gradients import * +from .mode import * from .no_grad_mode import * from .progress_bar import * from .visualizer import * diff --git a/mart/callbacks/mode.py b/mart/callbacks/mode.py new file mode 100644 index 00000000..65544de4 --- /dev/null +++ b/mart/callbacks/mode.py @@ -0,0 +1,51 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +from __future__ import annotations + +from pytorch_lightning.callbacks import Callback + +__all__ = ["OverrideMode"] + + +class OverrideMode(Callback): + def __init__( + self, + training_mode: str = "train", + validation_mode: str = "eval", + test_mode: str = "eval", + ): + self.training_mode = training_mode == "train" + self.validation_mode = validation_mode == "train" + self.test_mode = test_mode == "train" + + self.mode = None + 
+ def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, unused=0): + self.mode = pl_module.training + pl_module.train(self.training_mode) + + def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, unused=0): + pl_module.train(self.mode) + self.mode = None + + def on_validation_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): + self.mode = pl_module.training + pl_module.train(self.validation_mode) + + def on_validation_batch_end( + self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx + ): + pl_module.train(self.mode) + self.mode = None + + def on_test_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): + self.mode = pl_module.training + pl_module.train(self.test_mode) + + def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx): + pl_module.train(self.mode) + self.mode = None diff --git a/mart/configs/callbacks/override_mode.yaml b/mart/configs/callbacks/override_mode.yaml new file mode 100644 index 00000000..d69d7050 --- /dev/null +++ b/mart/configs/callbacks/override_mode.yaml @@ -0,0 +1,5 @@ +override_mode: + _target_: mart.callbacks.OverrideMode + training_mode: ??? + validation_mode: ??? + test_mode: ??? diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 5f064208..1385cba4 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -10,7 +10,7 @@ defaults: - override /datamodule: coco_yolov3 - override /model: yolov3 - override /metric: average_precision - - override /callbacks: [perturbation_visualizer, lr_monitor] + - override /callbacks: [perturbation_visualizer, lr_monitor, override_mode] task_name: "COCO_YOLOv3_ShapeShifter" tags: ["adv"] @@ -29,6 +29,12 @@ callbacks: perturbation_visualizer: frequency: 500 + override_mode: + # YOLOv3 uses training/eval modes to switch functionality. 
We disable this and just always use training mode. + training_mode: "train" + validation_mode: "train" + test_mode: "train" + datamodule: num_workers: 32 ims_per_batch: 16 From 7d51487315ba624f0635791efd8d11d802efa64a Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Sun, 23 Apr 2023 10:59:52 -0700 Subject: [PATCH 028/106] Add target-specific losses --- .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 15 +++++++------ mart/configs/model/yolov3.yaml | 18 ++++++++++++++++ mart/models/yolov3.py | 21 +++++++++++++++---- 3 files changed, 44 insertions(+), 10 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 1385cba4..d73a2a49 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -93,8 +93,8 @@ model: logits: ["perturber.input_adv"] seq030: loss: - - losses.hide_objects_loss - - losses.target_class_loss + - losses.hide_target_objects_loss + - losses.correct_target_class_loss - perturber.total_variation seq040: output: @@ -109,6 +109,9 @@ model: - class_loss - hide_objects_loss - target_class_loss + - hide_target_objects_loss + - correct_target_class_loss + - target_count - total_variation training_metrics: null @@ -119,8 +122,8 @@ model: logits: ["perturber.input_adv"] seq030: loss: - - losses.hide_objects_loss - - losses.target_class_loss + - losses.hide_target_objects_loss + - losses.correct_target_class_loss - perturber.total_variation test_sequence: @@ -129,6 +132,6 @@ model: logits: ["perturber.input_adv"] seq030: loss: - - losses.hide_objects_loss - - losses.target_class_loss + - losses.hide_target_objects_loss + - losses.correct_target_class_loss - perturber.total_variation diff --git a/mart/configs/model/yolov3.yaml b/mart/configs/model/yolov3.yaml index 6c9198e2..f02ed59e 100644 --- a/mart/configs/model/yolov3.yaml +++ b/mart/configs/model/yolov3.yaml @@ -53,6 +53,9 @@ training_sequence: 
class_loss: losses.class_loss hide_objects_loss: losses.hide_objects_loss target_class_loss: losses.target_class_loss + hide_target_objects_loss: losses.hide_target_objects_loss + correct_target_class_loss: losses.correct_target_class_loss + target_count: losses.target_count validation_sequence: seq010: @@ -86,6 +89,9 @@ validation_sequence: class_loss: losses.class_loss hide_objects_loss: losses.hide_objects_loss target_class_loss: losses.target_class_loss + hide_target_objects_loss: losses.hide_target_objects_loss + correct_target_class_loss: losses.correct_target_class_loss + target_count: losses.target_count test_sequence: seq010: @@ -119,6 +125,9 @@ test_sequence: class_loss: losses.class_loss hide_objects_loss: losses.hide_objects_loss target_class_loss: losses.target_class_loss + hide_target_objects_loss: losses.hide_target_objects_loss + correct_target_class_loss: losses.correct_target_class_loss + target_count: losses.target_count training_step_log: - loss @@ -129,6 +138,9 @@ training_step_log: - class_loss - hide_objects_loss - target_class_loss + - hide_target_objects_loss + - correct_target_class_loss + - target_count validation_step_log: - loss @@ -139,6 +151,9 @@ validation_step_log: - class_loss - hide_objects_loss - target_class_loss + - hide_target_objects_loss + - correct_target_class_loss + - target_count test_step_log: - loss @@ -149,3 +164,6 @@ test_step_log: - class_loss - hide_objects_loss - target_class_loss + - hide_target_objects_loss + - correct_target_class_loss + - target_count diff --git a/mart/models/yolov3.py b/mart/models/yolov3.py index 34f5b0c1..334bfd1d 100644 --- a/mart/models/yolov3.py +++ b/mart/models/yolov3.py @@ -23,19 +23,29 @@ def forward(self, logits, targets, target_lengths): total_loss, coord_loss, obj_loss, noobj_loss, class_loss = losses pred_conf_logit = logits[..., 4] + pred_conf_score = torch.sigmoid(pred_conf_logit) class_logits = logits[..., 5:] + target_mask = (torch.argmax(class_logits, dim=-1) == 0) & 
(pred_conf_score > 0.1) # make objectness go to zero tgt_zero = torch.zeros(pred_conf_logit.size(), device=pred_conf_logit.device) - hide_objects_loss = F.binary_cross_entropy_with_logits( - pred_conf_logit, tgt_zero, reduction="sum" + hide_objects_losses = F.binary_cross_entropy_with_logits( + pred_conf_logit, tgt_zero, reduction="none" ) + hide_objects_loss = hide_objects_losses.sum() + + # make target objectness go to zero + hide_target_objects_loss = hide_objects_losses[target_mask].sum() # make target logit go to zero target_class_logit = class_logits[..., 0] # 0 == person - target_class_loss = ( - F.binary_cross_entropy_with_logits(target_class_logit, tgt_zero, reduction="sum") / 80 + target_class_losses = F.binary_cross_entropy_with_logits( + target_class_logit, tgt_zero, reduction="none" ) + target_class_loss = target_class_losses.sum() + + # make correctly predicted target class logit go to zero + correct_target_class_loss = target_class_losses[target_mask].sum() return { "total_loss": total_loss, @@ -44,7 +54,10 @@ def forward(self, logits, targets, target_lengths): "noobj_loss": noobj_loss, "class_loss": class_loss, "hide_objects_loss": hide_objects_loss, + "hide_target_objects_loss": hide_target_objects_loss, "target_class_loss": target_class_loss, + "correct_target_class_loss": correct_target_class_loss, + "target_count": target_mask.sum(), } From 21aa038ae7839612c5127152d9268e1a5e6b50e9 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Sun, 23 Apr 2023 11:00:01 -0700 Subject: [PATCH 029/106] Turn off gradient modifier --- mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index d73a2a49..6e7c63b1 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -84,8 +84,7 @@ model: lr: 0.01 momentum: 0.9 - 
gradient_modifier: - p: inf + gradient_modifier: null training_sequence: seq005: perturber From f0ac5835eea36b165c25cd40bae4c14b03dbb159 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Sun, 23 Apr 2023 17:28:36 -0700 Subject: [PATCH 030/106] Use default transform --- mart/configs/datamodule/coco_yolov3.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/configs/datamodule/coco_yolov3.yaml b/mart/configs/datamodule/coco_yolov3.yaml index e7acef38..37b2f667 100644 --- a/mart/configs/datamodule/coco_yolov3.yaml +++ b/mart/configs/datamodule/coco_yolov3.yaml @@ -8,7 +8,7 @@ train_dataset: img_root: ${paths.data_dir}/coco/train2017 ann_file_name: ${paths.data_dir}/coco/annotations/instances_train2017.json img_size: ??? - transform: random + transform: default val_dataset: _target_: yolov3.datasets.coco.CocoDetectionBoundingBox From e5dc9bbad04e0ba7208e9ea4b5b877a4adcf8e53 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 24 Apr 2023 00:03:52 -0700 Subject: [PATCH 031/106] Use CocoDetection datamodule --- mart/configs/datamodule/coco_yolov3.yaml | 66 ++++- mart/configs/experiment/COCO_YOLOv3.yaml | 3 - .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 3 - mart/datamodules/coco.py | 21 +- mart/transforms/extended.py | 254 +++++++++++++++++- 5 files changed, 321 insertions(+), 26 deletions(-) diff --git a/mart/configs/datamodule/coco_yolov3.yaml b/mart/configs/datamodule/coco_yolov3.yaml index 37b2f667..411ac440 100644 --- a/mart/configs/datamodule/coco_yolov3.yaml +++ b/mart/configs/datamodule/coco_yolov3.yaml @@ -1,24 +1,62 @@ defaults: - - default.yaml + - default -num_workers: 2 +num_workers: 1 train_dataset: - _target_: yolov3.datasets.coco.CocoDetectionBoundingBox - img_root: ${paths.data_dir}/coco/train2017 - ann_file_name: ${paths.data_dir}/coco/annotations/instances_train2017.json - img_size: ??? 
- transform: default + _target_: mart.datamodules.coco.CocoDetection + root: ${paths.data_dir}/coco/train2017 + annFile: ${paths.data_dir}/coco/annotations/instances_train2017.json + transforms: + _target_: mart.transforms.Compose + transforms: + - _target_: torchvision.transforms.ToTensor + - _target_: mart.transforms.ConvertCocoPolysToMask + - _target_: mart.transforms.PadToSquare + fill: 0.5 + - _target_: mart.transforms.Resize + size: [416, 416] + - _target_: mart.transforms.ConvertBoxesToCXCYHW + - _target_: mart.transforms.RemapLabels + - _target_: mart.transforms.PackTarget + num_classes: 80 val_dataset: - _target_: yolov3.datasets.coco.CocoDetectionBoundingBox - img_root: ${paths.data_dir}/coco/val2017 - ann_file_name: ${paths.data_dir}/coco/annotations/instances_val2017.json - img_size: ${..train_dataset.img_size} - transform: default + _target_: mart.datamodules.coco.CocoDetection + root: ${paths.data_dir}/coco/val2017 + annFile: ${paths.data_dir}/coco/annotations/instances_val2017.json + transforms: + _target_: mart.transforms.Compose + transforms: + - _target_: torchvision.transforms.ToTensor + - _target_: mart.transforms.ConvertCocoPolysToMask + - _target_: mart.transforms.PadToSquare + fill: 0.5 + - _target_: mart.transforms.Resize + size: [416, 416] + - _target_: mart.transforms.ConvertBoxesToCXCYHW + - _target_: mart.transforms.RemapLabels + - _target_: mart.transforms.PackTarget + num_classes: 80 -test_dataset: ${.val_dataset} +test_dataset: + _target_: mart.datamodules.coco.CocoDetection + root: ${paths.data_dir}/coco/val2017 + annFile: ${paths.data_dir}/coco/annotations/instances_val2017.json + transforms: + _target_: mart.transforms.Compose + transforms: + - _target_: torchvision.transforms.ToTensor + - _target_: mart.transforms.ConvertCocoPolysToMask + - _target_: mart.transforms.PadToSquare + fill: 0.5 + - _target_: mart.transforms.Resize + size: [416, 416] + - _target_: mart.transforms.ConvertBoxesToCXCYHW + - _target_: 
mart.transforms.RemapLabels + - _target_: mart.transforms.PackTarget + num_classes: 80 collate_fn: _target_: hydra.utils.get_method - path: mart.datamodules.coco.collate_img_label_fn + path: mart.datamodules.coco.yolo_collate_fn diff --git a/mart/configs/experiment/COCO_YOLOv3.yaml b/mart/configs/experiment/COCO_YOLOv3.yaml index bcc04185..652396ce 100644 --- a/mart/configs/experiment/COCO_YOLOv3.yaml +++ b/mart/configs/experiment/COCO_YOLOv3.yaml @@ -21,9 +21,6 @@ datamodule: num_workers: 32 ims_per_batch: 16 - train_dataset: - img_size: 416 - model: # yolov3 model does not produce preds/targets in training sequence training_metrics: null diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 6e7c63b1..92a6090d 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -39,9 +39,6 @@ datamodule: num_workers: 32 ims_per_batch: 16 - train_dataset: - img_size: 416 - model: modules: perturber: diff --git a/mart/datamodules/coco.py b/mart/datamodules/coco.py index dc250112..919c706e 100644 --- a/mart/datamodules/coco.py +++ b/mart/datamodules/coco.py @@ -8,6 +8,8 @@ from typing import Any, Callable, List, Optional import numpy as np +import torch +from torch.utils.data import default_collate from torchvision.datasets.coco import CocoDetection as CocoDetection_ from torchvision.datasets.folder import default_loader from yolov3.datasets.utils import collate_img_label_fn as collate_img_label_fn_ @@ -92,8 +94,19 @@ def collate_fn(batch): return tuple(zip(*batch)) -def collate_img_label_fn(batch): - image, target, lengths = collate_img_label_fn_(batch) +def yolo_collate_fn(batch): + images, targets = tuple(zip(*batch)) - # Collate into tuple of (input, target) where target is a dict. 
- return image, {"target": target, "lengths": lengths} + images = default_collate(images) + + # Turn tuple of dicts into dict of tuples + keys = targets[0].keys() + target = {k: tuple(t[k] for t in targets) for k in keys} + + # Pad packed using torch.nested + packed = torch.nested.nested_tensor(list(target["packed"])) + packed = torch.nested.to_padded_tensor(packed, 0.0) + + lengths = default_collate(target["packed_length"]) + + return images, {"target": packed, "lengths": lengths} diff --git a/mart/transforms/extended.py b/mart/transforms/extended.py index 13cd0e74..b4aa5f00 100644 --- a/mart/transforms/extended.py +++ b/mart/transforms/extended.py @@ -4,6 +4,8 @@ # SPDX-License-Identifier: BSD-3-Clause # +from __future__ import annotations + import logging import os from typing import Dict, Optional, Tuple @@ -29,6 +31,11 @@ "ConvertInstanceSegmentationToPerturbable", "RandomHorizontalFlip", "ConvertCocoPolysToMask", + "PadToSquare", + "Resize", + "ConvertBoxesToCXCYHW", + "RemapLabels", + "PackTarget", ] @@ -173,8 +180,8 @@ def flip_perturbable_mask(image, target): return image, target def forward( - self, image: Tensor, target: Optional[Dict[str, Tensor]] = None - ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]: + self, image: Tensor, target: dict[str, Tensor] | None = None + ) -> tuple[Tensor, dict[str, Tensor] | None]: if torch.rand(1) < self.p: image = F.hflip(image) if target is not None: @@ -190,3 +197,246 @@ def forward( class ConvertCocoPolysToMask(ConvertCocoPolysToMask_, ExTransform): pass + + +class PadToSquare(ExTransform): + def __init__(self, fill): + self.fill = fill + + def __call__( + self, + image: Tensor, # CHW + target: dict[str, Tensor] | None = None, + ): + w, h = F.get_image_size(image) + + l_or_t = abs(h - w) // 2 + r_or_b = abs(h - w) - l_or_t + + # padding is (left, top, right, bottom) + if h > w: + padding = (l_or_t, 0, r_or_b, 0) + else: + padding = (0, l_or_t, 0, r_or_b) + + image = F.pad(image, padding, fill=self.fill) + + if target 
is not None: + if "boxes" in target: + target["boxes"] = self.pad_boxes(target["boxes"], padding) + if "masks" in target: + target["masks"] = self.pad_masks(target["masks"], padding) + if "keypoints" in target: + target["keypoints"] = self.pad_keypoints(target["keypoints"], padding) + + return image, target + + def pad_boxes(self, boxes, padding): + boxes[:, 0] += padding[0] # X + left + boxes[:, 1] += padding[1] # Y + top + boxes[:, 2] += padding[0] # X + left + boxes[:, 3] += padding[1] # Y + top + + return boxes + + def pad_masks(self, masks, padding): + return F.pad(masks, padding, fill=0) + + def pad_keypoints(self, keypoints, padding): + raise NotImplementedError + + +class Resize(ExTransform): + def __init__(self, size): + self.size = size + + def __call__( + self, + image: Tensor, + target: dict[str, Tensor] | None = None, + ): + orig_w, orig_h = F.get_image_size(image) + image = F.resize(image, size=self.size) + new_w, new_h = F.get_image_size(image) + + dw, dh = new_w / orig_w, new_h / orig_h + + if target is not None: + if "boxes" in target: + target["boxes"] = self.resize_boxes(target["boxes"], (dw, dh)) + if "masks" in target: + target["masks"] = self.resize_masks(target["masks"], (dw, dh)) + if "keypoints" in target: + target["keypoints"] = self.resize_keypoints(target["keypoints"], (dw, dh)) + + return image, target + + def resize_boxes(self, boxes, ratio): + boxes[:, 0] *= ratio[0] # X1 * width ratio + boxes[:, 1] *= ratio[1] # Y1 * height ratio + boxes[:, 2] *= ratio[0] # X2 * width ratio + boxes[:, 3] *= ratio[1] # Y2 * height ratio + + return boxes + + def resize_masks(self, masks, ratio): + # Resize fails on empty tensors + if masks.shape[0] == 0: + return masks + + return F.resize(masks, size=self.size, interpolation=F.InterpolationMode.NEAREST) + + def resize_keypoints(self, keypoints, ratio): + raise NotImplementedError + + +class ConvertBoxesToCXCYHW(ExTransform): + def __call__( + self, + image: Tensor, + target: dict[str, Tensor], + ): + # 
X1Y1X2Y2 + boxes = target["boxes"] + + # X2Y2 -> HW + boxes[:, 2] -= boxes[:, 0] + boxes[:, 3] -= boxes[:, 1] + + # X1Y1 -> CXCY + boxes[:, 0] += boxes[:, 2] / 2 + boxes[:, 1] += boxes[:, 3] / 2 + + target["boxes"] = boxes + + return image, target + + +class RemapLabels(ExTransform): + COCO_MAP = { + 1: 0, + 2: 1, + 3: 2, + 4: 3, + 5: 4, + 6: 5, + 7: 6, + 8: 7, + 9: 8, + 10: 9, + 11: 10, + 13: 11, + 14: 12, + 15: 13, + 16: 14, + 17: 15, + 18: 16, + 19: 17, + 20: 18, + 21: 19, + 22: 20, + 23: 21, + 24: 22, + 25: 23, + 27: 24, + 28: 25, + 31: 26, + 32: 27, + 33: 28, + 34: 29, + 35: 30, + 36: 31, + 37: 32, + 38: 33, + 39: 34, + 40: 35, + 41: 36, + 42: 37, + 43: 38, + 44: 39, + 46: 40, + 47: 41, + 48: 42, + 49: 43, + 50: 44, + 51: 45, + 52: 46, + 53: 47, + 54: 48, + 55: 49, + 56: 50, + 57: 51, + 58: 52, + 59: 53, + 60: 54, + 61: 55, + 62: 56, + 63: 57, + 64: 58, + 65: 59, + 67: 60, + 70: 61, + 72: 62, + 73: 63, + 74: 64, + 75: 65, + 76: 66, + 77: 67, + 78: 68, + 79: 69, + 80: 70, + 81: 71, + 82: 72, + 84: 73, + 85: 74, + 86: 75, + 87: 76, + 88: 77, + 89: 78, + 90: 79, + } + + def __init__( + self, + label_map: dict[int, int] | None = None, + ): + if label_map is None: + label_map = self.COCO_MAP + + self.label_map = label_map + + def __call__( + self, + image: Tensor, + target: dict[str, Tensor], + ): + labels = target["labels"] + + # This is a terrible implementation + for i, label in enumerate(labels): + labels[i] = self.label_map[label.item()] + + target["labels"] = labels + + return image, target + + +class PackTarget(ExTransform): + def __init__(self, num_classes: int): + self.num_classes = num_classes + + def __call__( + self, + image: Tensor, + target: dict[str, Tensor], + ): + boxes = target["boxes"] + labels = target["labels"] + scores = torch.ones_like(labels)[..., None] + + labels = torch.nn.functional.one_hot(labels, num_classes=self.num_classes) + + target["packed"] = torch.cat([boxes, scores, labels], dim=-1) + target["packed_length"] = 
target["packed"].shape[0] + + return image, target From d214e179cf6af1c93d6d3d99c671a45ae4069281 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 24 Apr 2023 00:06:52 -0700 Subject: [PATCH 032/106] Replace LitModular weights_fpath's with load_state_dict --- mart/configs/experiment/COCO_YOLOv3.yaml | 3 ++ .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 3 ++ mart/configs/model/yolov3.yaml | 6 +-- mart/models/modular.py | 38 ++++++++++--------- 4 files changed, 28 insertions(+), 22 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3.yaml b/mart/configs/experiment/COCO_YOLOv3.yaml index 652396ce..05c6016b 100644 --- a/mart/configs/experiment/COCO_YOLOv3.yaml +++ b/mart/configs/experiment/COCO_YOLOv3.yaml @@ -22,6 +22,9 @@ datamodule: ims_per_batch: 16 model: + load_state_dict: + logits: ${paths.data_dir}/yolov3_original.pt + # yolov3 model does not produce preds/targets in training sequence training_metrics: null diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 92a6090d..2e9f9a44 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -77,6 +77,9 @@ model: freeze: "logits" + load_state_dict: + logits: ${paths.data_dir}/yolov3_original.pt + optimizer: lr: 0.01 momentum: 0.9 diff --git a/mart/configs/model/yolov3.yaml b/mart/configs/model/yolov3.yaml index f02ed59e..337810be 100644 --- a/mart/configs/model/yolov3.yaml +++ b/mart/configs/model/yolov3.yaml @@ -3,11 +3,7 @@ defaults: modules: logits: - _target_: yolov3.main.load_yolov3_model - device: cpu - ckpt: false - mode: eval - weight_path: ${paths.data_dir}/yolov3_original.pt + _target_: yolov3.model.YoloNetV3 losses: _target_: mart.models.yolov3.Loss diff --git a/mart/models/modular.py b/mart/models/modular.py index 7db46ac1..8054e75d 100644 --- a/mart/models/modular.py +++ b/mart/models/modular.py @@ -32,10 +32,9 @@ def __init__( 
test_sequence=None, test_step_log=None, test_metrics=None, - weights_fpath=None, - strict=True, - freeze=None, gradient_modifier=None, + freeze=None, + load_state_dict=None, ): super().__init__() @@ -63,9 +62,6 @@ def __init__( } self.model = SequentialDict(modules, sequences) - if weights_fpath is not None: - self.model.load_state_dict(torch.load(weights_fpath), strict=strict) - self.optimizer = optimizer self.lr_scheduler = lr_scheduler @@ -78,19 +74,27 @@ def __init__( self.test_step_log = test_step_log or [] self.test_metrics = test_metrics - if freeze: - # Turn of gradients for parameters - for name, param in self.model.named_parameters(): - if re.match(freeze, name): - param.requires_grad_(False) - - # Turn off BatchNorm updating - for name, module in self.model.named_modules(): - if re.match(freeze, name) and "Norm" in module.__class__.__name__: - module.track_running_stats = False - self.gradient_modifier = gradient_modifier + # Turn of gradients for parameters + for name, param in self.model.named_parameters(): + if re.match(freeze, name): + logger.info(f"Setting requires_grad to False for {name}.") + param.requires_grad_(False) + + # Turn off BatchNorm updating + for name, module in self.model.named_modules(): + if re.match(freeze, name) and "Norm" in module.__class__.__name__: + logger.info(f"Setting track_running_stats to False for {name}.") + module.track_running_stats = False + + # Load state dict for given modules + load_state_dict = load_state_dict or {} + for name, path in load_state_dict.items(): + module = getattr(self.model, name) + logger.info(f"Loading state_dict {path} for {module.__class__.__name__}...") + module.load_state_dict(torch.load(path)) + def configure_optimizers(self): config = {} config["optimizer"] = self.optimizer(self.model) From 9b98b11ea1bc8e0b99d712448d9520684f61f16b Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 24 Apr 2023 00:08:33 -0700 Subject: [PATCH 033/106] Cleanup sequences --- 
mart/configs/model/yolov3.yaml | 34 +++++++--------------------------- mart/models/yolov3.py | 17 ++++++++++++----- 2 files changed, 19 insertions(+), 32 deletions(-) diff --git a/mart/configs/model/yolov3.yaml b/mart/configs/model/yolov3.yaml index 337810be..8234e92c 100644 --- a/mart/configs/model/yolov3.yaml +++ b/mart/configs/model/yolov3.yaml @@ -29,11 +29,7 @@ training_sequence: seq010: logits: ["input"] - seq020: - losses: - logits: logits - targets: target.target - target_lengths: target.lengths + seq020: losses seq030: loss: @@ -57,26 +53,18 @@ validation_sequence: seq010: logits: ["input"] - seq020: - losses: - logits: logits - targets: target.target - target_lengths: target.lengths + seq020: losses seq030: loss: - losses.total_loss - seq040: - detections: - logits: logits - targets: target.target - target_lengths: target.lengths + seq040: detections seq050: output: preds: detections.preds - target: detections.target + target: detections.targets loss: loss total_loss: losses.total_loss coord_loss: losses.coord_loss @@ -93,26 +81,18 @@ test_sequence: seq010: logits: ["input"] - seq020: - losses: - logits: logits - targets: target.target - target_lengths: target.lengths + seq020: losses seq030: loss: - losses.total_loss - seq040: - detections: - logits: logits - targets: target.target - target_lengths: target.lengths + seq040: detections seq050: output: preds: detections.preds - target: detections.target + target: detections.targets loss: loss total_loss: losses.total_loss coord_loss: losses.coord_loss diff --git a/mart/models/yolov3.py b/mart/models/yolov3.py index 334bfd1d..4e069cb8 100644 --- a/mart/models/yolov3.py +++ b/mart/models/yolov3.py @@ -18,8 +18,11 @@ def __init__(self, image_size, average=True): self.image_size = image_size self.average = average - def forward(self, logits, targets, target_lengths): - losses = yolo_loss_fn(logits, targets, target_lengths, self.image_size, self.average) + def forward(self, logits, target, **kwargs): + targets 
= target["target"] + lengths = target["lengths"] + + losses = yolo_loss_fn(logits, targets, lengths, self.image_size, self.average) total_loss, coord_loss, obj_loss, noobj_loss, class_loss = losses pred_conf_logit = logits[..., 4] @@ -93,13 +96,17 @@ def tensor_to_dict(detection): return {"boxes": boxes, "labels": labels, "scores": scores} @torch.no_grad() - def forward(self, logits, targets, target_lengths): + def forward(self, logits, target, **kwargs): detections = post_process(logits, self.nms, self.conf_thres, self.nms_thres) + # FIXME: This should be another module # Convert detections and targets to List[dict[str, torch.Tensor]]. This is the format # torchmetrics wants. preds = [Detections.tensor_to_dict(det) for det in detections] - targets = [target[:length] for target, length in zip(targets, target_lengths)] + + targets = target["target"] + lengths = target["lengths"] + targets = [target[:length] for target, length in zip(targets, lengths)] targets = [Detections.tensor_to_dict(target) for target in targets] - return {"preds": preds, "target": targets} + return {"preds": preds, "targets": targets} From 597947f3667d4d2fc721a54060d72336044de140 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 24 Apr 2023 00:47:56 -0700 Subject: [PATCH 034/106] Get rid of torch.nested --- mart/datamodules/coco.py | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/mart/datamodules/coco.py b/mart/datamodules/coco.py index 919c706e..ccbd799f 100644 --- a/mart/datamodules/coco.py +++ b/mart/datamodules/coco.py @@ -94,6 +94,20 @@ def collate_fn(batch): return tuple(zip(*batch)) +def to_padded_tensor(tensors, dim=0, fill=0.0): + sizes = np.array([list(t.shape) for t in tensors]) + max_dim_size = sizes[:, dim].max() + sizes[:, dim] = max_dim_size - sizes[:, dim] + + zeros = [ + torch.full(s.tolist(), fill, device=t.device, dtype=t.dtype) + for t, s in zip(tensors, sizes) + ] + tensors = [torch.cat((t, z), dim=dim) for t, z in 
zip(tensors, zeros)] + + return tensors + + def yolo_collate_fn(batch): images, targets = tuple(zip(*batch)) @@ -104,9 +118,10 @@ def yolo_collate_fn(batch): target = {k: tuple(t[k] for t in targets) for k in keys} # Pad packed using torch.nested - packed = torch.nested.nested_tensor(list(target["packed"])) - packed = torch.nested.to_padded_tensor(packed, 0.0) + packed = to_padded_tensor(target["packed"]) + packed_length = target["packed_length"] - lengths = default_collate(target["packed_length"]) + packed = default_collate(packed) + lengths = default_collate(packed_length) return images, {"target": packed, "lengths": lengths} From f9eb95d4b878590d5355543f25e4258c3b8bc0b8 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 24 Apr 2023 00:54:07 -0700 Subject: [PATCH 035/106] Return mask of correct size even when empty --- mart/transforms/extended.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/transforms/extended.py b/mart/transforms/extended.py index b4aa5f00..3fc11dab 100644 --- a/mart/transforms/extended.py +++ b/mart/transforms/extended.py @@ -282,7 +282,7 @@ def resize_boxes(self, boxes, ratio): def resize_masks(self, masks, ratio): # Resize fails on empty tensors if masks.shape[0] == 0: - return masks + return torch.zeros((0, *self.size), dtype=masks.dtype, device=masks.device) return F.resize(masks, size=self.size, interpolation=F.InterpolationMode.NEAREST) From 6424ef92664019b3110c5273936f94e0a16ee9f3 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 24 Apr 2023 00:55:04 -0700 Subject: [PATCH 036/106] Cleanup --- mart/datamodules/coco.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/mart/datamodules/coco.py b/mart/datamodules/coco.py index ccbd799f..41dc3a17 100644 --- a/mart/datamodules/coco.py +++ b/mart/datamodules/coco.py @@ -118,10 +118,11 @@ def yolo_collate_fn(batch): target = {k: tuple(t[k] for t in targets) for k in keys} # Pad packed using torch.nested - packed = 
to_padded_tensor(target["packed"]) - packed_length = target["packed_length"] - + packed = target["packed"] + packed = to_padded_tensor(packed) packed = default_collate(packed) - lengths = default_collate(packed_length) + + lengths = target["packed_length"] + lengths = default_collate(lengths) return images, {"target": packed, "lengths": lengths} From a30ada21b00f008174f36bef4d8ad57c957ff423 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 24 Apr 2023 01:31:51 -0700 Subject: [PATCH 037/106] Use default COCO collate function --- mart/configs/datamodule/coco_yolov3.yaml | 2 +- mart/configs/model/yolov3.yaml | 16 +++++----- mart/datamodules/coco.py | 37 ++-------------------- mart/models/yolov3.py | 39 ++++++++++++++++++------ 4 files changed, 41 insertions(+), 53 deletions(-) diff --git a/mart/configs/datamodule/coco_yolov3.yaml b/mart/configs/datamodule/coco_yolov3.yaml index 411ac440..b17cb804 100644 --- a/mart/configs/datamodule/coco_yolov3.yaml +++ b/mart/configs/datamodule/coco_yolov3.yaml @@ -59,4 +59,4 @@ test_dataset: collate_fn: _target_: hydra.utils.get_method - path: mart.datamodules.coco.yolo_collate_fn + path: mart.datamodules.coco.collate_fn diff --git a/mart/configs/model/yolov3.yaml b/mart/configs/model/yolov3.yaml index 8234e92c..8646dc77 100644 --- a/mart/configs/model/yolov3.yaml +++ b/mart/configs/model/yolov3.yaml @@ -3,7 +3,7 @@ defaults: modules: logits: - _target_: yolov3.model.YoloNetV3 + _target_: mart.models.yolov3.YoloNetV3 losses: _target_: mart.models.yolov3.Loss @@ -13,7 +13,7 @@ modules: loss: _target_: mart.nn.Sum - detections: + preds: _target_: mart.models.yolov3.Detections nms: true conf_thres: 0.1 @@ -59,12 +59,12 @@ validation_sequence: loss: - losses.total_loss - seq040: detections + seq040: preds seq050: output: - preds: detections.preds - target: detections.targets + preds: preds + target: target loss: loss total_loss: losses.total_loss coord_loss: losses.coord_loss @@ -87,12 +87,12 @@ test_sequence: loss: - 
losses.total_loss - seq040: detections + seq040: preds seq050: output: - preds: detections.preds - target: detections.targets + preds: preds + target: target loss: loss total_loss: losses.total_loss coord_loss: losses.coord_loss diff --git a/mart/datamodules/coco.py b/mart/datamodules/coco.py index 41dc3a17..6a51adeb 100644 --- a/mart/datamodules/coco.py +++ b/mart/datamodules/coco.py @@ -47,6 +47,9 @@ def __init__( self.modalities = modalities + # We load a lot of stuff from COCO so use file system to communicate + torch.multiprocessing.set_sharing_strategy("file_system") + def _load_image(self, id: int) -> Any: if self.modalities is None: return super()._load_image(id) @@ -92,37 +95,3 @@ def __getitem__(self, index: int): # Source: https://github.com/pytorch/vision/blob/dc07ac2add8285e16a716564867d0b4b953f6735/references/detection/utils.py#L203 def collate_fn(batch): return tuple(zip(*batch)) - - -def to_padded_tensor(tensors, dim=0, fill=0.0): - sizes = np.array([list(t.shape) for t in tensors]) - max_dim_size = sizes[:, dim].max() - sizes[:, dim] = max_dim_size - sizes[:, dim] - - zeros = [ - torch.full(s.tolist(), fill, device=t.device, dtype=t.dtype) - for t, s in zip(tensors, sizes) - ] - tensors = [torch.cat((t, z), dim=dim) for t, z in zip(tensors, zeros)] - - return tensors - - -def yolo_collate_fn(batch): - images, targets = tuple(zip(*batch)) - - images = default_collate(images) - - # Turn tuple of dicts into dict of tuples - keys = targets[0].keys() - target = {k: tuple(t[k] for t in targets) for k in keys} - - # Pad packed using torch.nested - packed = target["packed"] - packed = to_padded_tensor(packed) - packed = default_collate(packed) - - lengths = target["packed_length"] - lengths = default_collate(lengths) - - return images, {"target": packed, "lengths": lengths} diff --git a/mart/models/yolov3.py b/mart/models/yolov3.py index 4e069cb8..999f0613 100644 --- a/mart/models/yolov3.py +++ b/mart/models/yolov3.py @@ -4,13 +4,38 @@ # 
SPDX-License-Identifier: BSD-3-Clause # +import numpy as np import torch import torch.nn.functional as F from yolov3.inference import post_process +from yolov3.model import YoloNetV3 as YoloNetV3_ from yolov3.training import yolo_loss_fn from yolov3.utils import cxcywh_to_xywh +class YoloNetV3(YoloNetV3_): + def forward(self, x, *args, **kwargs): + x = torch.stack(x).contiguous() + + return super().forward(x, *args, **kwargs) + + +def to_padded_tensor(tensors, dim=0, fill=0.0): + sizes = np.array([list(t.shape) for t in tensors]) + max_dim_size = sizes[:, dim].max() + sizes[:, dim] = max_dim_size - sizes[:, dim] + + zeros = [ + torch.full(s.tolist(), fill, device=t.device, dtype=t.dtype) + for t, s in zip(tensors, sizes) + ] + tensors = [torch.cat((t, z), dim=dim) for t, z in zip(tensors, zeros)] + + tensor = torch.stack(tensors).contiguous() + + return tensor + + class Loss(torch.nn.Module): def __init__(self, image_size, average=True): super().__init__() @@ -19,8 +44,9 @@ def __init__(self, image_size, average=True): self.average = average def forward(self, logits, target, **kwargs): - targets = target["target"] - lengths = target["lengths"] + # Convert target to acceptable format for yolo_loss_fn + targets = to_padded_tensor([t["packed"] for t in target]) + lengths = [t["packed_length"] for t in target] losses = yolo_loss_fn(logits, targets, lengths, self.image_size, self.average) total_loss, coord_loss, obj_loss, noobj_loss, class_loss = losses @@ -99,14 +125,7 @@ def tensor_to_dict(detection): def forward(self, logits, target, **kwargs): detections = post_process(logits, self.nms, self.conf_thres, self.nms_thres) - # FIXME: This should be another module # Convert detections and targets to List[dict[str, torch.Tensor]]. This is the format # torchmetrics wants. 
- preds = [Detections.tensor_to_dict(det) for det in detections] - - targets = target["target"] - lengths = target["lengths"] - targets = [target[:length] for target, length in zip(targets, lengths)] - targets = [Detections.tensor_to_dict(target) for target in targets] + return [Detections.tensor_to_dict(det) for det in detections] - return {"preds": preds, "targets": targets} From 91cbd16614f3b613b082057105ac0310bebc0f08 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 24 Apr 2023 01:54:18 -0700 Subject: [PATCH 038/106] Add Underlay --- mart/attack/composer.py | 31 +++++++++++++++++-- .../composer/color_jitter_warp_underlay.yaml | 4 +++ .../attack/perturber/composer/underlay.yaml | 1 + .../perturber/composer/warp_underlay.yaml | 2 ++ .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 2 +- mart/models/yolov3.py | 1 - 6 files changed, 37 insertions(+), 4 deletions(-) create mode 100644 mart/configs/attack/perturber/composer/color_jitter_warp_underlay.yaml create mode 100644 mart/configs/attack/perturber/composer/underlay.yaml create mode 100644 mart/configs/attack/perturber/composer/warp_underlay.yaml diff --git a/mart/attack/composer.py b/mart/attack/composer.py index a421904d..d851d33b 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -94,6 +94,33 @@ def compose(self, perturbation, *, input, target): return input * (1 - mask) + perturbation * mask +class Underlay(Composer): + """We assume an adversary underlays a patch to the input.""" + + def __init__(self, premultiplied_alpha=False): + super().__init__() + + self.premultiplied_alpha = premultiplied_alpha + + def compose(self, perturbation, *, input, target): + # True is mutable, False is immutable. + mask = target["perturbable_mask"] + + object_mask = target["masks"].any(dim=0, keepdim=True) + + # Convert mask to a Tensor with same torch.dtype and torch.device as input, + # because some data modules (e.g. Armory) gives binary mask. 
+ mask = mask.to(input) + + # If mask overlaps, object, then null out that part of mask + mask = mask * (1 - object_mask) + + if self.premultiplied_alpha: + return input * (1 - mask) + perturbation * (1 - object_mask) + else: + return input * (1 - mask) + perturbation * mask + + class MaskAdditive(Composer): """We assume an adversary adds masked perturbation to the input.""" @@ -105,7 +132,7 @@ def compose(self, perturbation, *, input, target): # FIXME: It would be really nice if we could compose composers just like we can compose everything else... -class WarpOverlay(Overlay): +class WarpUnderlay(Underlay): def __init__( self, warp, @@ -141,7 +168,7 @@ def compose(self, perturbation, *, input, target): # FIXME: It would be really nice if we could compose composers just like we can compose everything else... -class ColorJitterWarpOverlay(WarpOverlay): +class ColorJitterWarpUnderlay(WarpUnderlay): def __init__( self, *args, diff --git a/mart/configs/attack/perturber/composer/color_jitter_warp_underlay.yaml b/mart/configs/attack/perturber/composer/color_jitter_warp_underlay.yaml new file mode 100644 index 00000000..cb8bec0c --- /dev/null +++ b/mart/configs/attack/perturber/composer/color_jitter_warp_underlay.yaml @@ -0,0 +1,4 @@ +defaults: + - warp_underlay + +_target_: mart.attack.composer.ColorJitterWarpUnderlay diff --git a/mart/configs/attack/perturber/composer/underlay.yaml b/mart/configs/attack/perturber/composer/underlay.yaml new file mode 100644 index 00000000..670cac0e --- /dev/null +++ b/mart/configs/attack/perturber/composer/underlay.yaml @@ -0,0 +1 @@ +_target_: mart.attack.composer.Underlay diff --git a/mart/configs/attack/perturber/composer/warp_underlay.yaml b/mart/configs/attack/perturber/composer/warp_underlay.yaml new file mode 100644 index 00000000..791e358b --- /dev/null +++ b/mart/configs/attack/perturber/composer/warp_underlay.yaml @@ -0,0 +1,2 @@ +_target_: mart.attack.composer.WarpUnderlay +warp: ??? 
diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 2e9f9a44..fbfcd301 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -3,7 +3,7 @@ defaults: - /attack/perturber@model.modules.perturber: default - /attack/perturber/initializer@model.modules.perturber.initializer: uniform - - /attack/perturber/composer@model.modules.perturber.composer: color_jitter_warp_overlay + - /attack/perturber/composer@model.modules.perturber.composer: color_jitter_warp_underlay - /attack/perturber/projector@model.modules.perturber.projector: range - /attack/gradient_modifier@model.gradient_modifier: lp_normalizer - override /optimization: super_convergence diff --git a/mart/models/yolov3.py b/mart/models/yolov3.py index 999f0613..40ce6a69 100644 --- a/mart/models/yolov3.py +++ b/mart/models/yolov3.py @@ -128,4 +128,3 @@ def forward(self, logits, target, **kwargs): # Convert detections and targets to List[dict[str, torch.Tensor]]. This is the format # torchmetrics wants. return [Detections.tensor_to_dict(det) for det in detections] - From 2d506944606a835ae85c866268741d04b39aadf6 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 24 Apr 2023 07:26:17 -0700 Subject: [PATCH 039/106] Revert "Use default COCO collate function" This reverts commit a30ada21b00f008174f36bef4d8ad57c957ff423. 
--- mart/configs/datamodule/coco_yolov3.yaml | 2 +- mart/configs/model/yolov3.yaml | 16 +++++----- mart/datamodules/coco.py | 37 ++++++++++++++++++++-- mart/models/yolov3.py | 40 +++++++----------------- 4 files changed, 54 insertions(+), 41 deletions(-) diff --git a/mart/configs/datamodule/coco_yolov3.yaml b/mart/configs/datamodule/coco_yolov3.yaml index b17cb804..411ac440 100644 --- a/mart/configs/datamodule/coco_yolov3.yaml +++ b/mart/configs/datamodule/coco_yolov3.yaml @@ -59,4 +59,4 @@ test_dataset: collate_fn: _target_: hydra.utils.get_method - path: mart.datamodules.coco.collate_fn + path: mart.datamodules.coco.yolo_collate_fn diff --git a/mart/configs/model/yolov3.yaml b/mart/configs/model/yolov3.yaml index 8646dc77..8234e92c 100644 --- a/mart/configs/model/yolov3.yaml +++ b/mart/configs/model/yolov3.yaml @@ -3,7 +3,7 @@ defaults: modules: logits: - _target_: mart.models.yolov3.YoloNetV3 + _target_: yolov3.model.YoloNetV3 losses: _target_: mart.models.yolov3.Loss @@ -13,7 +13,7 @@ modules: loss: _target_: mart.nn.Sum - preds: + detections: _target_: mart.models.yolov3.Detections nms: true conf_thres: 0.1 @@ -59,12 +59,12 @@ validation_sequence: loss: - losses.total_loss - seq040: preds + seq040: detections seq050: output: - preds: preds - target: target + preds: detections.preds + target: detections.targets loss: loss total_loss: losses.total_loss coord_loss: losses.coord_loss @@ -87,12 +87,12 @@ test_sequence: loss: - losses.total_loss - seq040: preds + seq040: detections seq050: output: - preds: preds - target: target + preds: detections.preds + target: detections.targets loss: loss total_loss: losses.total_loss coord_loss: losses.coord_loss diff --git a/mart/datamodules/coco.py b/mart/datamodules/coco.py index 6a51adeb..41dc3a17 100644 --- a/mart/datamodules/coco.py +++ b/mart/datamodules/coco.py @@ -47,9 +47,6 @@ def __init__( self.modalities = modalities - # We load a lot of stuff from COCO so use file system to communicate - 
torch.multiprocessing.set_sharing_strategy("file_system") - def _load_image(self, id: int) -> Any: if self.modalities is None: return super()._load_image(id) @@ -95,3 +92,37 @@ def __getitem__(self, index: int): # Source: https://github.com/pytorch/vision/blob/dc07ac2add8285e16a716564867d0b4b953f6735/references/detection/utils.py#L203 def collate_fn(batch): return tuple(zip(*batch)) + + +def to_padded_tensor(tensors, dim=0, fill=0.0): + sizes = np.array([list(t.shape) for t in tensors]) + max_dim_size = sizes[:, dim].max() + sizes[:, dim] = max_dim_size - sizes[:, dim] + + zeros = [ + torch.full(s.tolist(), fill, device=t.device, dtype=t.dtype) + for t, s in zip(tensors, sizes) + ] + tensors = [torch.cat((t, z), dim=dim) for t, z in zip(tensors, zeros)] + + return tensors + + +def yolo_collate_fn(batch): + images, targets = tuple(zip(*batch)) + + images = default_collate(images) + + # Turn tuple of dicts into dict of tuples + keys = targets[0].keys() + target = {k: tuple(t[k] for t in targets) for k in keys} + + # Pad packed using torch.nested + packed = target["packed"] + packed = to_padded_tensor(packed) + packed = default_collate(packed) + + lengths = target["packed_length"] + lengths = default_collate(lengths) + + return images, {"target": packed, "lengths": lengths} diff --git a/mart/models/yolov3.py b/mart/models/yolov3.py index 40ce6a69..4e069cb8 100644 --- a/mart/models/yolov3.py +++ b/mart/models/yolov3.py @@ -4,38 +4,13 @@ # SPDX-License-Identifier: BSD-3-Clause # -import numpy as np import torch import torch.nn.functional as F from yolov3.inference import post_process -from yolov3.model import YoloNetV3 as YoloNetV3_ from yolov3.training import yolo_loss_fn from yolov3.utils import cxcywh_to_xywh -class YoloNetV3(YoloNetV3_): - def forward(self, x, *args, **kwargs): - x = torch.stack(x).contiguous() - - return super().forward(x, *args, **kwargs) - - -def to_padded_tensor(tensors, dim=0, fill=0.0): - sizes = np.array([list(t.shape) for t in tensors]) - 
max_dim_size = sizes[:, dim].max() - sizes[:, dim] = max_dim_size - sizes[:, dim] - - zeros = [ - torch.full(s.tolist(), fill, device=t.device, dtype=t.dtype) - for t, s in zip(tensors, sizes) - ] - tensors = [torch.cat((t, z), dim=dim) for t, z in zip(tensors, zeros)] - - tensor = torch.stack(tensors).contiguous() - - return tensor - - class Loss(torch.nn.Module): def __init__(self, image_size, average=True): super().__init__() @@ -44,9 +19,8 @@ def __init__(self, image_size, average=True): self.average = average def forward(self, logits, target, **kwargs): - # Convert target to acceptable format for yolo_loss_fn - targets = to_padded_tensor([t["packed"] for t in target]) - lengths = [t["packed_length"] for t in target] + targets = target["target"] + lengths = target["lengths"] losses = yolo_loss_fn(logits, targets, lengths, self.image_size, self.average) total_loss, coord_loss, obj_loss, noobj_loss, class_loss = losses @@ -125,6 +99,14 @@ def tensor_to_dict(detection): def forward(self, logits, target, **kwargs): detections = post_process(logits, self.nms, self.conf_thres, self.nms_thres) + # FIXME: This should be another module # Convert detections and targets to List[dict[str, torch.Tensor]]. This is the format # torchmetrics wants. 
- return [Detections.tensor_to_dict(det) for det in detections] + preds = [Detections.tensor_to_dict(det) for det in detections] + + targets = target["target"] + lengths = target["lengths"] + targets = [target[:length] for target, length in zip(targets, lengths)] + targets = [Detections.tensor_to_dict(target) for target in targets] + + return {"preds": preds, "targets": targets} From 7b6557c8732808f008925a2eb54ff01a2fc8acdb Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 24 Apr 2023 08:14:35 -0700 Subject: [PATCH 040/106] Merge Underlay/Overlay into Composite --- mart/attack/composer.py | 56 +++++++------------ .../composer/color_jitter_warp_composite.yaml | 4 ++ .../composer/color_jitter_warp_overlay.yaml | 4 -- .../composer/color_jitter_warp_underlay.yaml | 4 -- .../attack/perturber/composer/composite.yaml | 1 + .../attack/perturber/composer/overlay.yaml | 1 - .../attack/perturber/composer/underlay.yaml | 1 - .../perturber/composer/warp_composite.yaml | 2 + .../perturber/composer/warp_overlay.yaml | 2 - .../perturber/composer/warp_underlay.yaml | 2 - .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 2 +- mart/datamodules/coco.py | 7 ++- 12 files changed, 33 insertions(+), 53 deletions(-) create mode 100644 mart/configs/attack/perturber/composer/color_jitter_warp_composite.yaml delete mode 100644 mart/configs/attack/perturber/composer/color_jitter_warp_overlay.yaml delete mode 100644 mart/configs/attack/perturber/composer/color_jitter_warp_underlay.yaml create mode 100644 mart/configs/attack/perturber/composer/composite.yaml delete mode 100644 mart/configs/attack/perturber/composer/overlay.yaml delete mode 100644 mart/configs/attack/perturber/composer/underlay.yaml create mode 100644 mart/configs/attack/perturber/composer/warp_composite.yaml delete mode 100644 mart/configs/attack/perturber/composer/warp_overlay.yaml delete mode 100644 mart/configs/attack/perturber/composer/warp_underlay.yaml diff --git a/mart/attack/composer.py b/mart/attack/composer.py 
index d851d33b..53fa73a9 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -72,54 +72,33 @@ def compose(self, perturbation, *, input, target): return input + perturbation -class Overlay(Composer): - """We assume an adversary overlays a patch to the input.""" - - def __init__(self, premultiplied_alpha=False): - super().__init__() - - self.premultiplied_alpha = premultiplied_alpha - - def compose(self, perturbation, *, input, target): - # True is mutable, False is immutable. - mask = target["perturbable_mask"] - - # Convert mask to a Tensor with same torch.dtype and torch.device as input, - # because some data modules (e.g. Armory) gives binary mask. - mask = mask.to(input) - - if self.premultiplied_alpha: - return input * (1 - mask) + perturbation - else: - return input * (1 - mask) + perturbation * mask - - -class Underlay(Composer): +class Composite(Composer): """We assume an adversary underlays a patch to the input.""" - def __init__(self, premultiplied_alpha=False): + def __init__(self, premultiplied_alpha=False, use_masks=False): super().__init__() self.premultiplied_alpha = premultiplied_alpha + self.use_masks = use_masks def compose(self, perturbation, *, input, target): # True is mutable, False is immutable. - mask = target["perturbable_mask"] - - object_mask = target["masks"].any(dim=0, keepdim=True) + perturbable_mask = target["perturbable_mask"] # Convert mask to a Tensor with same torch.dtype and torch.device as input, # because some data modules (e.g. Armory) gives binary mask. 
- mask = mask.to(input) + perturbable_mask = perturbable_mask.to(input) - # If mask overlaps, object, then null out that part of mask - mask = mask * (1 - object_mask) + # Zero out perturbation and mask that overlaps any objects + if self.use_masks: + foreground_mask = target["masks"] + perturbation = perturbation * (1 - foreground_mask) + perturbable_mask = perturbable_mask * (1 - foreground_mask) - if self.premultiplied_alpha: - return input * (1 - mask) + perturbation * (1 - object_mask) - else: - return input * (1 - mask) + perturbation * mask + if not self.premultiplied_alpha: + perturbation = perturbation * perturbable_mask + return input * (1 - perturbable_mask) + perturbation class MaskAdditive(Composer): """We assume an adversary adds masked perturbation to the input.""" @@ -132,13 +111,16 @@ def compose(self, perturbation, *, input, target): # FIXME: It would be really nice if we could compose composers just like we can compose everything else... -class WarpUnderlay(Underlay): +class WarpComposite(Composite): def __init__( self, warp, + *args, clamp=(0, 255), + premultiplied_alpha=True, + **kwargs, ): - super().__init__(premultiplied_alpha=True) + super().__init__(*args, premultiplied_alpha=premultiplied_alpha, **kwargs) self.warp = warp self.clamp = clamp @@ -168,7 +150,7 @@ def compose(self, perturbation, *, input, target): # FIXME: It would be really nice if we could compose composers just like we can compose everything else... 
-class ColorJitterWarpUnderlay(WarpUnderlay): +class ColorJitterWarpComposite(WarpComposite): def __init__( self, *args, diff --git a/mart/configs/attack/perturber/composer/color_jitter_warp_composite.yaml b/mart/configs/attack/perturber/composer/color_jitter_warp_composite.yaml new file mode 100644 index 00000000..98fa475a --- /dev/null +++ b/mart/configs/attack/perturber/composer/color_jitter_warp_composite.yaml @@ -0,0 +1,4 @@ +defaults: + - warp_composite + +_target_: mart.attack.composer.ColorJitterWarpComposite diff --git a/mart/configs/attack/perturber/composer/color_jitter_warp_overlay.yaml b/mart/configs/attack/perturber/composer/color_jitter_warp_overlay.yaml deleted file mode 100644 index 0561022c..00000000 --- a/mart/configs/attack/perturber/composer/color_jitter_warp_overlay.yaml +++ /dev/null @@ -1,4 +0,0 @@ -defaults: - - warp_overlay - -_target_: mart.attack.composer.ColorJitterWarpOverlay diff --git a/mart/configs/attack/perturber/composer/color_jitter_warp_underlay.yaml b/mart/configs/attack/perturber/composer/color_jitter_warp_underlay.yaml deleted file mode 100644 index cb8bec0c..00000000 --- a/mart/configs/attack/perturber/composer/color_jitter_warp_underlay.yaml +++ /dev/null @@ -1,4 +0,0 @@ -defaults: - - warp_underlay - -_target_: mart.attack.composer.ColorJitterWarpUnderlay diff --git a/mart/configs/attack/perturber/composer/composite.yaml b/mart/configs/attack/perturber/composer/composite.yaml new file mode 100644 index 00000000..c75347ff --- /dev/null +++ b/mart/configs/attack/perturber/composer/composite.yaml @@ -0,0 +1 @@ +_target_: mart.attack.composer.Composite diff --git a/mart/configs/attack/perturber/composer/overlay.yaml b/mart/configs/attack/perturber/composer/overlay.yaml deleted file mode 100644 index 469f7245..00000000 --- a/mart/configs/attack/perturber/composer/overlay.yaml +++ /dev/null @@ -1 +0,0 @@ -_target_: mart.attack.composer.Overlay diff --git a/mart/configs/attack/perturber/composer/underlay.yaml 
b/mart/configs/attack/perturber/composer/underlay.yaml deleted file mode 100644 index 670cac0e..00000000 --- a/mart/configs/attack/perturber/composer/underlay.yaml +++ /dev/null @@ -1 +0,0 @@ -_target_: mart.attack.composer.Underlay diff --git a/mart/configs/attack/perturber/composer/warp_composite.yaml b/mart/configs/attack/perturber/composer/warp_composite.yaml new file mode 100644 index 00000000..dcc71680 --- /dev/null +++ b/mart/configs/attack/perturber/composer/warp_composite.yaml @@ -0,0 +1,2 @@ +_target_: mart.attack.composer.WarpComposite +warp: ??? diff --git a/mart/configs/attack/perturber/composer/warp_overlay.yaml b/mart/configs/attack/perturber/composer/warp_overlay.yaml deleted file mode 100644 index 14dfef6d..00000000 --- a/mart/configs/attack/perturber/composer/warp_overlay.yaml +++ /dev/null @@ -1,2 +0,0 @@ -_target_: mart.attack.composer.WarpOverlay -warp: ??? diff --git a/mart/configs/attack/perturber/composer/warp_underlay.yaml b/mart/configs/attack/perturber/composer/warp_underlay.yaml deleted file mode 100644 index 791e358b..00000000 --- a/mart/configs/attack/perturber/composer/warp_underlay.yaml +++ /dev/null @@ -1,2 +0,0 @@ -_target_: mart.attack.composer.WarpUnderlay -warp: ??? 
diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index fbfcd301..f1a13450 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -3,7 +3,7 @@ defaults: - /attack/perturber@model.modules.perturber: default - /attack/perturber/initializer@model.modules.perturber.initializer: uniform - - /attack/perturber/composer@model.modules.perturber.composer: color_jitter_warp_underlay + - /attack/perturber/composer@model.modules.perturber.composer: color_jitter_warp_composite - /attack/perturber/projector@model.modules.perturber.projector: range - /attack/gradient_modifier@model.gradient_modifier: lp_normalizer - override /optimization: super_convergence diff --git a/mart/datamodules/coco.py b/mart/datamodules/coco.py index 41dc3a17..e3c501b6 100644 --- a/mart/datamodules/coco.py +++ b/mart/datamodules/coco.py @@ -125,4 +125,9 @@ def yolo_collate_fn(batch): lengths = target["packed_length"] lengths = default_collate(lengths) - return images, {"target": packed, "lengths": lengths} + # Collapse masks into single foreground mask + masks = target["masks"] + masks = [m.any(dim=0, keepdim=True) for m in masks] + masks = default_collate(masks) + + return images, {"target": packed, "lengths": lengths, "masks": masks} From 326017d91ed678059c46880080c74bcca9407bec Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 24 Apr 2023 08:24:48 -0700 Subject: [PATCH 041/106] Cleanup COCO datamodule config --- mart/configs/datamodule/coco.yaml | 2 +- mart/configs/datamodule/coco_yolov3.yaml | 40 ++---------------------- 2 files changed, 4 insertions(+), 38 deletions(-) diff --git a/mart/configs/datamodule/coco.yaml b/mart/configs/datamodule/coco.yaml index a4ec3403..5416cae2 100644 --- a/mart/configs/datamodule/coco.yaml +++ b/mart/configs/datamodule/coco.yaml @@ -1,5 +1,5 @@ defaults: - - default.yaml + - default train_dataset: _target_: 
mart.datamodules.coco.CocoDetection diff --git a/mart/configs/datamodule/coco_yolov3.yaml b/mart/configs/datamodule/coco_yolov3.yaml index 411ac440..48ce2048 100644 --- a/mart/configs/datamodule/coco_yolov3.yaml +++ b/mart/configs/datamodule/coco_yolov3.yaml @@ -1,14 +1,10 @@ defaults: - - default + - coco num_workers: 1 train_dataset: - _target_: mart.datamodules.coco.CocoDetection - root: ${paths.data_dir}/coco/train2017 - annFile: ${paths.data_dir}/coco/annotations/instances_train2017.json transforms: - _target_: mart.transforms.Compose transforms: - _target_: torchvision.transforms.ToTensor - _target_: mart.transforms.ConvertCocoPolysToMask @@ -22,40 +18,10 @@ train_dataset: num_classes: 80 val_dataset: - _target_: mart.datamodules.coco.CocoDetection - root: ${paths.data_dir}/coco/val2017 - annFile: ${paths.data_dir}/coco/annotations/instances_val2017.json - transforms: - _target_: mart.transforms.Compose - transforms: - - _target_: torchvision.transforms.ToTensor - - _target_: mart.transforms.ConvertCocoPolysToMask - - _target_: mart.transforms.PadToSquare - fill: 0.5 - - _target_: mart.transforms.Resize - size: [416, 416] - - _target_: mart.transforms.ConvertBoxesToCXCYHW - - _target_: mart.transforms.RemapLabels - - _target_: mart.transforms.PackTarget - num_classes: 80 + transforms: ${..train_dataset.transforms} test_dataset: - _target_: mart.datamodules.coco.CocoDetection - root: ${paths.data_dir}/coco/val2017 - annFile: ${paths.data_dir}/coco/annotations/instances_val2017.json - transforms: - _target_: mart.transforms.Compose - transforms: - - _target_: torchvision.transforms.ToTensor - - _target_: mart.transforms.ConvertCocoPolysToMask - - _target_: mart.transforms.PadToSquare - fill: 0.5 - - _target_: mart.transforms.Resize - size: [416, 416] - - _target_: mart.transforms.ConvertBoxesToCXCYHW - - _target_: mart.transforms.RemapLabels - - _target_: mart.transforms.PackTarget - num_classes: 80 + transforms: ${..val_dataset.transforms} collate_fn: 
_target_: hydra.utils.get_method From 053e8cec3da4a7b3a9c18330db983e791c2ea79d Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 24 Apr 2023 08:44:21 -0700 Subject: [PATCH 042/106] Switch to persons-only COCO and overlay persons on top of perturbation --- .../configs/experiment/COCO_YOLOv3_ShapeShifter.yaml | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index f1a13450..19ddbf1c 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -18,8 +18,8 @@ tags: ["adv"] optimized_metric: "test_metrics/map" trainer: - # 118287 training images, batch_size=16, FLOOR(118287/16) = 7392 - max_steps: 73920 # 10 epochs + # 64115 training images, batch_size=16, FLOOR(64115/16) = 4007 + max_steps: 40070 # 10 epochs # mAP can be slow to compute so limit number of images limit_val_batches: 100 limit_test_batches: 100 @@ -39,6 +39,13 @@ datamodule: num_workers: 32 ims_per_batch: 16 + train_dataset: + annFile: ${paths.data_dir}/coco/annotations/person_instances_train2017.json + val_dataset: + annFile: ${paths.data_dir}/coco/annotations/person_instances_val2017.json + test_dataset: + annFile: ${paths.data_dir}/coco/annotations/person_instances_val2017.json + model: modules: perturber: @@ -71,6 +78,7 @@ model: saturation: [0.5, 1.5] hue: [-0.05, 0.05] pixel_scale: 1.0 + use_masks: True loss: weights: [1, 1, 1e-5] From 685dc08632885a4efe2a0610da1fa7866125f849 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 24 Apr 2023 11:15:10 -0700 Subject: [PATCH 043/106] Fix YoloNetV3 to produce preds and logits --- mart/configs/experiment/COCO_YOLOv3.yaml | 2 +- .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 12 +-- mart/configs/model/yolov3.yaml | 40 +++++++--- mart/models/yolov3.py | 75 ++++++++++++++++++- 4 files changed, 110 insertions(+), 19 deletions(-) diff --git 
a/mart/configs/experiment/COCO_YOLOv3.yaml b/mart/configs/experiment/COCO_YOLOv3.yaml index 05c6016b..130c63fa 100644 --- a/mart/configs/experiment/COCO_YOLOv3.yaml +++ b/mart/configs/experiment/COCO_YOLOv3.yaml @@ -23,7 +23,7 @@ datamodule: model: load_state_dict: - logits: ${paths.data_dir}/yolov3_original.pt + yolov3: ${paths.data_dir}/yolov3_original.pt # yolov3 model does not produce preds/targets in training sequence training_metrics: null diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 19ddbf1c..c80caf14 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -83,10 +83,10 @@ model: loss: weights: [1, 1, 1e-5] - freeze: "logits" + freeze: "yolov3" load_state_dict: - logits: ${paths.data_dir}/yolov3_original.pt + yolov3: ${paths.data_dir}/yolov3_original.pt optimizer: lr: 0.01 @@ -97,13 +97,13 @@ model: training_sequence: seq005: perturber seq010: - logits: ["perturber.input_adv"] + yolov3: ["perturber.input_adv"] seq030: loss: - losses.hide_target_objects_loss - losses.correct_target_class_loss - perturber.total_variation - seq040: + seq050: output: total_variation: perturber.total_variation @@ -126,7 +126,7 @@ model: validation_sequence: seq005: perturber seq010: - logits: ["perturber.input_adv"] + yolov3: ["perturber.input_adv"] seq030: loss: - losses.hide_target_objects_loss @@ -136,7 +136,7 @@ model: test_sequence: seq005: perturber seq010: - logits: ["perturber.input_adv"] + yolov3: ["perturber.input_adv"] seq030: loss: - losses.hide_target_objects_loss diff --git a/mart/configs/model/yolov3.yaml b/mart/configs/model/yolov3.yaml index 8234e92c..d521fc52 100644 --- a/mart/configs/model/yolov3.yaml +++ b/mart/configs/model/yolov3.yaml @@ -2,8 +2,8 @@ defaults: - modular modules: - logits: - _target_: yolov3.model.YoloNetV3 + yolov3: + _target_: mart.models.yolov3.YoloNetV3 losses: _target_: 
mart.models.yolov3.Loss @@ -27,15 +27,23 @@ training_metrics: null training_sequence: seq010: - logits: ["input"] + yolov3: ["input"] - seq020: losses + seq020: + losses: + logits: yolov3.logits + target: target seq030: loss: - losses.total_loss seq040: + detections: + preds: yolov3.preds + target: target + + seq050: output: loss: loss total_loss: losses.total_loss @@ -51,15 +59,21 @@ training_sequence: validation_sequence: seq010: - logits: ["input"] + yolov3: ["input"] - seq020: losses + seq020: + losses: + logits: yolov3.logits + target: target seq030: loss: - losses.total_loss - seq040: detections + seq040: + detections: + preds: yolov3.preds + target: target seq050: output: @@ -79,15 +93,21 @@ validation_sequence: test_sequence: seq010: - logits: ["input"] + yolov3: ["input"] - seq020: losses + seq020: + losses: + logits: yolov3.logits + target: target seq030: loss: - losses.total_loss - seq040: detections + seq040: + detections: + preds: yolov3.preds + target: target seq050: output: diff --git a/mart/models/yolov3.py b/mart/models/yolov3.py index 4e069cb8..eaf74df0 100644 --- a/mart/models/yolov3.py +++ b/mart/models/yolov3.py @@ -6,9 +6,80 @@ import torch import torch.nn.functional as F +import yolov3 from yolov3.inference import post_process from yolov3.training import yolo_loss_fn from yolov3.utils import cxcywh_to_xywh +from yolov3.model import YoloNetV3 as YoloNetV3_, YoloLayer as YoloLayer_ +from yolov3.config import ANCHORS, NUM_ANCHORS_PER_SCALE, NUM_CLASSES, NUM_ATTRIB +from mart.utils import MonkeyPatch + + +class YoloNetV3(YoloNetV3_): + def __init__(self): + with MonkeyPatch(yolov3.model, "YoloLayer", YoloLayer): + super().__init__() + + def forward(self, x): + tmp1, tmp2, tmp3 = self.darknet(x) + out1, out2, out3 = self.yolo_tail(tmp1, tmp2, tmp3) + logits = torch.cat((out1["logits"], out2["logits"], out3["logits"]), 1) + preds = torch.cat((out1["preds"], out2["preds"], out3["preds"]), 1) + + return {"logits": logits, "preds": preds} + +class 
YoloLayer(torch.nn.Module): + def __init__(self, scale, stride): + super().__init__() + + if scale == 's': + idx = (0, 1, 2) + elif scale == 'm': + idx = (3, 4, 5) + elif scale == 'l': + idx = (6, 7, 8) + else: + idx = None + + self.anchors = torch.tensor([ANCHORS[i] for i in idx]) + self.stride = stride + + def forward(self, x): + num_batch = x.size(0) + num_grid = x.size(2) + + output_raw = x.view(num_batch, + NUM_ANCHORS_PER_SCALE, + NUM_ATTRIB, + num_grid, + num_grid).permute(0, 1, 3, 4, 2).contiguous().view(num_batch, -1, NUM_ATTRIB) + + prediction_raw = x.view(num_batch, + NUM_ANCHORS_PER_SCALE, + NUM_ATTRIB, + num_grid, + num_grid).permute(0, 1, 3, 4, 2).contiguous() + + self.anchors = self.anchors.to(x.device).float() + # Calculate offsets for each grid + grid_tensor = torch.arange(num_grid, dtype=torch.float, device=x.device).repeat(num_grid, 1) + grid_x = grid_tensor.view([1, 1, num_grid, num_grid]) + grid_y = grid_tensor.t().view([1, 1, num_grid, num_grid]) + anchor_w = self.anchors[:, 0:1].view((1, -1, 1, 1)) + anchor_h = self.anchors[:, 1:2].view((1, -1, 1, 1)) + + # Get outputs + x_center_pred = (torch.sigmoid(prediction_raw[..., 0]) + grid_x) * self.stride # Center x + y_center_pred = (torch.sigmoid(prediction_raw[..., 1]) + grid_y) * self.stride # Center y + w_pred = torch.exp(prediction_raw[..., 2]) * anchor_w # Width + h_pred = torch.exp(prediction_raw[..., 3]) * anchor_h # Height + bbox_pred = torch.stack((x_center_pred, y_center_pred, w_pred, h_pred), dim=4).view((num_batch, -1, 4)) #cxcywh + conf_pred = torch.sigmoid(prediction_raw[..., 4]).view(num_batch, -1, 1) # Conf + cls_pred = torch.sigmoid(prediction_raw[..., 5:]).view(num_batch, -1, NUM_CLASSES) # Cls pred one-hot. 
+ + output = torch.cat((bbox_pred, conf_pred, cls_pred), -1) + + return {"logits": output_raw, "preds": output} class Loss(torch.nn.Module): @@ -96,8 +167,8 @@ def tensor_to_dict(detection): return {"boxes": boxes, "labels": labels, "scores": scores} @torch.no_grad() - def forward(self, logits, target, **kwargs): - detections = post_process(logits, self.nms, self.conf_thres, self.nms_thres) + def forward(self, preds, target, **kwargs): + detections = post_process(preds, self.nms, self.conf_thres, self.nms_thres) # FIXME: This should be another module # Convert detections and targets to List[dict[str, torch.Tensor]]. This is the format From 88d46e2e0ebd15e8240a0b5dd2fcce2202c0d40a Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 24 Apr 2023 11:20:27 -0700 Subject: [PATCH 044/106] Add score thresholding to losses --- mart/configs/model/yolov3.yaml | 12 ++++++++++++ mart/models/yolov3.py | 18 ++++++++++++------ 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/mart/configs/model/yolov3.yaml b/mart/configs/model/yolov3.yaml index d521fc52..30dc3121 100644 --- a/mart/configs/model/yolov3.yaml +++ b/mart/configs/model/yolov3.yaml @@ -56,6 +56,8 @@ training_sequence: hide_target_objects_loss: losses.hide_target_objects_loss correct_target_class_loss: losses.correct_target_class_loss target_count: losses.target_count + score_count: losses.score_count + target_score_count: losses.target_score_count validation_sequence: seq010: @@ -90,6 +92,8 @@ validation_sequence: hide_target_objects_loss: losses.hide_target_objects_loss correct_target_class_loss: losses.correct_target_class_loss target_count: losses.target_count + score_count: losses.score_count + target_score_count: losses.target_score_count test_sequence: seq010: @@ -124,6 +128,8 @@ test_sequence: hide_target_objects_loss: losses.hide_target_objects_loss correct_target_class_loss: losses.correct_target_class_loss target_count: losses.target_count + score_count: losses.score_count + 
target_score_count: losses.target_score_count training_step_log: - loss @@ -137,6 +143,8 @@ training_step_log: - hide_target_objects_loss - correct_target_class_loss - target_count + - score_count + - target_score_count validation_step_log: - loss @@ -150,6 +158,8 @@ validation_step_log: - hide_target_objects_loss - correct_target_class_loss - target_count + - score_count + - target_score_count test_step_log: - loss @@ -163,3 +173,5 @@ test_step_log: - hide_target_objects_loss - correct_target_class_loss - target_count + - score_count + - target_score_count diff --git a/mart/models/yolov3.py b/mart/models/yolov3.py index eaf74df0..97899a32 100644 --- a/mart/models/yolov3.py +++ b/mart/models/yolov3.py @@ -83,11 +83,13 @@ def forward(self, x): class Loss(torch.nn.Module): - def __init__(self, image_size, average=True): + def __init__(self, image_size, average=True, score_thresh=0.1, target_idx=0): super().__init__() self.image_size = image_size self.average = average + self.score_thresh = score_thresh + self.target_idx = target_idx def forward(self, logits, target, **kwargs): targets = target["target"] @@ -98,28 +100,30 @@ def forward(self, logits, target, **kwargs): pred_conf_logit = logits[..., 4] pred_conf_score = torch.sigmoid(pred_conf_logit) + score_mask = pred_conf_score > self.score_thresh + class_logits = logits[..., 5:] - target_mask = (torch.argmax(class_logits, dim=-1) == 0) & (pred_conf_score > 0.1) + target_mask = torch.argmax(class_logits, dim=-1) == self.target_idx # make objectness go to zero tgt_zero = torch.zeros(pred_conf_logit.size(), device=pred_conf_logit.device) hide_objects_losses = F.binary_cross_entropy_with_logits( pred_conf_logit, tgt_zero, reduction="none" ) - hide_objects_loss = hide_objects_losses.sum() + hide_objects_loss = hide_objects_losses[score_mask].sum() # make target objectness go to zero - hide_target_objects_loss = hide_objects_losses[target_mask].sum() + hide_target_objects_loss = hide_objects_losses[target_mask & 
score_mask].sum() # make target logit go to zero target_class_logit = class_logits[..., 0] # 0 == person target_class_losses = F.binary_cross_entropy_with_logits( target_class_logit, tgt_zero, reduction="none" ) - target_class_loss = target_class_losses.sum() + target_class_loss = target_class_losses[score_mask].sum() # make correctly predicted target class logit go to zero - correct_target_class_loss = target_class_losses[target_mask].sum() + correct_target_class_loss = target_class_losses[target_mask & score_mask].sum() return { "total_loss": total_loss, @@ -131,7 +135,9 @@ def forward(self, logits, target, **kwargs): "hide_target_objects_loss": hide_target_objects_loss, "target_class_loss": target_class_loss, "correct_target_class_loss": correct_target_class_loss, + "score_count": score_mask.sum(), "target_count": target_mask.sum(), + "target_score_count": (target_mask & score_mask).sum(), } From bcdb8c7eb73a53d420da9aec774982080151d79b Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 24 Apr 2023 11:23:42 -0700 Subject: [PATCH 045/106] style --- mart/attack/composer.py | 1 + mart/models/yolov3.py | 48 +++++++++++++++++++++++++---------------- 2 files changed, 30 insertions(+), 19 deletions(-) diff --git a/mart/attack/composer.py b/mart/attack/composer.py index 53fa73a9..c3362ff5 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -100,6 +100,7 @@ def compose(self, perturbation, *, input, target): return input * (1 - perturbable_mask) + perturbation + class MaskAdditive(Composer): """We assume an adversary adds masked perturbation to the input.""" diff --git a/mart/models/yolov3.py b/mart/models/yolov3.py index 97899a32..4b310e71 100644 --- a/mart/models/yolov3.py +++ b/mart/models/yolov3.py @@ -7,11 +7,13 @@ import torch import torch.nn.functional as F import yolov3 +from yolov3.config import ANCHORS, NUM_ANCHORS_PER_SCALE, NUM_ATTRIB, NUM_CLASSES from yolov3.inference import post_process +from yolov3.model import YoloLayer as 
YoloLayer_ +from yolov3.model import YoloNetV3 as YoloNetV3_ from yolov3.training import yolo_loss_fn from yolov3.utils import cxcywh_to_xywh -from yolov3.model import YoloNetV3 as YoloNetV3_, YoloLayer as YoloLayer_ -from yolov3.config import ANCHORS, NUM_ANCHORS_PER_SCALE, NUM_CLASSES, NUM_ATTRIB + from mart.utils import MonkeyPatch @@ -28,15 +30,16 @@ def forward(self, x): return {"logits": logits, "preds": preds} + class YoloLayer(torch.nn.Module): def __init__(self, scale, stride): super().__init__() - if scale == 's': + if scale == "s": idx = (0, 1, 2) - elif scale == 'm': + elif scale == "m": idx = (3, 4, 5) - elif scale == 'l': + elif scale == "l": idx = (6, 7, 8) else: idx = None @@ -48,34 +51,41 @@ def forward(self, x): num_batch = x.size(0) num_grid = x.size(2) - output_raw = x.view(num_batch, - NUM_ANCHORS_PER_SCALE, - NUM_ATTRIB, - num_grid, - num_grid).permute(0, 1, 3, 4, 2).contiguous().view(num_batch, -1, NUM_ATTRIB) + output_raw = ( + x.view(num_batch, NUM_ANCHORS_PER_SCALE, NUM_ATTRIB, num_grid, num_grid) + .permute(0, 1, 3, 4, 2) + .contiguous() + .view(num_batch, -1, NUM_ATTRIB) + ) - prediction_raw = x.view(num_batch, - NUM_ANCHORS_PER_SCALE, - NUM_ATTRIB, - num_grid, - num_grid).permute(0, 1, 3, 4, 2).contiguous() + prediction_raw = ( + x.view(num_batch, NUM_ANCHORS_PER_SCALE, NUM_ATTRIB, num_grid, num_grid) + .permute(0, 1, 3, 4, 2) + .contiguous() + ) self.anchors = self.anchors.to(x.device).float() # Calculate offsets for each grid - grid_tensor = torch.arange(num_grid, dtype=torch.float, device=x.device).repeat(num_grid, 1) + grid_tensor = torch.arange(num_grid, dtype=torch.float, device=x.device).repeat( + num_grid, 1 + ) grid_x = grid_tensor.view([1, 1, num_grid, num_grid]) grid_y = grid_tensor.t().view([1, 1, num_grid, num_grid]) anchor_w = self.anchors[:, 0:1].view((1, -1, 1, 1)) anchor_h = self.anchors[:, 1:2].view((1, -1, 1, 1)) # Get outputs - x_center_pred = (torch.sigmoid(prediction_raw[..., 0]) + grid_x) * self.stride # Center x 
+ x_center_pred = (torch.sigmoid(prediction_raw[..., 0]) + grid_x) * self.stride # Center x y_center_pred = (torch.sigmoid(prediction_raw[..., 1]) + grid_y) * self.stride # Center y w_pred = torch.exp(prediction_raw[..., 2]) * anchor_w # Width h_pred = torch.exp(prediction_raw[..., 3]) * anchor_h # Height - bbox_pred = torch.stack((x_center_pred, y_center_pred, w_pred, h_pred), dim=4).view((num_batch, -1, 4)) #cxcywh + bbox_pred = torch.stack((x_center_pred, y_center_pred, w_pred, h_pred), dim=4).view( + (num_batch, -1, 4) + ) # cxcywh conf_pred = torch.sigmoid(prediction_raw[..., 4]).view(num_batch, -1, 1) # Conf - cls_pred = torch.sigmoid(prediction_raw[..., 5:]).view(num_batch, -1, NUM_CLASSES) # Cls pred one-hot. + cls_pred = torch.sigmoid(prediction_raw[..., 5:]).view( + num_batch, -1, NUM_CLASSES + ) # Cls pred one-hot. output = torch.cat((bbox_pred, conf_pred, cls_pred), -1) From 53bb29e1017be625e97ef82cc3dd321f95aa6b9b Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 24 Apr 2023 11:50:10 -0700 Subject: [PATCH 046/106] bugfix --- mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index c80caf14..b4f71266 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -119,6 +119,8 @@ model: - hide_target_objects_loss - correct_target_class_loss - target_count + - score_count + - target_score_count - total_variation training_metrics: null From 2aaeeb221b8e6c14c7b100ed2254e2b4002c4dbf Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 24 Apr 2023 15:23:03 -0700 Subject: [PATCH 047/106] Turn off random erasing --- mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml 
b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index b4f71266..6596908d 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -63,10 +63,6 @@ model: warp: _target_: torchvision.transforms.Compose transforms: - - _target_: torchvision.transforms.RandomErasing - p: 0.75 - scale: [0.2, 0.7] - ratio: [0.3, 3.3] - _target_: torchvision.transforms.RandomAffine degrees: [-5, 5] scale: [0.3, 0.5] From 5d0386814335ca3b7146dec72e6203103eec2639 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 24 Apr 2023 15:31:00 -0700 Subject: [PATCH 048/106] Only apply color jitter during training --- mart/attack/composer.py | 7 ++++--- mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml | 8 +------- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/mart/attack/composer.py b/mart/attack/composer.py index c3362ff5..5a919599 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -17,8 +17,8 @@ from torchvision.transforms.functional import InterpolationMode -class Composer(abc.ABC): - def __call__( +class Composer(torch.nn.Module): + def forward( self, perturbation: torch.Tensor | Iterable[torch.Tensor], *, @@ -169,6 +169,7 @@ def __init__( def compose(self, perturbation, *, input, target): # ColorJitter and friends assume floating point tensors are between [0, 1]... 
- perturbation = self.color_jitter(perturbation / self.pixel_scale) * self.pixel_scale + if self.training: + perturbation = self.color_jitter(perturbation / self.pixel_scale) * self.pixel_scale return super().compose(perturbation, input=input, target=target) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 6596908d..f9f25919 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -10,7 +10,7 @@ defaults: - override /datamodule: coco_yolov3 - override /model: yolov3 - override /metric: average_precision - - override /callbacks: [perturbation_visualizer, lr_monitor, override_mode] + - override /callbacks: [perturbation_visualizer, lr_monitor] task_name: "COCO_YOLOv3_ShapeShifter" tags: ["adv"] @@ -29,12 +29,6 @@ callbacks: perturbation_visualizer: frequency: 500 - override_mode: - # YOLOv3 uses training/eval modes to switch functionality. We disable this and just always use training mode. 
- training_mode: "train" - validation_mode: "train" - test_mode: "train" - datamodule: num_workers: 32 ims_per_batch: 16 From 9cbfd3b115ce25d8a5536b50857013cdf5c2a41d Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 24 Apr 2023 15:34:00 -0700 Subject: [PATCH 049/106] Add Image attack initializer --- mart/attack/initializer.py | 17 +++++++++++++++++ .../attack/perturber/initializer/image.yaml | 2 ++ 2 files changed, 19 insertions(+) create mode 100644 mart/configs/attack/perturber/initializer/image.yaml diff --git a/mart/attack/initializer.py b/mart/attack/initializer.py index d66bcf9f..6d0a8fcf 100644 --- a/mart/attack/initializer.py +++ b/mart/attack/initializer.py @@ -9,6 +9,8 @@ from typing import Iterable import torch +import torchvision +import torchvision.transforms.functional as F class Initializer: @@ -58,3 +60,18 @@ def initialize_(self, parameter: torch.Tensor) -> None: # We don't do tensor.renorm_() because the first dim is not the batch dim. pert_norm = parameter.norm(p=self.p) parameter.mul_(self.eps / pert_norm) + + +class Image(Initializer): + def __init__(self, path: str): + self.image = torchvision.io.read_image(path, torchvision.io.ImageReadMode.RGB) + + @torch.no_grad() + def initialize_(self, parameter: torch.Tensor) -> None: + image = self.image + + if self.image.shape != parameter.shape: + print(f"Resizing image from {image.shape} to {parameter.shape}...") + image = F.resize(image, parameter.shape[1:]) + + parameter.copy_(image) diff --git a/mart/configs/attack/perturber/initializer/image.yaml b/mart/configs/attack/perturber/initializer/image.yaml new file mode 100644 index 00000000..904995c4 --- /dev/null +++ b/mart/configs/attack/perturber/initializer/image.yaml @@ -0,0 +1,2 @@ +_target_: mart.attack.initializer.Image +path: ??? 
From 494a47f0011d1ccf69cef1530534c6d8b271fd3e Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 27 Apr 2023 10:57:14 -0700 Subject: [PATCH 050/106] PackTarget -> PackBoxesAndLabels --- mart/configs/datamodule/coco_yolov3.yaml | 2 +- mart/transforms/extended.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mart/configs/datamodule/coco_yolov3.yaml b/mart/configs/datamodule/coco_yolov3.yaml index 48ce2048..c82778cb 100644 --- a/mart/configs/datamodule/coco_yolov3.yaml +++ b/mart/configs/datamodule/coco_yolov3.yaml @@ -14,7 +14,7 @@ train_dataset: size: [416, 416] - _target_: mart.transforms.ConvertBoxesToCXCYHW - _target_: mart.transforms.RemapLabels - - _target_: mart.transforms.PackTarget + - _target_: mart.transforms.PackBoxesAndLabels num_classes: 80 val_dataset: diff --git a/mart/transforms/extended.py b/mart/transforms/extended.py index 3fc11dab..108f90d7 100644 --- a/mart/transforms/extended.py +++ b/mart/transforms/extended.py @@ -35,7 +35,7 @@ "Resize", "ConvertBoxesToCXCYHW", "RemapLabels", - "PackTarget", + "PackBoxesAndLabels", ] @@ -421,7 +421,7 @@ def __call__( return image, target -class PackTarget(ExTransform): +class PackBoxesAndLabels(ExTransform): def __init__(self, num_classes: int): self.num_classes = num_classes From bbb73f144de6ab7b1682ae2486824b2cb9d78bbf Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 27 Apr 2023 11:23:11 -0700 Subject: [PATCH 051/106] Add CreateBackgroundMask transform --- mart/attack/composer.py | 14 +++++++------- mart/configs/datamodule/coco_yolov3.yaml | 1 + .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 2 +- mart/datamodules/coco.py | 7 +++---- mart/transforms/extended.py | 19 +++++++++++++++++++ 5 files changed, 31 insertions(+), 12 deletions(-) diff --git a/mart/attack/composer.py b/mart/attack/composer.py index 5a919599..4ea6bb0a 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -75,11 +75,11 @@ def compose(self, perturbation, *, input, target): class 
Composite(Composer): """We assume an adversary underlays a patch to the input.""" - def __init__(self, premultiplied_alpha=False, use_masks=False): + def __init__(self, premultiplied_alpha=False, bg_mask_key=None): super().__init__() self.premultiplied_alpha = premultiplied_alpha - self.use_masks = use_masks + self.bg_mask_key = None def compose(self, perturbation, *, input, target): # True is mutable, False is immutable. @@ -89,11 +89,11 @@ def compose(self, perturbation, *, input, target): # because some data modules (e.g. Armory) gives binary mask. perturbable_mask = perturbable_mask.to(input) - # Zero out perturbation and mask that overlaps any objects - if self.use_masks: - foreground_mask = target["masks"] - perturbation = perturbation * (1 - foreground_mask) - perturbable_mask = perturbable_mask * (1 - foreground_mask) + # Keep portion of perturbation that is background + if self.bg_mask_key: + bg_mask = target[self.bg_mask_key] + perturbation = perturbation * bg_mask + perturbable_mask = perturbable_mask * bg_mask if not self.premultiplied_alpha: perturbation = perturbation * perturbable_mask diff --git a/mart/configs/datamodule/coco_yolov3.yaml b/mart/configs/datamodule/coco_yolov3.yaml index c82778cb..e416fadc 100644 --- a/mart/configs/datamodule/coco_yolov3.yaml +++ b/mart/configs/datamodule/coco_yolov3.yaml @@ -16,6 +16,7 @@ train_dataset: - _target_: mart.transforms.RemapLabels - _target_: mart.transforms.PackBoxesAndLabels num_classes: 80 + - _target_: mart.transforms.CreateBackgroundMask val_dataset: transforms: ${..train_dataset.transforms} diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index f9f25919..6e66cd63 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -68,7 +68,7 @@ model: saturation: [0.5, 1.5] hue: [-0.05, 0.05] pixel_scale: 1.0 - use_masks: True + bg_mask_key: "bg_mask" loss: weights: 
[1, 1, 1e-5] diff --git a/mart/datamodules/coco.py b/mart/datamodules/coco.py index e3c501b6..920b366d 100644 --- a/mart/datamodules/coco.py +++ b/mart/datamodules/coco.py @@ -126,8 +126,7 @@ def yolo_collate_fn(batch): lengths = default_collate(lengths) # Collapse masks into single foreground mask - masks = target["masks"] - masks = [m.any(dim=0, keepdim=True) for m in masks] - masks = default_collate(masks) + bg_masks = target["bg_mask"] + bg_masks = default_collate(bg_masks) - return images, {"target": packed, "lengths": lengths, "masks": masks} + return images, {"target": packed, "lengths": lengths, "bg_masks": bg_masks} diff --git a/mart/transforms/extended.py b/mart/transforms/extended.py index 108f90d7..871d6d7b 100644 --- a/mart/transforms/extended.py +++ b/mart/transforms/extended.py @@ -36,6 +36,7 @@ "ConvertBoxesToCXCYHW", "RemapLabels", "PackBoxesAndLabels", + "CreateBackgroundMask", ] @@ -440,3 +441,21 @@ def __call__( target["packed_length"] = target["packed"].shape[0] return image, target + + +class CreateBackgroundMask(ExTransform): + def __call__( + self, + image: Tensor, + target: dict[str, Tensor], + ): + assert "masks" in target + masks = target["masks"] + + # Collapse masks into single foreground mask + fg_mask = masks.any(dim=0) + + # Turn foreground mask into background mask + target["bg_mask"] = 1 - fg_mask + + return image, target From f410dc9019e7faeb591d696335268484414bf720 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 27 Apr 2023 11:24:59 -0700 Subject: [PATCH 052/106] Pass all targets in yolo_collate_fn --- mart/datamodules/coco.py | 19 ++++++++++--------- mart/models/yolov3.py | 8 ++++---- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/mart/datamodules/coco.py b/mart/datamodules/coco.py index 920b366d..634e89e8 100644 --- a/mart/datamodules/coco.py +++ b/mart/datamodules/coco.py @@ -47,6 +47,10 @@ def __init__( self.modalities = modalities + # Targets can contain a lot of information... 
+ # https://discuss.pytorch.org/t/runtimeerror-received-0-items-of-ancdata/4999/4 + torch.multiprocessing.set_sharing_strategy("file_system") + def _load_image(self, id: int) -> Any: if self.modalities is None: return super()._load_image(id) @@ -118,15 +122,12 @@ def yolo_collate_fn(batch): target = {k: tuple(t[k] for t in targets) for k in keys} # Pad packed using torch.nested - packed = target["packed"] - packed = to_padded_tensor(packed) - packed = default_collate(packed) + target["packed"] = to_padded_tensor(target["packed"]) - lengths = target["packed_length"] - lengths = default_collate(lengths) + COLLATE_KEYS = ["packed", "packed_length", "bg_masks"] - # Collapse masks into single foreground mask - bg_masks = target["bg_mask"] - bg_masks = default_collate(bg_masks) + for key in target.keys(): + if key in COLLATE_KEYS: + target[key] = default_collate(target[key]) - return images, {"target": packed, "lengths": lengths, "bg_masks": bg_masks} + return images, target diff --git a/mart/models/yolov3.py b/mart/models/yolov3.py index 4b310e71..11885545 100644 --- a/mart/models/yolov3.py +++ b/mart/models/yolov3.py @@ -102,8 +102,8 @@ def __init__(self, image_size, average=True, score_thresh=0.1, target_idx=0): self.target_idx = target_idx def forward(self, logits, target, **kwargs): - targets = target["target"] - lengths = target["lengths"] + targets = target["packed"] + lengths = target["packed_length"] losses = yolo_loss_fn(logits, targets, lengths, self.image_size, self.average) total_loss, coord_loss, obj_loss, noobj_loss, class_loss = losses @@ -191,8 +191,8 @@ def forward(self, preds, target, **kwargs): # torchmetrics wants. 
preds = [Detections.tensor_to_dict(det) for det in detections] - targets = target["target"] - lengths = target["lengths"] + targets = target["packed"] + lengths = target["packed_length"] targets = [target[:length] for target, length in zip(targets, lengths)] targets = [Detections.tensor_to_dict(target) for target in targets] From 4133885551700f79b6ba1a50efa65ba586405087 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 27 Apr 2023 11:33:59 -0700 Subject: [PATCH 053/106] Bugfix CreateBackgroundMask --- mart/attack/composer.py | 2 +- mart/datamodules/coco.py | 2 +- mart/transforms/extended.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mart/attack/composer.py b/mart/attack/composer.py index 4ea6bb0a..f73fb869 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -79,7 +79,7 @@ def __init__(self, premultiplied_alpha=False, bg_mask_key=None): super().__init__() self.premultiplied_alpha = premultiplied_alpha - self.bg_mask_key = None + self.bg_mask_key = bg_mask_key def compose(self, perturbation, *, input, target): # True is mutable, False is immutable. 
diff --git a/mart/datamodules/coco.py b/mart/datamodules/coco.py index 634e89e8..7f4f8348 100644 --- a/mart/datamodules/coco.py +++ b/mart/datamodules/coco.py @@ -124,7 +124,7 @@ def yolo_collate_fn(batch): # Pad packed using torch.nested target["packed"] = to_padded_tensor(target["packed"]) - COLLATE_KEYS = ["packed", "packed_length", "bg_masks"] + COLLATE_KEYS = ["packed", "packed_length", "bg_mask"] for key in target.keys(): if key in COLLATE_KEYS: diff --git a/mart/transforms/extended.py b/mart/transforms/extended.py index 871d6d7b..86f69af6 100644 --- a/mart/transforms/extended.py +++ b/mart/transforms/extended.py @@ -453,7 +453,7 @@ def __call__( masks = target["masks"] # Collapse masks into single foreground mask - fg_mask = masks.any(dim=0) + fg_mask = masks.any(dim=0, keepdim=True) # Turn foreground mask into background mask target["bg_mask"] = 1 - fg_mask From 5730a3db6d3a90a0518851a15137075634661bdb Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 27 Apr 2023 11:53:00 -0700 Subject: [PATCH 054/106] CreateBackgroundMask -> CreateBackgroundMaskFromCOCOMasks --- mart/configs/datamodule/coco_yolov3.yaml | 2 +- mart/transforms/extended.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mart/configs/datamodule/coco_yolov3.yaml b/mart/configs/datamodule/coco_yolov3.yaml index e416fadc..15183e08 100644 --- a/mart/configs/datamodule/coco_yolov3.yaml +++ b/mart/configs/datamodule/coco_yolov3.yaml @@ -16,7 +16,7 @@ train_dataset: - _target_: mart.transforms.RemapLabels - _target_: mart.transforms.PackBoxesAndLabels num_classes: 80 - - _target_: mart.transforms.CreateBackgroundMask + - _target_: mart.transforms.CreateBackgroundMaskFromCOCOMasks val_dataset: transforms: ${..train_dataset.transforms} diff --git a/mart/transforms/extended.py b/mart/transforms/extended.py index 86f69af6..4daf5900 100644 --- a/mart/transforms/extended.py +++ b/mart/transforms/extended.py @@ -36,7 +36,7 @@ "ConvertBoxesToCXCYHW", "RemapLabels", 
"PackBoxesAndLabels", - "CreateBackgroundMask", + "CreateBackgroundMaskFromCOCOMasks", ] @@ -443,7 +443,7 @@ def __call__( return image, target -class CreateBackgroundMask(ExTransform): +class CreateBackgroundMaskFromCOCOMasks(ExTransform): def __call__( self, image: Tensor, From 40b567c13d3cb2ec0a084b7a09d0e52beb179796 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 27 Apr 2023 11:53:48 -0700 Subject: [PATCH 055/106] Resize bg_masks --- mart/transforms/extended.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/mart/transforms/extended.py b/mart/transforms/extended.py index 4daf5900..ac999ad3 100644 --- a/mart/transforms/extended.py +++ b/mart/transforms/extended.py @@ -270,6 +270,9 @@ def __call__( if "keypoints" in target: target["keypoints"] = self.resize_keypoints(target["keypoints"], (dw, dh)) + if "bg_mask" in target: + target["bg_mask"] = self.resize_masks(target["bg_mask"], (dw, dh)) + return image, target def resize_boxes(self, boxes, ratio): @@ -281,6 +284,8 @@ def resize_boxes(self, boxes, ratio): return boxes def resize_masks(self, masks, ratio): + assert len(masks.shape) == 3 + # Resize fails on empty tensors if masks.shape[0] == 0: return torch.zeros((0, *self.size), dtype=masks.dtype, device=masks.device) From ec6fdbd911716e42989f066b04c710123e4afac3 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 27 Apr 2023 12:08:44 -0700 Subject: [PATCH 056/106] Add CreateBackgroundMaskFromImage --- mart/transforms/extended.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/mart/transforms/extended.py b/mart/transforms/extended.py index ac999ad3..95380961 100644 --- a/mart/transforms/extended.py +++ b/mart/transforms/extended.py @@ -37,6 +37,7 @@ "RemapLabels", "PackBoxesAndLabels", "CreateBackgroundMaskFromCOCOMasks", + "CreateBackgroundMaskFromImage", ] @@ -464,3 +465,23 @@ def __call__( target["bg_mask"] = 1 - fg_mask return image, target + + +class CreateBackgroundMaskFromImage(ExTransform): + def 
__init__(self, chroma_key, threshold): + self.chroma_key = torch.tensor(chroma_key) + self.threshold = threshold + + def __call__( + self, + image: Tensor, + target: dict[str, Tensor], + ): + self.chroma_key = self.chroma_key.to(image.device) + + l2_dist = ((image - self.chroma_key[:, None, None]) ** 2).sum(dim=0, keepdim=True).sqrt() + bg_mask = l2_dist <= self.threshold + + target["bg_mask"] = bg_mask + + return image, target From 9704ff8fab7493b32a38aaedcbc2687c303625f7 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 27 Apr 2023 12:12:01 -0700 Subject: [PATCH 057/106] Pad bg_masks --- mart/transforms/extended.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mart/transforms/extended.py b/mart/transforms/extended.py index 95380961..4905b835 100644 --- a/mart/transforms/extended.py +++ b/mart/transforms/extended.py @@ -230,6 +230,8 @@ def __call__( target["masks"] = self.pad_masks(target["masks"], padding) if "keypoints" in target: target["keypoints"] = self.pad_keypoints(target["keypoints"], padding) + if "bg_mask" in target: + target["bg_mask"] = self.pad_masks(target["bg_mask"], padding) return image, target @@ -270,7 +272,6 @@ def __call__( target["masks"] = self.resize_masks(target["masks"], (dw, dh)) if "keypoints" in target: target["keypoints"] = self.resize_keypoints(target["keypoints"], (dw, dh)) - if "bg_mask" in target: target["bg_mask"] = self.resize_masks(target["bg_mask"], (dw, dh)) From fb5c49b01f8cd2fea68a84c64ab5b8eecdb8d0a9 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 27 Apr 2023 12:24:22 -0700 Subject: [PATCH 058/106] Add LoadTensors transform --- mart/transforms/extended.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/mart/transforms/extended.py b/mart/transforms/extended.py index 4905b835..e0ab51d3 100644 --- a/mart/transforms/extended.py +++ b/mart/transforms/extended.py @@ -28,6 +28,7 @@ "Lambda", "SplitLambda", "LoadPerturbableMask", + "LoadTensors", 
"ConvertInstanceSegmentationToPerturbable", "RandomHorizontalFlip", "ConvertCocoPolysToMask", @@ -148,6 +149,26 @@ def __call__(self, image, target): return image, target +class LoadTensors(ExTransform): + def __init__(self, root, ext=".pt") -> None: + self.root = root + self.ext = ext + + def __call__(self, image, target): + filename, ext = os.path.splitext(target["file_name"]) + + metadata = torch.load( + os.path.join(self.root, filename + self.ext), map_location=image.device + ) + assert isinstance(metadata, dict) + + for key in metadata: + assert key not in target + target[key] = metadata[key] + + return image, target + + class RandomHorizontalFlip(T.RandomHorizontalFlip, ExTransform): """Flip the image and annotations including boxes, masks, keypoints and the perturable_masks.""" From 3aba51c094e0d974e6fa897fc8d63399703b99f5 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 27 Apr 2023 12:58:34 -0700 Subject: [PATCH 059/106] Remove bg_mask in favor of perturbable_mask --- mart/attack/composer.py | 21 +++++------ mart/configs/datamodule/coco_yolov3.yaml | 2 +- .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 1 - mart/datamodules/coco.py | 4 +- mart/transforms/extended.py | 37 +++++-------------- 5 files changed, 22 insertions(+), 43 deletions(-) diff --git a/mart/attack/composer.py b/mart/attack/composer.py index f73fb869..83238f5d 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -75,11 +75,10 @@ def compose(self, perturbation, *, input, target): class Composite(Composer): """We assume an adversary underlays a patch to the input.""" - def __init__(self, premultiplied_alpha=False, bg_mask_key=None): + def __init__(self, premultiplied_alpha=False): super().__init__() self.premultiplied_alpha = premultiplied_alpha - self.bg_mask_key = bg_mask_key def compose(self, perturbation, *, input, target): # True is mutable, False is immutable. @@ -89,12 +88,6 @@ def compose(self, perturbation, *, input, target): # because some data modules (e.g. 
Armory) gives binary mask. perturbable_mask = perturbable_mask.to(input) - # Keep portion of perturbation that is background - if self.bg_mask_key: - bg_mask = target[self.bg_mask_key] - perturbation = perturbation * bg_mask - perturbable_mask = perturbable_mask * bg_mask - if not self.premultiplied_alpha: perturbation = perturbation * perturbable_mask @@ -140,12 +133,18 @@ def compose(self, perturbation, *, input, target): mask_perturbation = self.warp(mask_perturbation) mask_perturbation = crop(mask_perturbation) - # Clamp perturbation to input min/max - perturbation = mask_perturbation[1:] + # Set/update perturbable mask + perturbable_mask = 1 + if "perturbable_mask" in target: + perturbable_mask = target["perturbable_mask"] # NCHW + perturbable_mask *= mask_perturbation[:1] # CHW + + # Pre multiply perturbation and clamp it to input min/max + perturbation = mask_perturbation[1:] * perturbable_mask perturbation.clamp_(*self.clamp) # Set mask for super().compose - target["perturbable_mask"] = mask_perturbation[:1] + target["perturbable_mask"] = perturbable_mask return super().compose(perturbation, input=input, target=target) diff --git a/mart/configs/datamodule/coco_yolov3.yaml b/mart/configs/datamodule/coco_yolov3.yaml index 15183e08..27be09d1 100644 --- a/mart/configs/datamodule/coco_yolov3.yaml +++ b/mart/configs/datamodule/coco_yolov3.yaml @@ -16,7 +16,7 @@ train_dataset: - _target_: mart.transforms.RemapLabels - _target_: mart.transforms.PackBoxesAndLabels num_classes: 80 - - _target_: mart.transforms.CreateBackgroundMaskFromCOCOMasks + - _target_: mart.transforms.ConvertInstanceSegmentationToPerturbable val_dataset: transforms: ${..train_dataset.transforms} diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 6e66cd63..455e4d21 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -68,7 +68,6 @@ model: 
saturation: [0.5, 1.5] hue: [-0.05, 0.05] pixel_scale: 1.0 - bg_mask_key: "bg_mask" loss: weights: [1, 1, 1e-5] diff --git a/mart/datamodules/coco.py b/mart/datamodules/coco.py index 7f4f8348..795f492d 100644 --- a/mart/datamodules/coco.py +++ b/mart/datamodules/coco.py @@ -124,10 +124,10 @@ def yolo_collate_fn(batch): # Pad packed using torch.nested target["packed"] = to_padded_tensor(target["packed"]) - COLLATE_KEYS = ["packed", "packed_length", "bg_mask"] + COLLATABLE_KEYS = ["packed", "packed_length", "perturbable_mask"] for key in target.keys(): - if key in COLLATE_KEYS: + if key in COLLATABLE_KEYS: target[key] = default_collate(target[key]) return images, target diff --git a/mart/transforms/extended.py b/mart/transforms/extended.py index e0ab51d3..56064762 100644 --- a/mart/transforms/extended.py +++ b/mart/transforms/extended.py @@ -37,8 +37,7 @@ "ConvertBoxesToCXCYHW", "RemapLabels", "PackBoxesAndLabels", - "CreateBackgroundMaskFromCOCOMasks", - "CreateBackgroundMaskFromImage", + "CreatePerturbableMaskFromImage", ] @@ -125,7 +124,7 @@ class ConvertInstanceSegmentationToPerturbable(ExTransform): """Merge all instance masks and reverse.""" def __call__(self, image, target): - perturbable_mask = torch.sum(target["masks"], dim=0) == 0 + perturbable_mask = torch.sum(target["masks"], dim=0, keepdim=True) == 0 # Convert to float to be differentiable. 
target["perturbable_mask"] = perturbable_mask.float() @@ -251,8 +250,8 @@ def __call__( target["masks"] = self.pad_masks(target["masks"], padding) if "keypoints" in target: target["keypoints"] = self.pad_keypoints(target["keypoints"], padding) - if "bg_mask" in target: - target["bg_mask"] = self.pad_masks(target["bg_mask"], padding) + if "perturbable_mask" in target: + target["perturbable_mask"] = self.pad_masks(target["perturbable_mask"], padding) return image, target @@ -293,8 +292,8 @@ def __call__( target["masks"] = self.resize_masks(target["masks"], (dw, dh)) if "keypoints" in target: target["keypoints"] = self.resize_keypoints(target["keypoints"], (dw, dh)) - if "bg_mask" in target: - target["bg_mask"] = self.resize_masks(target["bg_mask"], (dw, dh)) + if "perturbable_mask" in target: + target["perturbable_mask"] = self.resize_masks(target["perturbable_mask"], (dw, dh)) return image, target @@ -471,25 +470,7 @@ def __call__( return image, target -class CreateBackgroundMaskFromCOCOMasks(ExTransform): - def __call__( - self, - image: Tensor, - target: dict[str, Tensor], - ): - assert "masks" in target - masks = target["masks"] - - # Collapse masks into single foreground mask - fg_mask = masks.any(dim=0, keepdim=True) - - # Turn foreground mask into background mask - target["bg_mask"] = 1 - fg_mask - - return image, target - - -class CreateBackgroundMaskFromImage(ExTransform): +class CreatePerturbableMaskFromImage(ExTransform): def __init__(self, chroma_key, threshold): self.chroma_key = torch.tensor(chroma_key) self.threshold = threshold @@ -502,8 +483,8 @@ def __call__( self.chroma_key = self.chroma_key.to(image.device) l2_dist = ((image - self.chroma_key[:, None, None]) ** 2).sum(dim=0, keepdim=True).sqrt() - bg_mask = l2_dist <= self.threshold + perturbable_mask = l2_dist <= self.threshold - target["bg_mask"] = bg_mask + target["perturbable_mask"] = perturbable_mask.float() return image, target From a5704906b672157c93af5a16f16973f184e8b680 Mon Sep 17 
00:00:00 2001 From: Cory Cornelius Date: Thu, 27 Apr 2023 14:00:39 -0700 Subject: [PATCH 060/106] Add support for batch warping --- mart/attack/composer.py | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/mart/attack/composer.py b/mart/attack/composer.py index 83238f5d..ef92f56f 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -116,31 +116,47 @@ def __init__( ): super().__init__(*args, premultiplied_alpha=premultiplied_alpha, **kwargs) - self.warp = warp + self._warp = warp self.clamp = clamp + def warp(self, perturbation, *, input, target): + # Support for batch warping + if len(input.shape) == 4 and len(perturbation.shape) == 3: + return torch.stack([self.warp(perturbation, input=inp, target=target) for inp in input]) + + return self._warp(perturbation) + def compose(self, perturbation, *, input, target): + # FIXME: This is a hack to make the perturbation the same shape as the input. This shouldn't + # actually crop but pad the perturbation instead. crop = T.RandomCrop(input.shape[-2:], pad_if_needed=True) # Create mask of ones to keep track of filled in pixels mask = torch.ones_like(perturbation[:1]) - # Add mask to perturbation so we can keep track of warping. Note the use of - # premultiplied alpha here. - mask_perturbation = torch.cat((mask, mask * perturbation)) + # Add mask to perturbation so we can keep track of warping. + perturbation = torch.cat((mask, perturbation)) - # Apply warp transform and crop/pad to input size - mask_perturbation = self.warp(mask_perturbation) - mask_perturbation = crop(mask_perturbation) + # Apply warp transform + perturbation = self.warp(perturbation, input=input, target=target) + perturbation = crop(perturbation) + + # Extract mask from perturbation. The use of channels first forces this hack. + if len(perturbation.shape) == 4: + mask = perturbation[:, :1, ...] + perturbation = perturbation[:, 1:, ...] + else: + mask = perturbation[:1, ...] 
+ perturbation = perturbation[1:, ...] # Set/update perturbable mask perturbable_mask = 1 if "perturbable_mask" in target: - perturbable_mask = target["perturbable_mask"] # NCHW - perturbable_mask *= mask_perturbation[:1] # CHW + perturbable_mask = target["perturbable_mask"] + perturbable_mask = perturbable_mask * mask # Pre multiply perturbation and clamp it to input min/max - perturbation = mask_perturbation[1:] * perturbable_mask + perturbation = perturbation * perturbable_mask perturbation.clamp_(*self.clamp) # Set mask for super().compose From a5a5ca8be33e7910c98cd771fc441a7f5e4924b3 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 27 Apr 2023 16:25:08 -0700 Subject: [PATCH 061/106] Implement proper Composite for all cases --- mart/attack/composer.py | 62 ++++++++++++++++++++++++++++++++----- mart/transforms/extended.py | 24 +++++++++++++- 2 files changed, 78 insertions(+), 8 deletions(-) diff --git a/mart/attack/composer.py b/mart/attack/composer.py index ef92f56f..a1ce8dfa 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -121,16 +121,65 @@ def __init__( def warp(self, perturbation, *, input, target): # Support for batch warping + if self._warp is not None: + if len(input.shape) == 4 and len(perturbation.shape) == 3: + return torch.stack( + [self.warp(perturbation, input=inp, target=target) for inp in input] + ) + else: + pert_w, pert_h = F.get_image_size(perturbation) + image_w, image_h = F.get_image_size(input) + + # Pad perturbation to image size + if pert_w < image_w or pert_h < image_h: + # left, top, right and bottom + padding = [0, 0, max(image_w - pert_w, 0), max(image_h - pert_h, 0)] + perturbation = F.pad(perturbation, padding) + + perturbation = self._warp(perturbation) + + # Crop perturbation to image size + if pert_w != image_w or pert_h != image_h: + perturbation = F.crop(perturbation, 0, 0, image_h, image_w) + return perturbation + + # Use gs_coords to do fixed perspective warp if len(input.shape) == 4 and 
len(perturbation.shape) == 3: - return torch.stack([self.warp(perturbation, input=inp, target=target) for inp in input]) + return torch.stack( + [ + self.warp(perturbation, input=inp, target={"gs_coords": endpoints}) + for inp, endpoints in zip(input, target["gs_coords"]) + ] + ) + else: + # coordinates are [[left, top], [right, top], [right, bottom], [left, bottom]] + # perturbation is CHW + startpoints = [ + [0, 0], + [perturbation.shape[2], 0], + [perturbation.shape[2], perturbation.shape[1]], + [0, perturbation.shape[1]], + ] + endpoints = target["gs_coords"] - return self._warp(perturbation) + pert_w, pert_h = F.get_image_size(perturbation) + image_w, image_h = F.get_image_size(input) - def compose(self, perturbation, *, input, target): - # FIXME: This is a hack to make the perturbation the same shape as the input. This shouldn't - # actually crop but pad the perturbation instead. - crop = T.RandomCrop(input.shape[-2:], pad_if_needed=True) + # Pad perturbation to image size + if pert_w < image_w or pert_h < image_h: + # left, top, right and bottom + padding = [0, 0, max(image_w - pert_w, 0), max(image_h - pert_h, 0)] + perturbation = F.pad(perturbation, padding) + + perturbation = F.perspective(perturbation, startpoints, endpoints) + # Crop perturbation to image size + if pert_w != image_w or pert_h != image_h: + perturbation = F.crop(perturbation, 0, 0, image_h, image_w) + return perturbation + + + def compose(self, perturbation, *, input, target): # Create mask of ones to keep track of filled in pixels mask = torch.ones_like(perturbation[:1]) @@ -139,7 +188,6 @@ def compose(self, perturbation, *, input, target): # Apply warp transform perturbation = self.warp(perturbation, input=input, target=target) - perturbation = crop(perturbation) # Extract mask from perturbation. The use of channels first forces this hack. 
if len(perturbation.shape) == 4: diff --git a/mart/transforms/extended.py b/mart/transforms/extended.py index 56064762..87f13ae0 100644 --- a/mart/transforms/extended.py +++ b/mart/transforms/extended.py @@ -252,6 +252,8 @@ def __call__( target["keypoints"] = self.pad_keypoints(target["keypoints"], padding) if "perturbable_mask" in target: target["perturbable_mask"] = self.pad_masks(target["perturbable_mask"], padding) + if "gs_coords" in target: + target["gs_coords"] = self.pad_coordinates(target["gs_coords"], padding) return image, target @@ -269,6 +271,14 @@ def pad_masks(self, masks, padding): def pad_keypoints(self, keypoints, padding): raise NotImplementedError + def pad_coordinates(self, coordinates, padding): + # coordinates are [[left, top], [right, top], [right, bottom], [left, bottom]] + # padding is [left, top, right bottom] + coordinates[:, 0] += padding[0] # left padding + coordinates[:, 1] += padding[1] # top padding + + return coordinates + class Resize(ExTransform): def __init__(self, size): @@ -293,7 +303,11 @@ def __call__( if "keypoints" in target: target["keypoints"] = self.resize_keypoints(target["keypoints"], (dw, dh)) if "perturbable_mask" in target: - target["perturbable_mask"] = self.resize_masks(target["perturbable_mask"], (dw, dh)) + target["perturbable_mask"] = self.resize_masks( + target["perturbable_mask"], (dw, dh) + ) + if "gs_coords" in target: + target["gs_coords"] = self.resize_coordinates(target["gs_coords"], (dw, dh)) return image, target @@ -317,6 +331,14 @@ def resize_masks(self, masks, ratio): def resize_keypoints(self, keypoints, ratio): raise NotImplementedError + def resize_coordinates(self, coordinates, ratio): + # coordinates are [[left, top], [right, top], [right, bottom], [left, bottom]] + # ratio is [width, height] + coordinates[:, 0] = (coordinates[:, 0] * ratio[0]).to(int) # width ratio + coordinates[:, 1] = (coordinates[:, 1] * ratio[1]).to(int) # height ratio + + return coordinates + class 
ConvertBoxesToCXCYHW(ExTransform): def __call__( From 45639a16aeb3aaac8c9709d9e9f41917584132cb Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 27 Apr 2023 16:45:00 -0700 Subject: [PATCH 062/106] Image initializer scale to 0-1 --- mart/attack/initializer.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mart/attack/initializer.py b/mart/attack/initializer.py index 6d0a8fcf..ba04198a 100644 --- a/mart/attack/initializer.py +++ b/mart/attack/initializer.py @@ -64,7 +64,8 @@ def initialize_(self, parameter: torch.Tensor) -> None: class Image(Initializer): def __init__(self, path: str): - self.image = torchvision.io.read_image(path, torchvision.io.ImageReadMode.RGB) + self.image = torchvision.io.read_image(path, torchvision.io.ImageReadMode.RGB) / 255 + @torch.no_grad() def initialize_(self, parameter: torch.Tensor) -> None: From 592e27f58f31cc954fae7bb547473fc4072eda17 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 28 Apr 2023 08:18:03 -0700 Subject: [PATCH 063/106] Add override mode --- mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 455e4d21..2cb3d1d7 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -10,7 +10,7 @@ defaults: - override /datamodule: coco_yolov3 - override /model: yolov3 - override /metric: average_precision - - override /callbacks: [perturbation_visualizer, lr_monitor] + - override /callbacks: [perturbation_visualizer, lr_monitor, override_mode] task_name: "COCO_YOLOv3_ShapeShifter" tags: ["adv"] @@ -29,6 +29,11 @@ callbacks: perturbation_visualizer: frequency: 500 + override_mode: + training_mode: eval + validation_mode: eval + test_mode: eval + datamodule: num_workers: 32 ims_per_batch: 16 From 349c56c938dc8bf88ba057ad151cd286202d8db8 Mon 
Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 28 Apr 2023 08:18:48 -0700 Subject: [PATCH 064/106] Decrease score threshold in loss --- mart/models/yolov3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/models/yolov3.py b/mart/models/yolov3.py index 11885545..45eca22f 100644 --- a/mart/models/yolov3.py +++ b/mart/models/yolov3.py @@ -93,7 +93,7 @@ def forward(self, x): class Loss(torch.nn.Module): - def __init__(self, image_size, average=True, score_thresh=0.1, target_idx=0): + def __init__(self, image_size, average=True, score_thresh=0.01, target_idx=0): super().__init__() self.image_size = image_size From 8a8f73a98d1061d7e363995274035d62e213f170 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 28 Apr 2023 08:19:00 -0700 Subject: [PATCH 065/106] Normalize losses by batch size --- mart/models/yolov3.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/mart/models/yolov3.py b/mart/models/yolov3.py index 45eca22f..431c8882 100644 --- a/mart/models/yolov3.py +++ b/mart/models/yolov3.py @@ -121,9 +121,11 @@ def forward(self, logits, target, **kwargs): pred_conf_logit, tgt_zero, reduction="none" ) hide_objects_loss = hide_objects_losses[score_mask].sum() + hide_objects_loss = hide_objects_loss / logits.shape[0] # make target objectness go to zero hide_target_objects_loss = hide_objects_losses[target_mask & score_mask].sum() + hide_target_objects_loss = hide_target_objects_loss / logits.shape[0] # make target logit go to zero target_class_logit = class_logits[..., 0] # 0 == person @@ -131,9 +133,15 @@ def forward(self, logits, target, **kwargs): target_class_logit, tgt_zero, reduction="none" ) target_class_loss = target_class_losses[score_mask].sum() + target_class_loss = target_class_loss / logits.shape[0] # make correctly predicted target class logit go to zero correct_target_class_loss = target_class_losses[target_mask & score_mask].sum() + correct_target_class_loss = correct_target_class_loss / 
logits.shape[0] + + score_count = score_mask.sum() / logits.shape[0] + target_count = target_mask.sum() / logits.shape[0] + target_score_count = (target_mask & score_mask).sum() / logits.shape[0] return { "total_loss": total_loss, @@ -145,9 +153,9 @@ def forward(self, logits, target, **kwargs): "hide_target_objects_loss": hide_target_objects_loss, "target_class_loss": target_class_loss, "correct_target_class_loss": correct_target_class_loss, - "score_count": score_mask.sum(), - "target_count": target_mask.sum(), - "target_score_count": (target_mask & score_mask).sum(), + "score_count": score_count, + "target_count": target_count, + "target_score_count": target_score_count, } From e52c92f22062a84e3af7b12cd10b28e1e96bcdde Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 28 Apr 2023 09:29:54 -0700 Subject: [PATCH 066/106] Move override mode into callback --- mart/callbacks/__init__.py | 2 +- mart/callbacks/freeze.py | 41 +++++++++++++++ mart/callbacks/mode.py | 51 ------------------- mart/configs/callbacks/freeze.yaml | 3 ++ mart/configs/callbacks/override_mode.yaml | 5 -- .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 10 ++-- mart/models/modular.py | 13 ----- 7 files changed, 48 insertions(+), 77 deletions(-) create mode 100644 mart/callbacks/freeze.py delete mode 100644 mart/callbacks/mode.py create mode 100644 mart/configs/callbacks/freeze.yaml delete mode 100644 mart/configs/callbacks/override_mode.yaml diff --git a/mart/callbacks/__init__.py b/mart/callbacks/__init__.py index 56bdc7af..7f5a2724 100644 --- a/mart/callbacks/__init__.py +++ b/mart/callbacks/__init__.py @@ -1,6 +1,6 @@ from .eval_mode import * +from .freeze import * from .gradients import * -from .mode import * from .no_grad_mode import * from .progress_bar import * from .visualizer import * diff --git a/mart/callbacks/freeze.py b/mart/callbacks/freeze.py new file mode 100644 index 00000000..56d6d226 --- /dev/null +++ b/mart/callbacks/freeze.py @@ -0,0 +1,41 @@ +# +# Copyright (C) 2022 Intel 
Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +from __future__ import annotations + +import torch +from pytorch_lightning.callbacks import Callback +from pytorch_lightning.utilities.exceptions import MisconfigurationException + +__all__ = ["FreezeModule"] + + +class FreezeModule(Callback): + def __init__( + self, + module="backbone", + ): + self.name = module + + def setup(self, trainer, pl_module, stage): + module = getattr(pl_module.model, self.name, None) + + if module is None or not isinstance(module, torch.nn.Module): + raise MisconfigurationException( + f"The LightningModule should have a nn.Module `{self.name}` attribute" + ) + + module.eval() + + def on_train_epoch_start(self, trainer, pl_module): + module = getattr(pl_module.model, self.name, None) + + if module is None or not isinstance(module, torch.nn.Module): + raise MisconfigurationException( + f"The LightningModule should have a nn.Module `{self.name}` attribute" + ) + + module.eval() diff --git a/mart/callbacks/mode.py b/mart/callbacks/mode.py deleted file mode 100644 index 65544de4..00000000 --- a/mart/callbacks/mode.py +++ /dev/null @@ -1,51 +0,0 @@ -# -# Copyright (C) 2022 Intel Corporation -# -# SPDX-License-Identifier: BSD-3-Clause -# - -from __future__ import annotations - -from pytorch_lightning.callbacks import Callback - -__all__ = ["OverrideMode"] - - -class OverrideMode(Callback): - def __init__( - self, - training_mode: str = "train", - validation_mode: str = "eval", - test_mode: str = "eval", - ): - self.training_mode = training_mode == "train" - self.validation_mode = validation_mode == "train" - self.test_mode = test_mode == "train" - - self.mode = None - - def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, unused=0): - self.mode = pl_module.training - pl_module.train(self.training_mode) - - def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, unused=0): - pl_module.train(self.mode) - self.mode = None - - def 
on_validation_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): - self.mode = pl_module.training - pl_module.train(self.validation_mode) - - def on_validation_batch_end( - self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx - ): - pl_module.train(self.mode) - self.mode = None - - def on_test_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): - self.mode = pl_module.training - pl_module.train(self.test_mode) - - def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx): - pl_module.train(self.mode) - self.mode = None diff --git a/mart/configs/callbacks/freeze.yaml b/mart/configs/callbacks/freeze.yaml new file mode 100644 index 00000000..d5471f24 --- /dev/null +++ b/mart/configs/callbacks/freeze.yaml @@ -0,0 +1,3 @@ +freeze_module: + _target_: mart.callbacks.FreezeModule + module_name: ??? diff --git a/mart/configs/callbacks/override_mode.yaml b/mart/configs/callbacks/override_mode.yaml deleted file mode 100644 index d69d7050..00000000 --- a/mart/configs/callbacks/override_mode.yaml +++ /dev/null @@ -1,5 +0,0 @@ -override_mode: - _target_: mart.callbacks.OverrideMode - training_mode: ??? - validation_mode: ??? - test_mode: ??? 
diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 2cb3d1d7..d7b4fd4c 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -10,7 +10,7 @@ defaults: - override /datamodule: coco_yolov3 - override /model: yolov3 - override /metric: average_precision - - override /callbacks: [perturbation_visualizer, lr_monitor, override_mode] + - override /callbacks: [perturbation_visualizer, lr_monitor, freeze] task_name: "COCO_YOLOv3_ShapeShifter" tags: ["adv"] @@ -29,10 +29,8 @@ callbacks: perturbation_visualizer: frequency: 500 - override_mode: - training_mode: eval - validation_mode: eval - test_mode: eval + freeze: + module: "yolov3" datamodule: num_workers: 32 @@ -77,8 +75,6 @@ model: loss: weights: [1, 1, 1e-5] - freeze: "yolov3" - load_state_dict: yolov3: ${paths.data_dir}/yolov3_original.pt diff --git a/mart/models/modular.py b/mart/models/modular.py index 8054e75d..4b23da49 100644 --- a/mart/models/modular.py +++ b/mart/models/modular.py @@ -33,7 +33,6 @@ def __init__( test_step_log=None, test_metrics=None, gradient_modifier=None, - freeze=None, load_state_dict=None, ): super().__init__() @@ -76,18 +75,6 @@ def __init__( self.gradient_modifier = gradient_modifier - # Turn of gradients for parameters - for name, param in self.model.named_parameters(): - if re.match(freeze, name): - logger.info(f"Setting requires_grad to False for {name}.") - param.requires_grad_(False) - - # Turn off BatchNorm updating - for name, module in self.model.named_modules(): - if re.match(freeze, name) and "Norm" in module.__class__.__name__: - logger.info(f"Setting track_running_stats to False for {name}.") - module.track_running_stats = False - # Load state dict for given modules load_state_dict = load_state_dict or {} for name, path in load_state_dict.items(): From ae4d22ff98c52729175932cbfa6917a56c63b24a Mon Sep 17 00:00:00 2001 From: Cory 
Cornelius Date: Fri, 28 Apr 2023 09:31:31 -0700 Subject: [PATCH 067/106] style --- mart/attack/composer.py | 1 - mart/attack/initializer.py | 1 - 2 files changed, 2 deletions(-) diff --git a/mart/attack/composer.py b/mart/attack/composer.py index a1ce8dfa..c0f1bdec 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -178,7 +178,6 @@ def warp(self, perturbation, *, input, target): perturbation = F.crop(perturbation, 0, 0, image_h, image_w) return perturbation - def compose(self, perturbation, *, input, target): # Create mask of ones to keep track of filled in pixels mask = torch.ones_like(perturbation[:1]) diff --git a/mart/attack/initializer.py b/mart/attack/initializer.py index ba04198a..30209107 100644 --- a/mart/attack/initializer.py +++ b/mart/attack/initializer.py @@ -66,7 +66,6 @@ class Image(Initializer): def __init__(self, path: str): self.image = torchvision.io.read_image(path, torchvision.io.ImageReadMode.RGB) / 255 - @torch.no_grad() def initialize_(self, parameter: torch.Tensor) -> None: image = self.image From 9c5e715b5d5f60d83522460b5f334de55a0c0a47 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 28 Apr 2023 09:35:39 -0700 Subject: [PATCH 068/106] bugfix --- mart/configs/callbacks/freeze.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mart/configs/callbacks/freeze.yaml b/mart/configs/callbacks/freeze.yaml index d5471f24..61678512 100644 --- a/mart/configs/callbacks/freeze.yaml +++ b/mart/configs/callbacks/freeze.yaml @@ -1,3 +1,3 @@ -freeze_module: +freeze: _target_: mart.callbacks.FreezeModule - module_name: ??? + module: ??? 
From 9cb5321f85193f260007c672ab708533b8e6a5bf Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 28 Apr 2023 10:10:36 -0700 Subject: [PATCH 069/106] bugfix freeze --- mart/callbacks/freeze.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mart/callbacks/freeze.py b/mart/callbacks/freeze.py index 56d6d226..05dcf54f 100644 --- a/mart/callbacks/freeze.py +++ b/mart/callbacks/freeze.py @@ -28,7 +28,8 @@ def setup(self, trainer, pl_module, stage): f"The LightningModule should have a nn.Module `{self.name}` attribute" ) - module.eval() + for param in module.parameters(): + param.requires_grad_(False) def on_train_epoch_start(self, trainer, pl_module): module = getattr(pl_module.model, self.name, None) From b6d5d65b03e76ca96950160f9590ecfbb6b277ce Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 28 Apr 2023 13:30:02 -0700 Subject: [PATCH 070/106] Add alpha-aware ColorJitter --- mart/attack/composer.py | 10 +++++----- mart/transforms/transforms.py | 19 +++++++++++++++++++ 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/mart/attack/composer.py b/mart/attack/composer.py index c0f1bdec..4169a536 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -183,18 +183,18 @@ def compose(self, perturbation, *, input, target): mask = torch.ones_like(perturbation[:1]) # Add mask to perturbation so we can keep track of warping. - perturbation = torch.cat((mask, perturbation)) + perturbation = torch.cat((perturbation, mask)) # Apply warp transform perturbation = self.warp(perturbation, input=input, target=target) # Extract mask from perturbation. The use of channels first forces this hack. if len(perturbation.shape) == 4: - mask = perturbation[:, :1, ...] - perturbation = perturbation[:, 1:, ...] + mask = perturbation[:, 3:, ...] + perturbation = perturbation[:, :3, ...] else: - mask = perturbation[:1, ...] - perturbation = perturbation[1:, ...] + mask = perturbation[3:, ...] + perturbation = perturbation[:3, ...] 
# Set/update perturbable mask perturbable_mask = 1 diff --git a/mart/transforms/transforms.py b/mart/transforms/transforms.py index 4c7f29f7..4524f6b7 100644 --- a/mart/transforms/transforms.py +++ b/mart/transforms/transforms.py @@ -16,6 +16,7 @@ "Chunk", "TupleTransforms", "GetItems", + "ColorJitter", ] @@ -101,3 +102,21 @@ def __init__(self, keys): def __call__(self, x): x_list = [x[key] for key in self.keys] return x_list + + +class ColorJitter(T.ColorJitter): + def forward(self, img): + # Assume final channel is alpha + if len(img.shape) == 3: + alpha = img[3:4, ...] + rgb = img[:3, ...] + dim = 0 + elif len(img.shape) == 4: + alpha = img[:, 3:4, ...] + rgb = img[:, :3, ...] + dim = 1 + else: + raise NotImplementedError + + rgb = super().forward(rgb) + return torch.cat([rgb, alpha], dim=dim) From f6a174e73c1b5dcc50219f5f06e7bbced3d973fd Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 28 Apr 2023 13:58:29 -0700 Subject: [PATCH 071/106] Batch ColorJitter --- mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index d7b4fd4c..80170823 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -3,7 +3,7 @@ defaults: - /attack/perturber@model.modules.perturber: default - /attack/perturber/initializer@model.modules.perturber.initializer: uniform - - /attack/perturber/composer@model.modules.perturber.composer: color_jitter_warp_composite + - /attack/perturber/composer@model.modules.perturber.composer: warp_composite - /attack/perturber/projector@model.modules.perturber.projector: range - /attack/gradient_modifier@model.gradient_modifier: lp_normalizer - override /optimization: super_convergence @@ -60,6 +60,11 @@ model: warp: _target_: torchvision.transforms.Compose transforms: + - _target_: 
mart.transforms.ColorJitter + brightness: [0.5, 1.5] + contrast: [0.5, 1.5] + saturation: [0.5, 1.0] + hue: [-0.05, 0.05] - _target_: torchvision.transforms.RandomAffine degrees: [-5, 5] scale: [0.3, 0.5] From 622698b687b5bb51ac86300f1fa8c1ebf87734a0 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 28 Apr 2023 14:00:28 -0700 Subject: [PATCH 072/106] Update RandomAffine parameters --- mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 80170823..eb6ed6dc 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -67,7 +67,8 @@ model: hue: [-0.05, 0.05] - _target_: torchvision.transforms.RandomAffine degrees: [-5, 5] - scale: [0.3, 0.5] + translate: [0.1, 0.25] + scale: [0.4, 0.6] shear: [-3, 3, -3, 3] interpolation: 2 # BILINEAR clamp: [0, 1] From ffb67550454698ad20d0b4150381f535798e5dae Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 28 Apr 2023 14:17:30 -0700 Subject: [PATCH 073/106] bugfix --- mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index eb6ed6dc..9ec4a39e 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -72,11 +72,6 @@ model: shear: [-3, 3, -3, 3] interpolation: 2 # BILINEAR clamp: [0, 1] - brightness: [0.5, 1.5] - contrast: [0.5, 1.5] - saturation: [0.5, 1.5] - hue: [-0.05, 0.05] - pixel_scale: 1.0 loss: weights: [1, 1, 1e-5] From 6f11f2b51afa55b15f2bac98e19a0ef33ab64674 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 28 Apr 2023 14:29:37 -0700 Subject: [PATCH 074/106] Apply warp only in training mode --- mart/attack/composer.py 
| 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mart/attack/composer.py b/mart/attack/composer.py index 4169a536..0b4ad4c2 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -120,8 +120,8 @@ def __init__( self.clamp = clamp def warp(self, perturbation, *, input, target): - # Support for batch warping - if self._warp is not None: + if self._warp is not None and self.training: + # Support for batch warping if len(input.shape) == 4 and len(perturbation.shape) == 3: return torch.stack( [self.warp(perturbation, input=inp, target=target) for inp in input] From 7628f8fd1356b29b6582d81938e5bfbad00ba48a Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 3 May 2023 13:31:04 -0700 Subject: [PATCH 075/106] Normalize individual losses --- mart/models/yolov3.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mart/models/yolov3.py b/mart/models/yolov3.py index 431c8882..38ed9db7 100644 --- a/mart/models/yolov3.py +++ b/mart/models/yolov3.py @@ -108,6 +108,12 @@ def forward(self, logits, target, **kwargs): losses = yolo_loss_fn(logits, targets, lengths, self.image_size, self.average) total_loss, coord_loss, obj_loss, noobj_loss, class_loss = losses + # normalize individual losses by batch size + coord_loss = coord_loss / logits.shape[0] + obj_loss = obj_loss / logits.shape[0] + noobj_loss = noobj_loss / logits.shape[0] + class_loss = class_loss / logits.shape[0] + pred_conf_logit = logits[..., 4] pred_conf_score = torch.sigmoid(pred_conf_logit) score_mask = pred_conf_score > self.score_thresh From 7ebe5bc7c5de0219e992dd74aa8e17314e924c73 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 5 May 2023 08:42:22 -0700 Subject: [PATCH 076/106] Add image initializer --- mart/attack/initializer.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/mart/attack/initializer.py b/mart/attack/initializer.py index d66bcf9f..30209107 100644 --- a/mart/attack/initializer.py +++ b/mart/attack/initializer.py @@ -9,6 +9,8 
@@ from typing import Iterable import torch +import torchvision +import torchvision.transforms.functional as F class Initializer: @@ -58,3 +60,18 @@ def initialize_(self, parameter: torch.Tensor) -> None: # We don't do tensor.renorm_() because the first dim is not the batch dim. pert_norm = parameter.norm(p=self.p) parameter.mul_(self.eps / pert_norm) + + +class Image(Initializer): + def __init__(self, path: str): + self.image = torchvision.io.read_image(path, torchvision.io.ImageReadMode.RGB) / 255 + + @torch.no_grad() + def initialize_(self, parameter: torch.Tensor) -> None: + image = self.image + + if self.image.shape != parameter.shape: + print(f"Resizing image from {image.shape} to {parameter.shape}...") + image = F.resize(image, parameter.shape[1:]) + + parameter.copy_(image) From 92d9965075ccb1a264aef5610e3b48d13433359d Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 5 May 2023 08:43:47 -0700 Subject: [PATCH 077/106] Add scale parameter --- mart/attack/initializer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mart/attack/initializer.py b/mart/attack/initializer.py index 30209107..65886bbf 100644 --- a/mart/attack/initializer.py +++ b/mart/attack/initializer.py @@ -63,14 +63,14 @@ def initialize_(self, parameter: torch.Tensor) -> None: class Image(Initializer): - def __init__(self, path: str): - self.image = torchvision.io.read_image(path, torchvision.io.ImageReadMode.RGB) / 255 + def __init__(self, path: str, scale: int = 1): + self.image = torchvision.io.read_image(path, torchvision.io.ImageReadMode.RGB) / scale @torch.no_grad() def initialize_(self, parameter: torch.Tensor) -> None: image = self.image - if self.image.shape != parameter.shape: + if image.shape != parameter.shape: print(f"Resizing image from {image.shape} to {parameter.shape}...") image = F.resize(image, parameter.shape[1:]) From eb3172d5b5b0ce5be9de8423e71ae3ebca7f4ba8 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 22 May 2023 15:58:50 -0700 
Subject: [PATCH 078/106] Fix sequences --- .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 55 ++++++++++++------- 1 file changed, 35 insertions(+), 20 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 9ec4a39e..d541c3f9 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -3,8 +3,8 @@ defaults: - /attack/perturber@model.modules.perturber: default - /attack/perturber/initializer@model.modules.perturber.initializer: uniform - - /attack/perturber/composer@model.modules.perturber.composer: warp_composite - /attack/perturber/projector@model.modules.perturber.projector: range + - /attack/composer@model.modules.composer: warp_composite - /attack/gradient_modifier@model.gradient_modifier: lp_normalizer - override /optimization: super_convergence - override /datamodule: coco_yolov3 @@ -56,22 +56,22 @@ model: min: 0.0 max: 1.0 - composer: - warp: - _target_: torchvision.transforms.Compose - transforms: - - _target_: mart.transforms.ColorJitter - brightness: [0.5, 1.5] - contrast: [0.5, 1.5] - saturation: [0.5, 1.0] - hue: [-0.05, 0.05] - - _target_: torchvision.transforms.RandomAffine - degrees: [-5, 5] - translate: [0.1, 0.25] - scale: [0.4, 0.6] - shear: [-3, 3, -3, 3] - interpolation: 2 # BILINEAR - clamp: [0, 1] + composer: + warp: + _target_: torchvision.transforms.Compose + transforms: + - _target_: mart.transforms.ColorJitter + brightness: [0.5, 1.5] + contrast: [0.5, 1.5] + saturation: [0.5, 1.0] + hue: [-0.05, 0.05] + - _target_: torchvision.transforms.RandomAffine + degrees: [-5, 5] + translate: [0.1, 0.25] + scale: [0.4, 0.6] + shear: [-3, 3, -3, 3] + interpolation: 2 # BILINEAR + clamp: [0, 1] loss: weights: [1, 1, 1e-5] @@ -87,8 +87,13 @@ model: training_sequence: seq005: perturber + seq006: + composer: + perturbation: "perturber" + input: "input" + target: "target" seq010: - yolov3: 
["perturber.input_adv"] + yolov3: composer seq030: loss: - losses.hide_target_objects_loss @@ -118,8 +123,13 @@ model: validation_sequence: seq005: perturber + seq006: + composer: + perturbation: "perturber" + input: "input" + target: "target" seq010: - yolov3: ["perturber.input_adv"] + yolov3: composer seq030: loss: - losses.hide_target_objects_loss @@ -128,8 +138,13 @@ model: test_sequence: seq005: perturber + seq006: + composer: + perturbation: "perturber" + input: "input" + target: "target" seq010: - yolov3: ["perturber.input_adv"] + yolov3: composer seq030: loss: - losses.hide_target_objects_loss From f42c1532704eb9ed9946136951039907e35ba9b8 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 23 May 2023 08:45:09 -0700 Subject: [PATCH 079/106] fix config --- mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index d541c3f9..578b2632 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -93,7 +93,7 @@ model: input: "input" target: "target" seq010: - yolov3: composer + yolov3: ["composer"] seq030: loss: - losses.hide_target_objects_loss @@ -129,7 +129,7 @@ model: input: "input" target: "target" seq010: - yolov3: composer + yolov3: ["composer"] seq030: loss: - losses.hide_target_objects_loss @@ -144,7 +144,7 @@ model: input: "input" target: "target" seq010: - yolov3: composer + yolov3: ["composer"] seq030: loss: - losses.hide_target_objects_loss From 9b6d386f87227fcfb9cc6cab7f3ca067310ea080 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 23 May 2023 08:58:29 -0700 Subject: [PATCH 080/106] fix configs --- .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 53 ++++++++----------- mart/configs/model/yolov3.yaml | 9 ++-- 2 files changed, 28 insertions(+), 34 deletions(-) diff --git 
a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 578b2632..e1d2aefe 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -1,10 +1,10 @@ # @package _global_ defaults: - - /attack/perturber@model.modules.perturber: default - - /attack/perturber/initializer@model.modules.perturber.initializer: uniform - - /attack/perturber/projector@model.modules.perturber.projector: range - - /attack/composer@model.modules.composer: warp_composite + - /attack/perturber@model.modules.perturbation: default + - /attack/perturber/initializer@model.modules.perturbation.initializer: uniform + - /attack/perturber/projector@model.modules.perturbation.projector: range + - /attack/composer@model.modules.input_adv: warp_composite - /attack/gradient_modifier@model.gradient_modifier: lp_normalizer - override /optimization: super_convergence - override /datamodule: coco_yolov3 @@ -45,7 +45,7 @@ datamodule: model: modules: - perturber: + perturbation: size: [3, 416, 234] initializer: @@ -56,7 +56,7 @@ model: min: 0.0 max: 1.0 - composer: + input_adv: warp: _target_: torchvision.transforms.Compose transforms: @@ -86,22 +86,19 @@ model: gradient_modifier: null training_sequence: - seq005: perturber - seq006: - composer: - perturbation: "perturber" - input: "input" - target: "target" + seq005: perturbation + seq006: input_adv seq010: - yolov3: ["composer"] + yolov3: + x: "input_adv" seq030: loss: - losses.hide_target_objects_loss - losses.correct_target_class_loss - - perturber.total_variation + - perturbation.total_variation seq050: output: - total_variation: perturber.total_variation + total_variation: perturbation.total_variation training_step_log: - loss @@ -122,31 +119,25 @@ model: training_metrics: null validation_sequence: - seq005: perturber - seq006: - composer: - perturbation: "perturber" - input: "input" - target: "target" + seq005: perturbation + 
seq006: input_adv seq010: - yolov3: ["composer"] + yolov3: + x: "input_adv" seq030: loss: - losses.hide_target_objects_loss - losses.correct_target_class_loss - - perturber.total_variation + - perturbation.total_variation test_sequence: - seq005: perturber - seq006: - composer: - perturbation: "perturber" - input: "input" - target: "target" + seq005: perturbation + seq006: input_adv seq010: - yolov3: ["composer"] + yolov3: + x: "input_adv" seq030: loss: - losses.hide_target_objects_loss - losses.correct_target_class_loss - - perturber.total_variation + - perturbation.total_variation diff --git a/mart/configs/model/yolov3.yaml b/mart/configs/model/yolov3.yaml index 30dc3121..c2778335 100644 --- a/mart/configs/model/yolov3.yaml +++ b/mart/configs/model/yolov3.yaml @@ -27,7 +27,8 @@ training_metrics: null training_sequence: seq010: - yolov3: ["input"] + yolov3: + x: "input" seq020: losses: @@ -61,7 +62,8 @@ training_sequence: validation_sequence: seq010: - yolov3: ["input"] + yolov3: + x: "input" seq020: losses: @@ -97,7 +99,8 @@ validation_sequence: test_sequence: seq010: - yolov3: ["input"] + yolov3: + x: "input" seq020: losses: From 5aa8c47e994a0eb1446952b48cd72604367d5ff5 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 23 May 2023 10:33:57 -0700 Subject: [PATCH 081/106] Fix callbacks --- .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index e1d2aefe..c59c760d 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -10,7 +10,14 @@ defaults: - override /datamodule: coco_yolov3 - override /model: yolov3 - override /metric: average_precision - - override /callbacks: [perturbation_visualizer, lr_monitor, freeze] + - override /callbacks: + [ + model_checkpoint, + lr_monitor, + 
perturbation_visualizer, + gradient_monitor, + freeze, + ] task_name: "COCO_YOLOv3_ShapeShifter" tags: ["adv"] @@ -26,12 +33,17 @@ trainer: precision: 32 callbacks: - perturbation_visualizer: - frequency: 500 + model_checkpoint: + monitor: "validation_metrics/map" + mode: "min" freeze: module: "yolov3" + perturbation_visualizer: + perturbation: "model.perturbation.perturbation" + frequency: 500 + datamodule: num_workers: 32 ims_per_batch: 16 From 628904836f345a0527048c77c53fea46033595ad Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 23 May 2023 10:42:19 -0700 Subject: [PATCH 082/106] remove total variation --- mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index c59c760d..8b18afd7 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -86,7 +86,7 @@ model: clamp: [0, 1] loss: - weights: [1, 1, 1e-5] + weights: [1, 1] load_state_dict: yolov3: ${paths.data_dir}/yolov3_original.pt @@ -107,10 +107,6 @@ model: loss: - losses.hide_target_objects_loss - losses.correct_target_class_loss - - perturbation.total_variation - seq050: - output: - total_variation: perturbation.total_variation training_step_log: - loss @@ -126,7 +122,6 @@ model: - target_count - score_count - target_score_count - - total_variation training_metrics: null @@ -140,7 +135,6 @@ model: loss: - losses.hide_target_objects_loss - losses.correct_target_class_loss - - perturbation.total_variation test_sequence: seq005: perturbation @@ -152,4 +146,3 @@ model: loss: - losses.hide_target_objects_loss - losses.correct_target_class_loss - - perturbation.total_variation From 382cd47f6677bad795a185f2c9cedaca5413ca2d Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 2 Jun 2023 14:03:25 -0700 Subject: [PATCH 083/106] Add YOLOv3 dependency 
--- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 6440af7b..a79b7311 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,6 +34,7 @@ dependencies = [ # ----- object detection----- # "pycocotools ~= 2.0.5", + "yolov3 @ git+https://github.com/mzweilin/YOLOv3-in-PyTorch.git#release", # -------- Adversary ---------# "robustbench @ git+https://github.com/RobustBench/robustbench.git@9a590683b7daecf963244dea402529f0d728c727", From 99a76696d6fb069c96576550d2ebc9c51aa189f2 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 9 Jun 2023 09:45:01 -0700 Subject: [PATCH 084/106] Use attrgetter --- mart/callbacks/freeze.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/mart/callbacks/freeze.py b/mart/callbacks/freeze.py index 894c684d..a57a284e 100644 --- a/mart/callbacks/freeze.py +++ b/mart/callbacks/freeze.py @@ -6,6 +6,8 @@ from __future__ import annotations +from operator import attrgetter + import torch from pytorch_lightning.callbacks import Callback from pytorch_lightning.utilities.exceptions import MisconfigurationException @@ -25,10 +27,9 @@ def __init__( self.name = module def setup(self, trainer, pl_module, stage): - # FIXME: Use DotDict? - module = getattr(pl_module.model, self.name, None) + module = attrgetter(self.name)(pl_module.model) - if module is None or not isinstance(module, torch.nn.Module): + if not isinstance(module, torch.nn.Module): raise MisconfigurationException( f"The LightningModule should have a nn.Module `{self.name}` attribute" ) @@ -43,10 +44,9 @@ def setup(self, trainer, pl_module, stage): logger.info(f"Setting eval mode for {name} ({module_kind})") def on_train_epoch_start(self, trainer, pl_module): - # FIXME: Use DotDict? 
- module = getattr(pl_module.model, self.name, None) + module = attrgetter(self.name)(pl_module.model) - if module is None or not isinstance(module, torch.nn.Module): + if not isinstance(module, torch.nn.Module): raise MisconfigurationException( f"The LightningModule should have a nn.Module `{self.name}` attribute" ) From 588068cd5e42b6b8822ac5b27024b0ab574ff77c Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 9 Jun 2023 10:04:00 -0700 Subject: [PATCH 085/106] Better implementation of ModelParamsNoGrad --- mart/callbacks/no_grad_mode.py | 47 ++++++++++++++++++++++++ mart/configs/callbacks/no_grad_mode.yaml | 2 + 2 files changed, 49 insertions(+) create mode 100644 mart/callbacks/no_grad_mode.py create mode 100644 mart/configs/callbacks/no_grad_mode.yaml diff --git a/mart/callbacks/no_grad_mode.py b/mart/callbacks/no_grad_mode.py new file mode 100644 index 00000000..2fc0bc1b --- /dev/null +++ b/mart/callbacks/no_grad_mode.py @@ -0,0 +1,47 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +from operator import attrgetter + +import torch +from pytorch_lightning.callbacks import Callback +from pytorch_lightning.utilities.exceptions import MisconfigurationException + +__all__ = ["ModelParamsNoGrad"] + + +class ModelParamsNoGrad(Callback): + """No gradient for model parameters during attack. + + This callback should not change the result. Don't use unless an attack runs faster. 
+ """ + + def __init__(self, pl_module_attr: str = None): + self._attr = pl_module_attr + + def get_module(self, pl_module): + module = pl_module + if self._attr is not None: + module = attrgetter(self._attr)(module) + + if not isinstance(module, torch.nn.Module): + raise MisconfigurationException( + f"The LightningModule should have a nn.Module `{self._attr}` attribute" + ) + + return module + + def setup(self, trainer, pl_module, stage): + module = self.get_module(pl_module) + + for param in module.parameters(): + param.requires_grad_(False) + + def teardown(self, trainer, pl_module, stage): + module = self.get_module(pl_module) + + for param in module.parameters(): + param.requires_grad_(True) diff --git a/mart/configs/callbacks/no_grad_mode.yaml b/mart/configs/callbacks/no_grad_mode.yaml new file mode 100644 index 00000000..6b4312fd --- /dev/null +++ b/mart/configs/callbacks/no_grad_mode.yaml @@ -0,0 +1,2 @@ +attack_in_eval_mode: + _target_: mart.callbacks.ModelParamsNoGrad From 3832d22a6958cca91aa4a0455f499fcb2dcea3e4 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 9 Jun 2023 10:33:43 -0700 Subject: [PATCH 086/106] Better implementation of AttackInEvalMode --- mart/callbacks/eval_mode.py | 41 +++++++++++++++++++ .../callbacks/attack_in_eval_mode.yaml | 2 + 2 files changed, 43 insertions(+) create mode 100644 mart/callbacks/eval_mode.py create mode 100644 mart/configs/callbacks/attack_in_eval_mode.yaml diff --git a/mart/callbacks/eval_mode.py b/mart/callbacks/eval_mode.py new file mode 100644 index 00000000..17a1b99e --- /dev/null +++ b/mart/callbacks/eval_mode.py @@ -0,0 +1,41 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +from pytorch_lightning.callbacks import Callback + +from mart import utils + +logger = utils.get_pylogger(__name__) + +__all__ = ["AttackInEvalMode"] + + +class AttackInEvalMode(Callback): + """Switch the model into eval mode during attack.""" + + def __init__(self, *module_kinds): + 
self.module_kinds = module_kinds + + def setup(self, trainer, pl_module, stage): + # This just logs to the console so the user can see visually see which modules will be in eval mode during training. + for name, module in pl_module.named_modules(): + module_kind = module.__class__.__name__ + if module_kind in self.module_kinds: + logger.info(f"Setting eval mode for {name} ({module_kind})") + + def on_train_epoch_start(self, trainer, pl_module): + # We must use on_train_epoch_start because PL will set pl_module to train mode right before this callback. + for name, module in pl_module.named_modules(): + module_kind = module.__class__.__name__ + if module_kind in self.module_kinds: + module.eval() + + def on_train_epoch_end(self, trainer, pl_module): + # FIXME: Why this is necessary? + for name, module in pl_module.named_modules(): + module_kind = module.__class__.__name__ + if module_kind in self.module_kinds: + module.train() diff --git a/mart/configs/callbacks/attack_in_eval_mode.yaml b/mart/configs/callbacks/attack_in_eval_mode.yaml new file mode 100644 index 00000000..2acdc953 --- /dev/null +++ b/mart/configs/callbacks/attack_in_eval_mode.yaml @@ -0,0 +1,2 @@ +attack_in_eval_mode: + _target_: mart.callbacks.AttackInEvalMode From a9348dfe582b04404a7ea28ce4ce7693edcaf3e6 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 9 Jun 2023 10:33:58 -0700 Subject: [PATCH 087/106] Log which params will have gradients disabled --- mart/callbacks/no_grad_mode.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/mart/callbacks/no_grad_mode.py b/mart/callbacks/no_grad_mode.py index 2fc0bc1b..e9cf5091 100644 --- a/mart/callbacks/no_grad_mode.py +++ b/mart/callbacks/no_grad_mode.py @@ -10,6 +10,10 @@ from pytorch_lightning.callbacks import Callback from pytorch_lightning.utilities.exceptions import MisconfigurationException +from mart import utils + +logger = utils.get_pylogger(__name__) + __all__ = ["ModelParamsNoGrad"] @@ -37,7 +41,8 @@ def 
get_module(self, pl_module): def setup(self, trainer, pl_module, stage): module = self.get_module(pl_module) - for param in module.parameters(): + for name, param in module.named_parameters(): + logger.debug(f"Disabling gradient for {name}") param.requires_grad_(False) def teardown(self, trainer, pl_module, stage): From 9c955df0b08e574c25caf41e1335bc4849129848 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 9 Jun 2023 10:35:32 -0700 Subject: [PATCH 088/106] Remove Freeze callback --- mart/callbacks/freeze.py | 57 ------------------------------ mart/configs/callbacks/freeze.yaml | 3 -- 2 files changed, 60 deletions(-) delete mode 100644 mart/callbacks/freeze.py delete mode 100644 mart/configs/callbacks/freeze.yaml diff --git a/mart/callbacks/freeze.py b/mart/callbacks/freeze.py deleted file mode 100644 index a57a284e..00000000 --- a/mart/callbacks/freeze.py +++ /dev/null @@ -1,57 +0,0 @@ -# -# Copyright (C) 2022 Intel Corporation -# -# SPDX-License-Identifier: BSD-3-Clause -# - -from __future__ import annotations - -from operator import attrgetter - -import torch -from pytorch_lightning.callbacks import Callback -from pytorch_lightning.utilities.exceptions import MisconfigurationException - -from mart import utils - -logger = utils.get_pylogger(__name__) - -__all__ = ["FreezeModule"] - - -class FreezeModule(Callback): - def __init__( - self, - module="backbone", - ): - self.name = module - - def setup(self, trainer, pl_module, stage): - module = attrgetter(self.name)(pl_module.model) - - if not isinstance(module, torch.nn.Module): - raise MisconfigurationException( - f"The LightningModule should have a nn.Module `{self.name}` attribute" - ) - - for name, param in module.named_parameters(): - logger.debug(f"Disabling gradient for {name}") - param.requires_grad_(False) - - for name, module in module.named_modules(): - module_kind = module.__class__.__name__ - if "BatchNorm" in module_kind: - logger.info(f"Setting eval mode for {name} ({module_kind})") - - def 
on_train_epoch_start(self, trainer, pl_module): - module = attrgetter(self.name)(pl_module.model) - - if not isinstance(module, torch.nn.Module): - raise MisconfigurationException( - f"The LightningModule should have a nn.Module `{self.name}` attribute" - ) - - for name, module in module.named_modules(): - module_kind = module.__class__.__name__ - if "BatchNorm" in module_kind or "Dropout" in module_kind: - module.eval() diff --git a/mart/configs/callbacks/freeze.yaml b/mart/configs/callbacks/freeze.yaml deleted file mode 100644 index 61678512..00000000 --- a/mart/configs/callbacks/freeze.yaml +++ /dev/null @@ -1,3 +0,0 @@ -freeze: - _target_: mart.callbacks.FreezeModule - module: ??? From d278aba09782ae707eed2e30af9d8f09d431e085 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 9 Jun 2023 10:35:39 -0700 Subject: [PATCH 089/106] bugfix --- mart/callbacks/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mart/callbacks/__init__.py b/mart/callbacks/__init__.py index 5a515a57..8e117180 100644 --- a/mart/callbacks/__init__.py +++ b/mart/callbacks/__init__.py @@ -1,4 +1,5 @@ -from .freeze import * +from .eval_mode import * from .gradients import * +from .no_grad_mode import * from .progress_bar import * from .visualizer import * From 55a6161d81cd893aaacae9a6a11af86ad0f46200 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 9 Jun 2023 10:47:33 -0700 Subject: [PATCH 090/106] comments --- mart/callbacks/eval_mode.py | 2 +- mart/callbacks/no_grad_mode.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/mart/callbacks/eval_mode.py b/mart/callbacks/eval_mode.py index 17a1b99e..dcf52ee5 100644 --- a/mart/callbacks/eval_mode.py +++ b/mart/callbacks/eval_mode.py @@ -34,7 +34,7 @@ def on_train_epoch_start(self, trainer, pl_module): module.eval() def on_train_epoch_end(self, trainer, pl_module): - # FIXME: Why this is necessary? + # FIXME: Why is this necessary? 
for name, module in pl_module.named_modules(): module_kind = module.__class__.__name__ if module_kind in self.module_kinds: diff --git a/mart/callbacks/no_grad_mode.py b/mart/callbacks/no_grad_mode.py index e9cf5091..198da739 100644 --- a/mart/callbacks/no_grad_mode.py +++ b/mart/callbacks/no_grad_mode.py @@ -39,6 +39,7 @@ def get_module(self, pl_module): return module def setup(self, trainer, pl_module, stage): + # We use setup, and not on_train_start, so that optimizers can ignore parameters with no gradients. module = self.get_module(pl_module) for name, param in module.named_parameters(): From 830e765f6497093b8644725774ec40455295faba Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 9 Jun 2023 10:53:22 -0700 Subject: [PATCH 091/106] comments --- mart/callbacks/eval_mode.py | 2 +- mart/callbacks/no_grad_mode.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/mart/callbacks/eval_mode.py b/mart/callbacks/eval_mode.py index dcf52ee5..23ba4fb1 100644 --- a/mart/callbacks/eval_mode.py +++ b/mart/callbacks/eval_mode.py @@ -20,7 +20,7 @@ def __init__(self, *module_kinds): self.module_kinds = module_kinds def setup(self, trainer, pl_module, stage): - # This just logs to the console so the user can see visually see which modules will be in eval mode during training. + # Log to the console so the user can see visually see which modules will be in eval mode during training. for name, module in pl_module.named_modules(): module_kind = module.__class__.__name__ if module_kind in self.module_kinds: diff --git a/mart/callbacks/no_grad_mode.py b/mart/callbacks/no_grad_mode.py index 198da739..a21c8b2e 100644 --- a/mart/callbacks/no_grad_mode.py +++ b/mart/callbacks/no_grad_mode.py @@ -39,7 +39,7 @@ def get_module(self, pl_module): return module def setup(self, trainer, pl_module, stage): - # We use setup, and not on_train_start, so that optimizers can ignore parameters with no gradients. 
+ # We use setup, and not on_train_start, so that mart.optim.OptimizerFactory can ignore parameters with no gradients. module = self.get_module(pl_module) for name, param in module.named_parameters(): @@ -47,6 +47,7 @@ def setup(self, trainer, pl_module, stage): param.requires_grad_(False) def teardown(self, trainer, pl_module, stage): + # FIXME: Why is this necessary? module = self.get_module(pl_module) for param in module.parameters(): From be8ae5dd8bc6f6b52f37375180a398ac2cd25df0 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 9 Jun 2023 10:59:49 -0700 Subject: [PATCH 092/106] comments --- mart/callbacks/eval_mode.py | 1 + mart/callbacks/no_grad_mode.py | 1 + 2 files changed, 2 insertions(+) diff --git a/mart/callbacks/eval_mode.py b/mart/callbacks/eval_mode.py index 23ba4fb1..3d6e8950 100644 --- a/mart/callbacks/eval_mode.py +++ b/mart/callbacks/eval_mode.py @@ -28,6 +28,7 @@ def setup(self, trainer, pl_module, stage): def on_train_epoch_start(self, trainer, pl_module): # We must use on_train_epoch_start because PL will set pl_module to train mode right before this callback. + # See: https://lightning.ai/docs/pytorch/stable/common/lightning_module.html#hooks for name, module in pl_module.named_modules(): module_kind = module.__class__.__name__ if module_kind in self.module_kinds: diff --git a/mart/callbacks/no_grad_mode.py b/mart/callbacks/no_grad_mode.py index a21c8b2e..e82ade7a 100644 --- a/mart/callbacks/no_grad_mode.py +++ b/mart/callbacks/no_grad_mode.py @@ -40,6 +40,7 @@ def get_module(self, pl_module): def setup(self, trainer, pl_module, stage): # We use setup, and not on_train_start, so that mart.optim.OptimizerFactory can ignore parameters with no gradients. 
+ # See: https://lightning.ai/docs/pytorch/stable/common/lightning_module.html#hooks module = self.get_module(pl_module) for name, param in module.named_parameters(): From 04069b9948f023a1260964b46f86da96ed0afe14 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 9 Jun 2023 11:48:48 -0700 Subject: [PATCH 093/106] Even better AttackInEvalMode --- mart/callbacks/eval_mode.py | 20 ++++++++++--------- .../callbacks/attack_in_eval_mode.yaml | 7 +++++++ 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/mart/callbacks/eval_mode.py b/mart/callbacks/eval_mode.py index 3d6e8950..c7907634 100644 --- a/mart/callbacks/eval_mode.py +++ b/mart/callbacks/eval_mode.py @@ -4,6 +4,8 @@ # SPDX-License-Identifier: BSD-3-Clause # +from __future__ import annotations + from pytorch_lightning.callbacks import Callback from mart import utils @@ -16,27 +18,27 @@ class AttackInEvalMode(Callback): """Switch the model into eval mode during attack.""" - def __init__(self, *module_kinds): - self.module_kinds = module_kinds + def __init__(self, module_classes: list[str]): + # FIXME: convert strings to classes using hydra.utils.get_class? This will clean up some verbosity in configuration but will require importing hydra in this callback. + self.module_classes = tuple(module_classes) def setup(self, trainer, pl_module, stage): # Log to the console so the user can see visually see which modules will be in eval mode during training. for name, module in pl_module.named_modules(): - module_kind = module.__class__.__name__ - if module_kind in self.module_kinds: - logger.info(f"Setting eval mode for {name} ({module_kind})") + if isinstance(module, self.module_classes): + logger.info( + f"Setting eval mode for {name} ({module.__class__.__module__}.{module.__class__.__name__})" + ) def on_train_epoch_start(self, trainer, pl_module): # We must use on_train_epoch_start because PL will set pl_module to train mode right before this callback. 
# See: https://lightning.ai/docs/pytorch/stable/common/lightning_module.html#hooks for name, module in pl_module.named_modules(): - module_kind = module.__class__.__name__ - if module_kind in self.module_kinds: + if isinstance(module, self.module_classes): module.eval() def on_train_epoch_end(self, trainer, pl_module): # FIXME: Why is this necessary? for name, module in pl_module.named_modules(): - module_kind = module.__class__.__name__ - if module_kind in self.module_kinds: + if isinstance(module, self.module_classes): module.train() diff --git a/mart/configs/callbacks/attack_in_eval_mode.yaml b/mart/configs/callbacks/attack_in_eval_mode.yaml index 2acdc953..98710997 100644 --- a/mart/configs/callbacks/attack_in_eval_mode.yaml +++ b/mart/configs/callbacks/attack_in_eval_mode.yaml @@ -1,2 +1,9 @@ attack_in_eval_mode: _target_: mart.callbacks.AttackInEvalMode + module_classes: ??? + # - _target_: hydra.utils.get_class + # path: mart.models.LitModular + # - _target_: hydra.utils.get_class + # path: torch.nn.BatchNorm2d + # - _target_: hydra.utils.get_class + # path: torch.nn.Dropout From 113d483eed5a999cbf35663a6c53d9d4d67031ea Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 9 Jun 2023 12:33:32 -0700 Subject: [PATCH 094/106] Fix type --- mart/callbacks/eval_mode.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/callbacks/eval_mode.py b/mart/callbacks/eval_mode.py index c7907634..6fce06ba 100644 --- a/mart/callbacks/eval_mode.py +++ b/mart/callbacks/eval_mode.py @@ -18,7 +18,7 @@ class AttackInEvalMode(Callback): """Switch the model into eval mode during attack.""" - def __init__(self, module_classes: list[str]): + def __init__(self, module_classes: list[type]): # FIXME: convert strings to classes using hydra.utils.get_class? This will clean up some verbosity in configuration but will require importing hydra in this callback. 
self.module_classes = tuple(module_classes) From 3dbdfd40024d6508314d7a6025986d59361bb001 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 9 Jun 2023 12:34:06 -0700 Subject: [PATCH 095/106] Even better ModelParamsNoGrad --- mart/callbacks/no_grad_mode.py | 38 ++++++++---------------- mart/configs/callbacks/no_grad_mode.yaml | 3 +- 2 files changed, 15 insertions(+), 26 deletions(-) diff --git a/mart/callbacks/no_grad_mode.py b/mart/callbacks/no_grad_mode.py index e82ade7a..d4f8541d 100644 --- a/mart/callbacks/no_grad_mode.py +++ b/mart/callbacks/no_grad_mode.py @@ -4,11 +4,10 @@ # SPDX-License-Identifier: BSD-3-Clause # -from operator import attrgetter +from __future__ import annotations import torch from pytorch_lightning.callbacks import Callback -from pytorch_lightning.utilities.exceptions import MisconfigurationException from mart import utils @@ -23,33 +22,22 @@ class ModelParamsNoGrad(Callback): This callback should not change the result. Don't use unless an attack runs faster. """ - def __init__(self, pl_module_attr: str = None): - self._attr = pl_module_attr + def __init__(self, module_names: str | list[str] = None): + if isinstance(module_names, str): + module_names = [module_names] - def get_module(self, pl_module): - module = pl_module - if self._attr is not None: - module = attrgetter(self._attr)(module) - - if not isinstance(module, torch.nn.Module): - raise MisconfigurationException( - f"The LightningModule should have a nn.Module `{self._attr}` attribute" - ) - - return module + self.module_names = module_names def setup(self, trainer, pl_module, stage): # We use setup, and not on_train_start, so that mart.optim.OptimizerFactory can ignore parameters with no gradients. 
# See: https://lightning.ai/docs/pytorch/stable/common/lightning_module.html#hooks - module = self.get_module(pl_module) - - for name, param in module.named_parameters(): - logger.debug(f"Disabling gradient for {name}") - param.requires_grad_(False) + for name, param in pl_module.named_parameters(): + if any(name.startswith(module_name) for module_name in self.module_names): + logger.info(f"Disabling gradient for {name}") + param.requires_grad_(False) def teardown(self, trainer, pl_module, stage): - # FIXME: Why is this necessary? - module = self.get_module(pl_module) - - for param in module.parameters(): - param.requires_grad_(True) + for name, param in pl_module.named_parameters(): + if any(name.startswith(module_name) for module_name in self.module_names): + # FIXME: Why is this necessary? + param.requires_grad_(True) diff --git a/mart/configs/callbacks/no_grad_mode.yaml b/mart/configs/callbacks/no_grad_mode.yaml index 6b4312fd..d12d18e9 100644 --- a/mart/configs/callbacks/no_grad_mode.yaml +++ b/mart/configs/callbacks/no_grad_mode.yaml @@ -1,2 +1,3 @@ -attack_in_eval_mode: +no_grad_mode: _target_: mart.callbacks.ModelParamsNoGrad + module_names: ??? From 48577adc1704e0d9d86b944fe47d5673b1c92d43 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 9 Jun 2023 12:40:59 -0700 Subject: [PATCH 096/106] more lenient --- mart/callbacks/eval_mode.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mart/callbacks/eval_mode.py b/mart/callbacks/eval_mode.py index 6fce06ba..c180d276 100644 --- a/mart/callbacks/eval_mode.py +++ b/mart/callbacks/eval_mode.py @@ -18,8 +18,11 @@ class AttackInEvalMode(Callback): """Switch the model into eval mode during attack.""" - def __init__(self, module_classes: list[type]): + def __init__(self, module_classes: type | list[type]): # FIXME: convert strings to classes using hydra.utils.get_class? This will clean up some verbosity in configuration but will require importing hydra in this callback. 
+ if isinstance(module_classes, type): + module_classes = [module_classes] + self.module_classes = tuple(module_classes) def setup(self, trainer, pl_module, stage): From cc0e057f8a2fd55ffafdbabb4f1225fcf74d18b3 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 9 Jun 2023 13:57:15 -0700 Subject: [PATCH 097/106] Manually merge general_visualizer --- mart/callbacks/visualizer.py | 47 ++++++++++--------- .../callbacks/perturbation_visualizer.yaml | 4 ++ 2 files changed, 28 insertions(+), 23 deletions(-) create mode 100644 mart/configs/callbacks/perturbation_visualizer.yaml diff --git a/mart/callbacks/visualizer.py b/mart/callbacks/visualizer.py index 3354321e..8c34b879 100644 --- a/mart/callbacks/visualizer.py +++ b/mart/callbacks/visualizer.py @@ -7,35 +7,36 @@ import os from pytorch_lightning.callbacks import Callback -from torchvision.transforms import ToPILImage -__all__ = ["PerturbedImageVisualizer"] +import mart +__all__ = ["ImageVisualizer"] -class PerturbedImageVisualizer(Callback): - """Save adversarial images as files.""" - def __init__(self, folder): - super().__init__() +class ImageVisualizer(Callback): + def __init__(self, frequency: int = 100, **tag_paths): + self.frequency = frequency + self.tag_paths = tag_paths - # FIXME: This should use the Trainer's logging directory. - self.folder = folder - self.convert = ToPILImage() + def log_image(self, trainer, tag, image): + # Add image to each logger + for logger in trainer.loggers: + # FIXME: Should we just use isinstance(logger.experiment, SummaryWriter)? 
+ if not hasattr(logger.experiment, "add_image"): + continue - if not os.path.isdir(self.folder): - os.makedirs(self.folder) + logger.experiment.add_image(tag, image, global_step=trainer.global_step) - def on_train_batch_end(self, trainer, model, outputs, batch, batch_idx): - # Save input and target for on_train_end - self.input = batch["input"] - self.target = batch["target"] + def log_images(self, trainer, pl_module): + for tag, path in self.tag_paths.items(): + image = mart.utils.get_object(pl_module, path) + self.log_image(trainer, tag, image) - def on_train_end(self, trainer, model): - # FIXME: We should really just save this to outputs instead of recomputing adv_input - adv_input = model(input=self.input, target=self.target) + def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): + if batch_idx % self.frequency != 0: + return - for img, tgt in zip(adv_input, self.target): - fname = tgt["file_name"] - fpath = os.path.join(self.folder, fname) - im = self.convert(img / 255) - im.save(fpath) + self.log_images(trainer, pl_module) + + def on_train_end(self, trainer, pl_module): + self.log_images(trainer, pl_module) diff --git a/mart/configs/callbacks/perturbation_visualizer.yaml b/mart/configs/callbacks/perturbation_visualizer.yaml new file mode 100644 index 00000000..5a673db5 --- /dev/null +++ b/mart/configs/callbacks/perturbation_visualizer.yaml @@ -0,0 +1,4 @@ +perturbation_visualizer: + _target_: mart.callbacks.ImageVisualizer + frequency: 100 + perturbation: ??? 
From 2e9545d1e0f506aee14a125602674f83ac992ee3 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 9 Jun 2023 13:58:22 -0700 Subject: [PATCH 098/106] Use attrgetter --- mart/callbacks/visualizer.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/mart/callbacks/visualizer.py b/mart/callbacks/visualizer.py index 8c34b879..a81a94b7 100644 --- a/mart/callbacks/visualizer.py +++ b/mart/callbacks/visualizer.py @@ -4,12 +4,10 @@ # SPDX-License-Identifier: BSD-3-Clause # -import os +from operator import attrgetter from pytorch_lightning.callbacks import Callback -import mart - __all__ = ["ImageVisualizer"] @@ -29,7 +27,7 @@ def log_image(self, trainer, tag, image): def log_images(self, trainer, pl_module): for tag, path in self.tag_paths.items(): - image = mart.utils.get_object(pl_module, path) + image = attrgetter(path)(pl_module) self.log_image(trainer, tag, image) def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): From 10accf56d959f529d87acc303c50311b96c869c4 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 9 Jun 2023 13:58:22 -0700 Subject: [PATCH 099/106] Use attrgetter --- mart/callbacks/visualizer.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/mart/callbacks/visualizer.py b/mart/callbacks/visualizer.py index 8c34b879..a81a94b7 100644 --- a/mart/callbacks/visualizer.py +++ b/mart/callbacks/visualizer.py @@ -4,12 +4,10 @@ # SPDX-License-Identifier: BSD-3-Clause # -import os +from operator import attrgetter from pytorch_lightning.callbacks import Callback -import mart - __all__ = ["ImageVisualizer"] @@ -29,7 +27,7 @@ def log_image(self, trainer, tag, image): def log_images(self, trainer, pl_module): for tag, path in self.tag_paths.items(): - image = mart.utils.get_object(pl_module, path) + image = attrgetter(path)(pl_module) self.log_image(trainer, tag, image) def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): From d8fe8a08a83b752ad777d88a952ba24c18ddeaa5 
Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 9 Jun 2023 13:59:21 -0700 Subject: [PATCH 100/106] Restore image_visualizer config --- mart/configs/callbacks/image_visualizer.yaml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 mart/configs/callbacks/image_visualizer.yaml diff --git a/mart/configs/callbacks/image_visualizer.yaml b/mart/configs/callbacks/image_visualizer.yaml new file mode 100644 index 00000000..65b9f8dd --- /dev/null +++ b/mart/configs/callbacks/image_visualizer.yaml @@ -0,0 +1,3 @@ +image_visualizer: + _target_: mart.callbacks.PerturbedImageVisualizer + folder: ${paths.output_dir}/adversarial_examples From e348a8bb909cd36d007343e39e642f36b53e3be9 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 9 Jun 2023 14:00:14 -0700 Subject: [PATCH 101/106] bugfix configs --- .../configs/experiment/COCO_YOLOv3_ShapeShifter.yaml | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 8b18afd7..9ec7b8d6 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -16,7 +16,8 @@ defaults: lr_monitor, perturbation_visualizer, gradient_monitor, - freeze, + attack_in_eval_mode, + no_grad_mode, ] task_name: "COCO_YOLOv3_ShapeShifter" @@ -37,8 +38,13 @@ callbacks: monitor: "validation_metrics/map" mode: "min" - freeze: - module: "yolov3" + attack_in_eval_mode: + module_classes: + - _target_: hydra.utils.get_class + path: torch.nn.BatchNorm2d + + no_grad_mode: + module_names: "model.yolov3" perturbation_visualizer: perturbation: "model.perturbation.perturbation" From 353c281c0f906d6354d811a2c7f3d14802d847c3 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 9 Jun 2023 22:40:47 -0700 Subject: [PATCH 102/106] ImageVisualizer consumes outputs --- mart/callbacks/visualizer.py | 23 +++++++++---------- 
mart/configs/callbacks/image_visualizer.yaml | 4 ++-- .../callbacks/perturbation_visualizer.yaml | 4 ---- 3 files changed, 13 insertions(+), 18 deletions(-) delete mode 100644 mart/configs/callbacks/perturbation_visualizer.yaml diff --git a/mart/callbacks/visualizer.py b/mart/callbacks/visualizer.py index a81a94b7..40f914c9 100644 --- a/mart/callbacks/visualizer.py +++ b/mart/callbacks/visualizer.py @@ -12,9 +12,9 @@ class ImageVisualizer(Callback): - def __init__(self, frequency: int = 100, **tag_paths): + def __init__(self, frequency: int = 100, **tag_keys): self.frequency = frequency - self.tag_paths = tag_paths + self.tag_keys = tag_keys def log_image(self, trainer, tag, image): # Add image to each logger @@ -23,18 +23,17 @@ def log_image(self, trainer, tag, image): if not hasattr(logger.experiment, "add_image"): continue - logger.experiment.add_image(tag, image, global_step=trainer.global_step) - - def log_images(self, trainer, pl_module): - for tag, path in self.tag_paths.items(): - image = attrgetter(path)(pl_module) - self.log_image(trainer, tag, image) + if len(image.shape) == 4: + logger.experiment.add_images(tag, image, global_step=trainer.global_step) + elif len(image.shape) == 3: + logger.experiment.add_image(tag, image, global_step=trainer.global_step) + else: + raise ValueError(f"Unsupported image shape: {image.shape}") def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): if batch_idx % self.frequency != 0: return - self.log_images(trainer, pl_module) - - def on_train_end(self, trainer, pl_module): - self.log_images(trainer, pl_module) + for tag, output_key in self.tag_keys.items(): + image = outputs[output_key] + self.log_image(trainer, tag, image) diff --git a/mart/configs/callbacks/image_visualizer.yaml b/mart/configs/callbacks/image_visualizer.yaml index 65b9f8dd..1477a433 100644 --- a/mart/configs/callbacks/image_visualizer.yaml +++ b/mart/configs/callbacks/image_visualizer.yaml @@ -1,3 +1,3 @@ image_visualizer: - 
_target_: mart.callbacks.PerturbedImageVisualizer - folder: ${paths.output_dir}/adversarial_examples + _target_: mart.callbacks.ImageVisualizer + frequency: 100 diff --git a/mart/configs/callbacks/perturbation_visualizer.yaml b/mart/configs/callbacks/perturbation_visualizer.yaml deleted file mode 100644 index 5a673db5..00000000 --- a/mart/configs/callbacks/perturbation_visualizer.yaml +++ /dev/null @@ -1,4 +0,0 @@ -perturbation_visualizer: - _target_: mart.callbacks.ImageVisualizer - frequency: 100 - perturbation: ??? From 3d04ef78cd2f707f7bd7ea9a01af2e561d5a789b Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 12 Jun 2023 15:36:45 -0700 Subject: [PATCH 103/106] Update example modules to run in eval mode --- mart/configs/callbacks/attack_in_eval_mode.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mart/configs/callbacks/attack_in_eval_mode.yaml b/mart/configs/callbacks/attack_in_eval_mode.yaml index 98710997..4ca096b0 100644 --- a/mart/configs/callbacks/attack_in_eval_mode.yaml +++ b/mart/configs/callbacks/attack_in_eval_mode.yaml @@ -7,3 +7,5 @@ attack_in_eval_mode: # path: torch.nn.BatchNorm2d # - _target_: hydra.utils.get_class # path: torch.nn.Dropout + # - _target_: hydra.utils.get_class + # path: torch.nn.SyncBatchNorm From 77c2350787d484346517716f46adfd34a2c5e288 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 12 Jun 2023 15:46:24 -0700 Subject: [PATCH 104/106] Only log and run in fit stage --- mart/callbacks/eval_mode.py | 3 +++ mart/callbacks/no_grad_mode.py | 3 +++ 2 files changed, 6 insertions(+) diff --git a/mart/callbacks/eval_mode.py b/mart/callbacks/eval_mode.py index c180d276..639444c9 100644 --- a/mart/callbacks/eval_mode.py +++ b/mart/callbacks/eval_mode.py @@ -26,6 +26,9 @@ def __init__(self, module_classes: type | list[type]): self.module_classes = tuple(module_classes) def setup(self, trainer, pl_module, stage): + if stage != "fit": + return + # Log to the console so the user can see visually see which modules will be 
in eval mode during training. for name, module in pl_module.named_modules(): if isinstance(module, self.module_classes): diff --git a/mart/callbacks/no_grad_mode.py b/mart/callbacks/no_grad_mode.py index d4f8541d..4a86d985 100644 --- a/mart/callbacks/no_grad_mode.py +++ b/mart/callbacks/no_grad_mode.py @@ -29,6 +29,9 @@ def __init__(self, module_names: str | list[str] = None): self.module_names = module_names def setup(self, trainer, pl_module, stage): + if stage != "fit": + return + # We use setup, and not on_train_start, so that mart.optim.OptimizerFactory can ignore parameters with no gradients. # See: https://lightning.ai/docs/pytorch/stable/common/lightning_module.html#hooks for name, param in pl_module.named_parameters(): From 76c88f929d74bf3219d304b6cc61e5121ad2147e Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 15 Jun 2023 16:35:42 -0700 Subject: [PATCH 105/106] Revert "ImageVisualizer consumes outputs" This reverts commit 353c281c0f906d6354d811a2c7f3d14802d847c3. --- mart/callbacks/visualizer.py | 23 ++++++++++--------- mart/configs/callbacks/image_visualizer.yaml | 4 ++-- .../callbacks/perturbation_visualizer.yaml | 4 ++++ 3 files changed, 18 insertions(+), 13 deletions(-) create mode 100644 mart/configs/callbacks/perturbation_visualizer.yaml diff --git a/mart/callbacks/visualizer.py b/mart/callbacks/visualizer.py index 40f914c9..a81a94b7 100644 --- a/mart/callbacks/visualizer.py +++ b/mart/callbacks/visualizer.py @@ -12,9 +12,9 @@ class ImageVisualizer(Callback): - def __init__(self, frequency: int = 100, **tag_keys): + def __init__(self, frequency: int = 100, **tag_paths): self.frequency = frequency - self.tag_keys = tag_keys + self.tag_paths = tag_paths def log_image(self, trainer, tag, image): # Add image to each logger @@ -23,17 +23,18 @@ def log_image(self, trainer, tag, image): if not hasattr(logger.experiment, "add_image"): continue - if len(image.shape) == 4: - logger.experiment.add_images(tag, image, global_step=trainer.global_step) - 
elif len(image.shape) == 3: - logger.experiment.add_image(tag, image, global_step=trainer.global_step) - else: - raise ValueError(f"Unsupported image shape: {image.shape}") + logger.experiment.add_image(tag, image, global_step=trainer.global_step) + + def log_images(self, trainer, pl_module): + for tag, path in self.tag_paths.items(): + image = attrgetter(path)(pl_module) + self.log_image(trainer, tag, image) def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): if batch_idx % self.frequency != 0: return - for tag, output_key in self.tag_keys.items(): - image = outputs[output_key] - self.log_image(trainer, tag, image) + self.log_images(trainer, pl_module) + + def on_train_end(self, trainer, pl_module): + self.log_images(trainer, pl_module) diff --git a/mart/configs/callbacks/image_visualizer.yaml b/mart/configs/callbacks/image_visualizer.yaml index 1477a433..65b9f8dd 100644 --- a/mart/configs/callbacks/image_visualizer.yaml +++ b/mart/configs/callbacks/image_visualizer.yaml @@ -1,3 +1,3 @@ image_visualizer: - _target_: mart.callbacks.ImageVisualizer - frequency: 100 + _target_: mart.callbacks.PerturbedImageVisualizer + folder: ${paths.output_dir}/adversarial_examples diff --git a/mart/configs/callbacks/perturbation_visualizer.yaml b/mart/configs/callbacks/perturbation_visualizer.yaml new file mode 100644 index 00000000..5a673db5 --- /dev/null +++ b/mart/configs/callbacks/perturbation_visualizer.yaml @@ -0,0 +1,4 @@ +perturbation_visualizer: + _target_: mart.callbacks.ImageVisualizer + frequency: 100 + perturbation: ??? 
From 1192acb4ff3f0e21adc596fedf6e29cb107f2b02 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 15 Jun 2023 16:54:24 -0700 Subject: [PATCH 106/106] Set weights in forward --- .../experiment/COCO_YOLOv3_ShapeShifter.yaml | 24 ++++++++++++++----- mart/configs/model/yolov3.yaml | 9 ++++--- 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml index 9ec7b8d6..f9e80aa3 100644 --- a/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml +++ b/mart/configs/experiment/COCO_YOLOv3_ShapeShifter.yaml @@ -111,8 +111,12 @@ model: x: "input_adv" seq030: loss: - - losses.hide_target_objects_loss - - losses.correct_target_class_loss + _call_with_args_: + - losses.hide_target_objects_loss + - losses.correct_target_class_loss + weights: + - 10 + - 1 training_step_log: - loss @@ -139,8 +143,12 @@ model: x: "input_adv" seq030: loss: - - losses.hide_target_objects_loss - - losses.correct_target_class_loss + _call_with_args_: + - losses.hide_target_objects_loss + - losses.correct_target_class_loss + weights: + - 10 + - 1 test_sequence: seq005: perturbation @@ -150,5 +158,9 @@ model: x: "input_adv" seq030: loss: - - losses.hide_target_objects_loss - - losses.correct_target_class_loss + _call_with_args_: + - losses.hide_target_objects_loss + - losses.correct_target_class_loss + weights: + - 10 + - 1 diff --git a/mart/configs/model/yolov3.yaml b/mart/configs/model/yolov3.yaml index c2778335..aa814310 100644 --- a/mart/configs/model/yolov3.yaml +++ b/mart/configs/model/yolov3.yaml @@ -37,7 +37,8 @@ training_sequence: seq030: loss: - - losses.total_loss + _call_with_args_: + - losses.total_loss seq040: detections: @@ -72,7 +73,8 @@ validation_sequence: seq030: loss: - - losses.total_loss + _call_with_args_: + - losses.total_loss seq040: detections: @@ -109,7 +111,8 @@ test_sequence: seq030: loss: - - losses.total_loss + _call_with_args_: + - losses.total_loss seq040: 
detections: