From 9bb04cee8803d5f8479ae58197c21931c75b5613 Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Wed, 14 Jan 2026 16:08:31 +0900 Subject: [PATCH 01/22] Update docker installation commit for perception_evaluation --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index c5240c498..22d18f5ba 100644 --- a/Dockerfile +++ b/Dockerfile @@ -61,7 +61,7 @@ RUN python3 -m pip --no-cache-dir install \ RUN python3 -m pip install git+https://github.com/tier4/t4-devkit@v0.5.1 # Install autoware-perception-evaluation -RUN python3 -m pip install git+https://github.com/tier4/autoware_perception_evaluation@develop +RUN python3 -m pip install git+https://github.com/tier4/autoware_perception_evaluation@dd37a546352f953565033f1d4b8cb443df1232c59 # Need to dowgrade setuptools to 60.2.0 to fix setup RUN python3 -m pip --no-cache-dir install \ From 9de5ca0234c257231e12200f3ed8aa17e09d15ec Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Mon, 2 Feb 2026 14:24:42 +0900 Subject: [PATCH 02/22] Add them temp --- ...usion_camera_point_bev_tensorrt_dynamic.py | 94 ++++++++++++ projects/BEVFusion/deploy/__init__.py | 2 - projects/BEVFusion/deploy/containers.py | 36 +++++ projects/BEVFusion/deploy/torch2onnx.py | 139 ++++++++++++------ projects/BEVFusion/deploy/utils.py | 115 +++++++++++++++ projects/BEVFusion/deploy/voxel_detection.py | 126 +++++++++------- 6 files changed, 410 insertions(+), 102 deletions(-) create mode 100644 projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py create mode 100644 projects/BEVFusion/deploy/utils.py diff --git a/projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py b/projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py new file mode 100644 index 000000000..e14f34597 --- /dev/null +++ b/projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py @@ -0,0 +1,94 @@ +# Deploy camera with lidar inputs + 
+codebase_config = dict(type="mmdet3d", task="VoxelDetection", model_type="end2end") + +custom_imports = dict( + imports=[ + "projects.BEVFusion.deploy", + "projects.BEVFusion.bevfusion", + "projects.SparseConvolution", + ], + allow_failed_imports=False, +) + +depth_bins = 129 +feature_dims = (60, 80) +# image_dims = (640, 576) + +backend_config = dict( + type="tensorrt", + common_config=dict(max_workspace_size=1 << 32), + model_inputs=[ + dict( + input_shapes=dict( + # TODO(TIERIV): Optimize. Now, using points will increase latency significantly + points=dict(min_shape=[5000, 4], opt_shape=[50000, 4], max_shape=[200000, 4]), + lidar2image=dict(min_shape=[1, 4, 4], opt_shape=[6, 4, 4], max_shape=[6, 4, 4]), + img_aug_matrix=dict(min_shape=[1, 4, 4], opt_shape=[6, 4, 4], max_shape=[6, 4, 4]), + geom_feats=dict( + min_shape=[0 * depth_bins * feature_dims[0] * feature_dims[1], 4], + opt_shape=[6 * depth_bins * feature_dims[0] * feature_dims[1] // 2, 4], + max_shape=[6 * depth_bins * feature_dims[0] * feature_dims[1], 4], + ), + kept=dict( + min_shape=[0 * depth_bins * feature_dims[0] * feature_dims[1]], + opt_shape=[6 * depth_bins * feature_dims[0] * feature_dims[1]], + max_shape=[6 * depth_bins * feature_dims[0] * feature_dims[1]], + ), + ranks=dict( + min_shape=[0 * depth_bins * feature_dims[0] * feature_dims[1]], + opt_shape=[6 * depth_bins * feature_dims[0] * feature_dims[1] // 2], + max_shape=[6 * depth_bins * feature_dims[0] * feature_dims[1]], + ), + indices=dict( + min_shape=[0 * depth_bins * feature_dims[0] * feature_dims[1]], + opt_shape=[6 * depth_bins * feature_dims[0] * feature_dims[1] // 2], + max_shape=[6 * depth_bins * feature_dims[0] * feature_dims[1]], + ), + image_feats=dict( + min_shape=[0, 256, feature_dims[0], feature_dims[1]], + opt_shape=[6, 256, feature_dims[0], feature_dims[1]], + max_shape=[6, 256, feature_dims[0], feature_dims[1]], + ), + ) + ) + ], +) + +onnx_config = dict( + type="onnx", + export_params=True, + 
keep_initializers_as_inputs=False, + opset_version=17, + save_file="camera_point_bev_network.onnx", + input_names=["points", "lidar2image", "img_aug_matrix", "geom_feats", "kept", "ranks", "indices", "image_feats"], + output_names=["bbox_pred", "score", "label_pred"], + dynamic_axes={ + "points": { + 0: "num_points", + }, + "lidar2image": { + 0: "num_imgs", + }, + "img_aug_matrix": { + 0: "num_imgs", + }, + "geom_feats": { + 0: "num_kept", + }, + "kept": { + 0: "num_geom_feats", + }, + "ranks": { + 0: "num_kept", + }, + "indices": { + 0: "num_kept", + }, + "image_feats": { + 0: "num_imgs", + }, + }, + input_shape=None, + verbose=True, +) diff --git a/projects/BEVFusion/deploy/__init__.py b/projects/BEVFusion/deploy/__init__.py index 356b5149c..5eee97a02 100644 --- a/projects/BEVFusion/deploy/__init__.py +++ b/projects/BEVFusion/deploy/__init__.py @@ -1,7 +1,5 @@ -from . import base from .voxel_detection import VoxelDetection __all__ = [ - "base", "VoxelDetection", ] diff --git a/projects/BEVFusion/deploy/containers.py b/projects/BEVFusion/deploy/containers.py index ddc0b7937..046934ee8 100644 --- a/projects/BEVFusion/deploy/containers.py +++ b/projects/BEVFusion/deploy/containers.py @@ -87,3 +87,39 @@ def forward( ) return bbox_pred, score, outputs["query_labels"][0] + + +class TrtBevFusionCameraOnlyContainer(TrtBevFusionMainContainer): + def __init__(self, mod, *args, **kwargs) -> None: + super().__init__(mod=mod, *args, **kwargs) + + def forward( + self, + lidar2img, + img_aug_matrix, + geom_feats, + kept, + ranks, + indices, + image_feats, + points=None, + ): + mod = self.mod + lidar_aug_matrix = torch.eye(4).unsqueeze(0).to(image_feats.device) + batch_inputs_dict = { + "imgs": image_feats.unsqueeze(0), + "lidar2img": lidar2img.unsqueeze(0), + "cam2img": None, + "cam2lidar": None, + "img_aug_matrix": img_aug_matrix.unsqueeze(0), + "img_aug_matrix_inverse": None, + "lidar_aug_matrix": lidar_aug_matrix, + "lidar_aug_matrix_inverse": lidar_aug_matrix, + 
"geom_feats": (geom_feats, kept, ranks, indices), + "points": [points] if points is not None else None, + } + + outputs = mod._forward(batch_inputs_dict, using_image_features=True) + bbox_pred, score, label_pred = self.postprocessing(outputs) + + return bbox_pred, score, label_pred diff --git a/projects/BEVFusion/deploy/torch2onnx.py b/projects/BEVFusion/deploy/torch2onnx.py index 7e13434ea..3740e758b 100644 --- a/projects/BEVFusion/deploy/torch2onnx.py +++ b/projects/BEVFusion/deploy/torch2onnx.py @@ -4,13 +4,12 @@ import os import os.path as osp from copy import deepcopy -from functools import partial from typing import Any import numpy as np import onnx import torch -from containers import TrtBevFusionImageBackboneContainer, TrtBevFusionMainContainer +from containers import TrtBevFusionCameraOnlyContainer, TrtBevFusionImageBackboneContainer, TrtBevFusionMainContainer from mmdeploy.apis import build_task_processor from mmdeploy.apis.onnx.passes import optimize_onnx from mmdeploy.core import RewriterContext, patch_model @@ -26,7 +25,6 @@ ) from mmdet3d.registry import MODELS from mmengine.registry import RUNNERS -from mmengine.runner import load_checkpoint from torch.multiprocessing import set_start_method @@ -44,7 +42,7 @@ def parse_args(): help="module to export", required=True, default="main_body", - choices=["main_body", "image_backbone"], + choices=["main_body", "image_backbone", "camera_bev_only_network"], ) args = parser.parse_args() return args @@ -62,21 +60,44 @@ def parse_args(): checkpoint_path = args.checkpoint device = args.device work_dir = args.work_dir + os.makedirs(work_dir, exist_ok=True) deploy_cfg, model_cfg = load_config(deploy_cfg_path, model_cfg_path) - model_cfg.randomness = dict(seed=0, diff_rank_seed=False, deterministic=False) model_cfg.launcher = "none" + onnx_cfg = get_onnx_config(deploy_cfg) + input_names = onnx_cfg["input_names"] + output_names = onnx_cfg["output_names"] + + extract_pts_inputs = True if "points" in input_names or 
"voxels" in input_names else False data_preprocessor_cfg = deepcopy(model_cfg.model.data_preprocessor) - voxelize_cfg = data_preprocessor_cfg.pop("voxelize_cfg") - voxelize_cfg.pop("voxelize_reduce") - data_preprocessor_cfg["voxel_layer"] = voxelize_cfg - data_preprocessor_cfg.voxel = True + # TODO(KokSeang): Move out from data_preprocessor + voxelize_cfg = deepcopy(model_cfg.get("voxelize_cfg", None)) + + if extract_pts_inputs and voxelize_cfg is None: + # TODO(KokSeang): Remove this + # Default voxelize_layer + voxelize_cfg = dict( + max_num_points=10, + voxel_size=[0.17, 0.17, 0.2], + point_cloud_range=[-122.4, -122.4, -3.0, 122.4, 122.4, 5.0], + max_voxels=[120000, 160000], + deterministic=True, + ) + + if voxelize_cfg is not None: + voxelize_cfg.pop("voxelize_reduce", None) + data_preprocessor_cfg["voxel_layer"] = voxelize_cfg + data_preprocessor_cfg.voxel = True data_preprocessor = MODELS.build(data_preprocessor_cfg) + # load a sample + if "work_dir" not in model_cfg: + model_cfg["work_dir"] = work_dir + # load a sample runner = RUNNERS.build(model_cfg) runner.load_or_resume() @@ -152,6 +173,7 @@ def _add_or_update(cfg: dict, key: str, val: Any): verbose=verbose, keep_initializers_as_inputs=keep_initializers_as_inputs, ) + _add_or_update(deploy_cfg, "ir_config", ir_config) ir = IR.get(get_ir_config(deploy_cfg)["type"]) if isinstance(backend, Backend): @@ -173,6 +195,7 @@ def _add_or_update(cfg: dict, key: str, val: Any): if "onnx_custom_passes" not in context_info: onnx_custom_passes = optimize_onnx if optimize else None context_info["onnx_custom_passes"] = onnx_custom_passes + with RewriterContext(**context_info), torch.no_grad(): image_feats = None @@ -184,7 +207,7 @@ def _add_or_update(cfg: dict, key: str, val: Any): model_inputs = (imgs.to(device=device, dtype=torch.uint8),) if args.module == "image_backbone": - return_value = torch.onnx.export( + torch.onnx.export( image_backbone_container, model_inputs, output_path, @@ -196,10 +219,27 @@ def 
_add_or_update(cfg: dict, key: str, val: Any): keep_initializers_as_inputs=keep_initializers_as_inputs, verbose=verbose, ) - else: - image_feats = image_backbone_container(*model_inputs) + logger.info(f"Image backbone exported to {output_path}") + exit() + + image_feats = image_backbone_container(*model_inputs) + logger.info(f"Converted Image backbone") - if args.module == "main_body": + if args.module == "camera_bev_only_network": + main_container = TrtBevFusionCameraOnlyContainer(patched_model) + model_inputs = ( + lidar2img.to(device).float(), + img_aug_matrix.to(device).float(), + geom_feats.to(device).float(), + kept.to(device), + ranks.to(device).long(), + indices.to(device).long(), + image_feats, + ) + if "points" in input_names: + model_inputs += (points.to(device).float(),) + + elif args.module == "main_body": main_container = TrtBevFusionMainContainer(patched_model) model_inputs = ( voxels.to(device), @@ -217,40 +257,41 @@ def _add_or_update(cfg: dict, key: str, val: Any): indices.to(device).long(), image_feats, ) - torch.onnx.export( - main_container, - model_inputs, - output_path.replace(".onnx", "_temp_to_be_fixed.onnx"), - export_params=True, - input_names=input_names, - output_names=output_names, - opset_version=opset_version, - dynamic_axes=dynamic_axes, - keep_initializers_as_inputs=keep_initializers_as_inputs, - verbose=verbose, - ) - - logger.info("Attempting to fix the graph (TopK's K becoming a tensor)") - - import onnx_graphsurgeon as gs - - model = onnx.load(output_path.replace(".onnx", "_temp_to_be_fixed.onnx")) - graph = gs.import_onnx(model) - - # Fix TopK - topk_nodes = [node for node in graph.nodes if node.op == "TopK"] - assert len(topk_nodes) == 1 - topk = topk_nodes[0] - k = model_cfg.num_proposals - topk.inputs[1] = gs.Constant("K", values=np.array([k], dtype=np.int64)) - topk.outputs[0].shape = [1, k] - topk.outputs[0].dtype = topk.inputs[0].dtype if topk.inputs[0].dtype else np.float32 - topk.outputs[1].shape = [1, k] - 
topk.outputs[1].dtype = np.int64 - - graph.cleanup().toposort() - onnx.save_model(gs.export_onnx(graph), output_path) - - logger.info(f"(Fixed) ONNX exported to {output_path}") - logger.info(f"ONNX exported to {output_path}") + torch.onnx.export( + main_container, + model_inputs, + output_path.replace(".onnx", "_temp_to_be_fixed.onnx"), + export_params=True, + input_names=input_names, + output_names=output_names, + opset_version=opset_version, + dynamic_axes=dynamic_axes, + keep_initializers_as_inputs=keep_initializers_as_inputs, + verbose=verbose, + ) + + logger.info("Attempting to fix the graph (TopK's K becoming a tensor)") + + import onnx_graphsurgeon as gs + + model = onnx.load(output_path.replace(".onnx", "_temp_to_be_fixed.onnx")) + graph = gs.import_onnx(model) + + # Fix TopK + topk_nodes = [node for node in graph.nodes if node.op == "TopK"] + assert len(topk_nodes) == 1 + topk = topk_nodes[0] + k = model_cfg.num_proposals + topk.inputs[1] = gs.Constant("K", values=np.array([k], dtype=np.int64)) + topk.outputs[0].shape = [1, k] + topk.outputs[0].dtype = topk.inputs[0].dtype if topk.inputs[0].dtype else np.float32 + topk.outputs[1].shape = [1, k] + topk.outputs[1].dtype = np.int64 + + graph.cleanup().toposort() + onnx.save_model(gs.export_onnx(graph), output_path) + + logger.info(f"(Fixed) ONNX exported to {output_path}") + + logger.info(f"ONNX exported to {output_path}") diff --git a/projects/BEVFusion/deploy/utils.py b/projects/BEVFusion/deploy/utils.py new file mode 100644 index 000000000..807212204 --- /dev/null +++ b/projects/BEVFusion/deploy/utils.py @@ -0,0 +1,115 @@ +import os +from copy import deepcopy +from dataclasses import dataclass + +from mmdeploy.utils import ( + IR, + Backend, + get_backend, + get_dynamic_axes, + get_ir_config, + get_onnx_config, + get_root_logger, + load_config, +) +from mmdet3d.registry import MODELS +from mmengine.registry import RUNNERS + + +@dataclass(frozen=True) +class SetupConfigs: + deploy_cfg_path: str + model_cfg: 
dict + checkpoint_path: str + device: str + work_dir: str + sample_idx: int + data_preprocessor_cfg: dict + + +def setup_configs(deploy_cfg_path, model_cfg_path, checkpoint_path, device, work_dir, sample_idx, module): + + os.makedirs(work_dir, exist_ok=True) + + deploy_cfg, model_cfg = load_config(deploy_cfg_path, model_cfg_path) + model_cfg.randomness = dict(seed=0, diff_rank_seed=False, deterministic=False) + model_cfg.launcher = "none" + + onnx_cfg = get_onnx_config(deploy_cfg) + input_names = onnx_cfg["input_names"] + output_names = onnx_cfg["output_names"] + + extract_pts_inputs = True if "points" in input_names or "voxels" in input_names else False + data_preprocessor_cfg = deepcopy(model_cfg.model.data_preprocessor) + + # TODO(KokSeang): Move out from data_preprocessor + voxelize_cfg = deepcopy(model_cfg.get("voxelize_cfg", None)) + + if extract_pts_inputs and voxelize_cfg is None: + # TODO(KokSeang): Remove this + # Default voxelize_layer + voxelize_cfg = dict( + max_num_points=10, + voxel_size=[0.17, 0.17, 0.2], + point_cloud_range=[-122.4, -122.4, -3.0, 122.4, 122.4, 5.0], + max_voxels=[120000, 160000], + deterministic=True, + ) + + if voxelize_cfg is not None: + voxelize_cfg.pop("voxelize_reduce", None) + data_preprocessor_cfg["voxel_layer"] = voxelize_cfg + data_preprocessor_cfg.voxel = True + + # load a sample + if "work_dir" not in model_cfg: + model_cfg["work_dir"] = work_dir + + return SetupConfigs( + deploy_cfg_path=deploy_cfg_path, + model_cfg_path=model_cfg_path, + checkpoint_path=checkpoint_path, + device=device, + work_dir=work_dir, + data_preprocessor_cfg=data_preprocessor_cfg, + ) + + + +def build_model(model_cfg, checkpoint_path, device): + + data_preprocessor = MODELS.build(data_preprocessor_cfg) + + + # load a sample + runner = RUNNERS.build(model_cfg) + runner.load_or_resume() + + data = runner.test_dataloader.dataset[args.sample_idx] + + # create model an inputs + task_processor = build_task_processor(model_cfg, deploy_cfg, device) + + 
torch_model = task_processor.build_pytorch_model(checkpoint_path) + data, model_inputs = task_processor.create_input(data, data_preprocessor=data_preprocessor, model=torch_model) + + if isinstance(model_inputs, list) and len(model_inputs) == 1: + model_inputs = model_inputs[0] + data_samples = data["data_samples"] + input_metas = {"data_samples": data_samples, "mode": "predict", "data_preprocessor": data_preprocessor} + + ( + voxels, + coors, + num_points_per_voxel, + points, + camera_mask, + imgs, + lidar2img, + cam2image, + camera2lidar, + geom_feats, + kept, + ranks, + indices, + ) = model_inputs diff --git a/projects/BEVFusion/deploy/voxel_detection.py b/projects/BEVFusion/deploy/voxel_detection.py index edb850af1..3f2df11d7 100644 --- a/projects/BEVFusion/deploy/voxel_detection.py +++ b/projects/BEVFusion/deploy/voxel_detection.py @@ -26,44 +26,14 @@ class VoxelDetection(_VoxelDetection): def __init__(self, model_cfg: mmengine.Config, deploy_cfg: mmengine.Config, device: str): super().__init__(model_cfg, deploy_cfg, device) - def create_input( - self, - batch: Union[str, Sequence[str]], - data_preprocessor: Optional[BaseDataPreprocessor] = None, - model: Optional[torch.nn.Module] = None, - ) -> Tuple[Dict, torch.Tensor]: + def extract_pts_inputs(self, collate_data): + """ """ - data = [batch] - collate_data = pseudo_collate(data) - data[0]["inputs"]["points"] = data[0]["inputs"]["points"].to(self.device) - - """ cam2img = data[0]["data_samples"].cam2img - cam2lidar = data[0]["data_samples"].cam2lidar - lidar2image = data[0]["data_samples"].lidar2img - lidar2camera = data[0]["data_samples"].lidar2cam - img_aux_matrix = data[0]["data_samples"].img_aug_matrix - - import pickle - d = {} - d["cam2img"] = cam2img - d["cam2lidar"] = cam2lidar - d["lidar2image"] = lidar2image - d["lidar2camera"] = lidar2camera - d["img_aux_matrix"] = img_aux_matrix - d["points"] = data[0]['inputs']['points'].cpu().numpy() - - with open("example.pkl", "wb") as f: - pickle.dump(d, f) 
""" - - assert data_preprocessor is not None - collate_data = data_preprocessor(collate_data, False) points = collate_data["inputs"]["points"][0] voxels = collate_data["inputs"]["voxels"] - inputs = [voxels["voxels"], voxels["num_points"], voxels["coors"]] feats = voxels["voxels"] num_points_per_voxel = voxels["num_points"] - # NOTE(knzo25): preprocessing in BEVFusion and the # data_preprocessor work different. # The original code/model uses [batch, x, y, z] @@ -74,17 +44,18 @@ def create_input( coors = voxels["coors"] coors = coors[:, 1:] - if "img_backbone" not in self.model_cfg.model: - return collate_data, [feats, coors, num_points_per_voxel] + [None] * 10 + return feats, coors, num_points_per_voxel, points + def extract_img_inputs(self, batch, collate_data, model): + """ """ # NOTE(knzo25): we want to load images from the camera # directly to the model in TensorRT img = batch["inputs"]["img"].type(torch.uint8) data_samples = collate_data["data_samples"][0] - lidar2image = feats.new_tensor(data_samples.lidar2img) - cam2image = feats.new_tensor(data_samples.cam2img) - camera2lidar = feats.new_tensor(data_samples.cam2lidar) + lidar2image = torch.tensor(data_samples.lidar2img).type(torch.float32) + cam2image = lidar2image.new_tensor(data_samples.cam2img) + camera2lidar = lidar2image.new_tensor(data_samples.cam2lidar) # NOTE(knzo25): ONNX/TensorRT do not support matrix inversion, # so they are taken out of the graph @@ -93,7 +64,7 @@ def create_input( # The extrinsics-related variables should only be computed once, # so we bring them outside the graph. 
Additionally, they require # argsort over the threshold available in TensorRT - img_aux_matrix = feats.new_tensor(np.stack(collate_data["data_samples"][0].img_aug_matrix)) + img_aux_matrix = lidar2image.new_tensor(np.stack(collate_data["data_samples"][0].img_aug_matrix)) img_aux_matrix_inverse = torch.inverse(img_aux_matrix) geom = model.view_transform.get_geometry( camera2lidar[..., :3, :3].unsqueeze(0).to(torch.device("cuda")), @@ -104,25 +75,78 @@ def create_input( ) geom_feats, kept, ranks, indices = model.view_transform.bev_pool_aux(geom) + camera_mask = torch.ones((img.size(0)), device=img.device) + return ( + camera_mask, + img, + lidar2image, + # NOTE(knzo25): not used during export + # but needed to comply with the signature + cam2image, + # NOTE(knzo25): not used during export + # but needed to comply with the signature + camera2lidar, + geom_feats.int(), + kept.bool(), # TensorRT treats bool as uint8 + ranks, + indices, + ) - # TODO(knzo25): just a test. remove - """ import pickle - data = {} - data["geom"] = geom.cpu() - data["geom_feats"] = geom_feats.cpu() - data["kept"] = kept.cpu() - data["ranks"] = ranks.cpu() - data["indices"] = indices.cpu() + def create_input( + self, + batch: Union[str, Sequence[str]], + data_preprocessor: Optional[BaseDataPreprocessor] = None, + model: Optional[torch.nn.Module] = None, + extract_pts_inputs: bool = True, + ) -> Tuple[Dict, torch.Tensor]: - with open("precomputed_features.pkl", "wb") as f: - pickle.dump(data, f) """ + data = [batch] + collate_data = pseudo_collate(data) + + """ cam2img = data[0]["data_samples"].cam2img + cam2lidar = data[0]["data_samples"].cam2lidar + lidar2image = data[0]["data_samples"].lidar2img + lidar2camera = data[0]["data_samples"].lidar2cam + img_aux_matrix = data[0]["data_samples"].img_aug_matrix + + import pickle + d = {} + d["cam2img"] = cam2img + d["cam2lidar"] = cam2lidar + d["lidar2image"] = lidar2image + d["lidar2camera"] = lidar2camera + d["img_aux_matrix"] = img_aux_matrix + 
d["points"] = data[0]['inputs']['points'].cpu().numpy() + + with open("example.pkl", "wb") as f: + pickle.dump(d, f) """ + + assert data_preprocessor is not None + collate_data = data_preprocessor(collate_data, False) + + if extract_pts_inputs: + data[0]["inputs"]["points"] = data[0]["inputs"]["points"].to(self.device) + feats, coors, num_points_per_voxel, points = self.extract_pts_inputs(collate_data=collate_data) + else: + feats = None + coors = None + num_points_per_voxel = None + points = None + + if "img_backbone" not in self.model_cfg.model: + assert feats is not None, f"lidar feats shouldn't be None!" + return collate_data, [feats, coors, num_points_per_voxel] + [None] * 10 + + camera_mask, img, lidar2image, cam2image, camera2lidar, geom_feats, kept, ranks, indices = ( + self.extract_img_inputs(batch=batch, model=model, collate_data=collate_data) + ) inputs = [ feats, coors, num_points_per_voxel, points, - torch.ones((img.size(0)), device=img.device), + camera_mask, img, lidar2image, # NOTE(knzo25): not used during export @@ -131,8 +155,8 @@ def create_input( # NOTE(knzo25): not used during export # but needed to comply with the signature camera2lidar, - geom_feats.int(), - kept.bool(), # TensorRT treats bool as uint8 + geom_feats, + kept, # TensorRT treats bool as uint8 ranks, indices, ] From e16ddeec77d1fd2ee02748f6ace232795809e575 Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Mon, 9 Feb 2026 17:06:39 +0900 Subject: [PATCH 03/22] Fix broken bevfusion exporter script --- projects/BEVFusion/deploy/torch2onnx.py | 278 ++---------------------- projects/BEVFusion/deploy/utils.py | 103 +++------ 2 files changed, 46 insertions(+), 335 deletions(-) diff --git a/projects/BEVFusion/deploy/torch2onnx.py b/projects/BEVFusion/deploy/torch2onnx.py index 3740e758b..9ec01ce1f 100644 --- a/projects/BEVFusion/deploy/torch2onnx.py +++ b/projects/BEVFusion/deploy/torch2onnx.py @@ -2,30 +2,10 @@ import argparse import logging import os -import os.path as osp -from copy 
import deepcopy -from typing import Any -import numpy as np -import onnx -import torch -from containers import TrtBevFusionCameraOnlyContainer, TrtBevFusionImageBackboneContainer, TrtBevFusionMainContainer -from mmdeploy.apis import build_task_processor -from mmdeploy.apis.onnx.passes import optimize_onnx -from mmdeploy.core import RewriterContext, patch_model -from mmdeploy.utils import ( - IR, - Backend, - get_backend, - get_dynamic_axes, - get_ir_config, - get_onnx_config, - get_root_logger, - load_config, -) -from mmdet3d.registry import MODELS -from mmengine.registry import RUNNERS +from exporter import Torch2OnnxExporter from torch.multiprocessing import set_start_method +from utils import setup_configs def parse_args(): @@ -51,247 +31,17 @@ def parse_args(): if __name__ == "__main__": args = parse_args() set_start_method("spawn", force=True) - logger = get_root_logger() - log_level = logging.getLevelName(args.log_level) - logger.setLevel(log_level) - - deploy_cfg_path = args.deploy_cfg - model_cfg_path = args.model_cfg - checkpoint_path = args.checkpoint - device = args.device - work_dir = args.work_dir - os.makedirs(work_dir, exist_ok=True) - - deploy_cfg, model_cfg = load_config(deploy_cfg_path, model_cfg_path) - model_cfg.randomness = dict(seed=0, diff_rank_seed=False, deterministic=False) - model_cfg.launcher = "none" - - onnx_cfg = get_onnx_config(deploy_cfg) - input_names = onnx_cfg["input_names"] - output_names = onnx_cfg["output_names"] - - extract_pts_inputs = True if "points" in input_names or "voxels" in input_names else False - data_preprocessor_cfg = deepcopy(model_cfg.model.data_preprocessor) - - # TODO(KokSeang): Move out from data_preprocessor - voxelize_cfg = deepcopy(model_cfg.get("voxelize_cfg", None)) - - if extract_pts_inputs and voxelize_cfg is None: - # TODO(KokSeang): Remove this - # Default voxelize_layer - voxelize_cfg = dict( - max_num_points=10, - voxel_size=[0.17, 0.17, 0.2], - point_cloud_range=[-122.4, -122.4, -3.0, 122.4, 
122.4, 5.0], - max_voxels=[120000, 160000], - deterministic=True, - ) - - if voxelize_cfg is not None: - voxelize_cfg.pop("voxelize_reduce", None) - data_preprocessor_cfg["voxel_layer"] = voxelize_cfg - data_preprocessor_cfg.voxel = True - - data_preprocessor = MODELS.build(data_preprocessor_cfg) - - # load a sample - if "work_dir" not in model_cfg: - model_cfg["work_dir"] = work_dir - - # load a sample - runner = RUNNERS.build(model_cfg) - runner.load_or_resume() - - data = runner.test_dataloader.dataset[args.sample_idx] - - # create model an inputs - task_processor = build_task_processor(model_cfg, deploy_cfg, device) - - torch_model = task_processor.build_pytorch_model(checkpoint_path) - data, model_inputs = task_processor.create_input(data, data_preprocessor=data_preprocessor, model=torch_model) - - if isinstance(model_inputs, list) and len(model_inputs) == 1: - model_inputs = model_inputs[0] - data_samples = data["data_samples"] - input_metas = {"data_samples": data_samples, "mode": "predict", "data_preprocessor": data_preprocessor} - - ( - voxels, - coors, - num_points_per_voxel, - points, - camera_mask, - imgs, - lidar2img, - cam2image, - camera2lidar, - geom_feats, - kept, - ranks, - indices, - ) = model_inputs - - # export to onnx - context_info = dict() - context_info["deploy_cfg"] = deploy_cfg - output_prefix = osp.join(work_dir, osp.splitext(osp.basename(deploy_cfg.onnx_config.save_file))[0]) - os.makedirs(work_dir, exist_ok=True) - backend = get_backend(deploy_cfg).value - - onnx_cfg = get_onnx_config(deploy_cfg) - opset_version = onnx_cfg.get("opset_version", 11) - - input_names = onnx_cfg["input_names"] - output_names = onnx_cfg["output_names"] - axis_names = input_names + output_names - dynamic_axes = get_dynamic_axes(deploy_cfg, axis_names) - verbose = not onnx_cfg.get("strip_doc_string", True) or onnx_cfg.get("verbose", False) - keep_initializers_as_inputs = onnx_cfg.get("keep_initializers_as_inputs", True) - optimize = onnx_cfg.get("optimize", 
False) - if backend == Backend.NCNN.value: - """NCNN backend needs a precise blob counts, while using onnx optimizer - will merge duplicate initilizers without reference count.""" - optimize = False - - output_path = output_prefix + ".onnx" - - logger = get_root_logger() - logger.info(f"Export PyTorch model to ONNX: {output_path}.") - - def _add_or_update(cfg: dict, key: str, val: Any): - if key in cfg and isinstance(cfg[key], dict) and isinstance(val, dict): - cfg[key].update(val) - else: - cfg[key] = val - - ir_config = dict( - type="onnx", - input_names=input_names, - output_names=output_names, - opset_version=opset_version, - dynamic_axes=dynamic_axes, - verbose=verbose, - keep_initializers_as_inputs=keep_initializers_as_inputs, + setup_config = setup_configs( + args.deploy_cfg_path, + args.model_cfg_path, + args.checkpoint, + args.device, + args.work_dir, + args.sample_idx, + args.module, ) + # Build the exporter + exporter = Torch2OnnxExporter(setup_config, args.log_level) - _add_or_update(deploy_cfg, "ir_config", ir_config) - ir = IR.get(get_ir_config(deploy_cfg)["type"]) - if isinstance(backend, Backend): - backend = backend.value - backend_config = dict(type=backend) - _add_or_update(deploy_cfg, "backend_config", backend_config) - - context_info["cfg"] = deploy_cfg - context_info["ir"] = ir - if "backend" not in context_info: - context_info["backend"] = backend - if "opset" not in context_info: - context_info["opset"] = opset_version - - # patch model - patched_model = patch_model(torch_model, cfg=deploy_cfg, backend=backend, ir=ir) - patched_model.eval() - patched_model.to(device) - if "onnx_custom_passes" not in context_info: - onnx_custom_passes = optimize_onnx if optimize else None - context_info["onnx_custom_passes"] = onnx_custom_passes - - with RewriterContext(**context_info), torch.no_grad(): - image_feats = None - - if "img_backbone" in model_cfg.model: - img_aug_matrix = imgs.new_tensor(np.stack(data_samples[0].img_aug_matrix)) - images_mean = 
data_preprocessor.mean.to(device) - images_std = data_preprocessor.std.to(device) - image_backbone_container = TrtBevFusionImageBackboneContainer(patched_model, images_mean, images_std) - model_inputs = (imgs.to(device=device, dtype=torch.uint8),) - - if args.module == "image_backbone": - torch.onnx.export( - image_backbone_container, - model_inputs, - output_path, - export_params=True, - input_names=input_names, - output_names=output_names, - opset_version=opset_version, - dynamic_axes=dynamic_axes, - keep_initializers_as_inputs=keep_initializers_as_inputs, - verbose=verbose, - ) - logger.info(f"Image backbone exported to {output_path}") - exit() - - image_feats = image_backbone_container(*model_inputs) - logger.info(f"Converted Image backbone") - - if args.module == "camera_bev_only_network": - main_container = TrtBevFusionCameraOnlyContainer(patched_model) - model_inputs = ( - lidar2img.to(device).float(), - img_aug_matrix.to(device).float(), - geom_feats.to(device).float(), - kept.to(device), - ranks.to(device).long(), - indices.to(device).long(), - image_feats, - ) - if "points" in input_names: - model_inputs += (points.to(device).float(),) - - elif args.module == "main_body": - main_container = TrtBevFusionMainContainer(patched_model) - model_inputs = ( - voxels.to(device), - coors.to(device), - num_points_per_voxel.to(device), - ) - if image_feats is not None: - model_inputs += ( - points.to(device).float(), - lidar2img.to(device).float(), - img_aug_matrix.to(device).float(), - geom_feats.to(device).float(), - kept.to(device), - ranks.to(device).long(), - indices.to(device).long(), - image_feats, - ) - - torch.onnx.export( - main_container, - model_inputs, - output_path.replace(".onnx", "_temp_to_be_fixed.onnx"), - export_params=True, - input_names=input_names, - output_names=output_names, - opset_version=opset_version, - dynamic_axes=dynamic_axes, - keep_initializers_as_inputs=keep_initializers_as_inputs, - verbose=verbose, - ) - - logger.info("Attempting 
to fix the graph (TopK's K becoming a tensor)") - - import onnx_graphsurgeon as gs - - model = onnx.load(output_path.replace(".onnx", "_temp_to_be_fixed.onnx")) - graph = gs.import_onnx(model) - - # Fix TopK - topk_nodes = [node for node in graph.nodes if node.op == "TopK"] - assert len(topk_nodes) == 1 - topk = topk_nodes[0] - k = model_cfg.num_proposals - topk.inputs[1] = gs.Constant("K", values=np.array([k], dtype=np.int64)) - topk.outputs[0].shape = [1, k] - topk.outputs[0].dtype = topk.inputs[0].dtype if topk.inputs[0].dtype else np.float32 - topk.outputs[1].shape = [1, k] - topk.outputs[1].dtype = np.int64 - - graph.cleanup().toposort() - onnx.save_model(gs.export_onnx(graph), output_path) - - logger.info(f"(Fixed) ONNX exported to {output_path}") - - logger.info(f"ONNX exported to {output_path}") + # Export the model + exporter.export() diff --git a/projects/BEVFusion/deploy/utils.py b/projects/BEVFusion/deploy/utils.py index 807212204..587789343 100644 --- a/projects/BEVFusion/deploy/utils.py +++ b/projects/BEVFusion/deploy/utils.py @@ -1,43 +1,42 @@ import os from copy import deepcopy -from dataclasses import dataclass from mmdeploy.utils import ( - IR, - Backend, - get_backend, - get_dynamic_axes, - get_ir_config, get_onnx_config, - get_root_logger, load_config, ) -from mmdet3d.registry import MODELS -from mmengine.registry import RUNNERS - - -@dataclass(frozen=True) -class SetupConfigs: - deploy_cfg_path: str - model_cfg: dict - checkpoint_path: str - device: str - work_dir: str - sample_idx: int - data_preprocessor_cfg: dict - - -def setup_configs(deploy_cfg_path, model_cfg_path, checkpoint_path, device, work_dir, sample_idx, module): +from .data_classes import SetupConfigs + + +def setup_configs( + deploy_cfg_path: str, + model_cfg_path: str, + checkpoint_path: str, + device: str, + work_dir: str, + sample_idx: int, + module: str, +) -> SetupConfigs: + """ + Setup configuration for the model. + + Args: + deploy_cfg_path: Path to the deploy config file. 
+ model_cfg_path: Path to the model config file. + checkpoint_path: Path to the checkpoint file. + device: Device to use for the model. + work_dir: Directory to save the model. + sample_idx: Index of the sample to use for the model. + module: Module to export. + """ os.makedirs(work_dir, exist_ok=True) - deploy_cfg, model_cfg = load_config(deploy_cfg_path, model_cfg_path) model_cfg.randomness = dict(seed=0, diff_rank_seed=False, deterministic=False) model_cfg.launcher = "none" onnx_cfg = get_onnx_config(deploy_cfg) input_names = onnx_cfg["input_names"] - output_names = onnx_cfg["output_names"] extract_pts_inputs = True if "points" in input_names or "voxels" in input_names else False data_preprocessor_cfg = deepcopy(model_cfg.model.data_preprocessor) @@ -66,50 +65,12 @@ def setup_configs(deploy_cfg_path, model_cfg_path, checkpoint_path, device, work model_cfg["work_dir"] = work_dir return SetupConfigs( - deploy_cfg_path=deploy_cfg_path, - model_cfg_path=model_cfg_path, - checkpoint_path=checkpoint_path, - device=device, - work_dir=work_dir, - data_preprocessor_cfg=data_preprocessor_cfg, + deploy_cfg=deploy_cfg, + model_cfg=model_cfg, + checkpoint_path=checkpoint_path, + device=device, + data_preprocessor_cfg=data_preprocessor_cfg, + sample_idx=sample_idx, + module=module, + onnx_cfg=onnx_cfg, ) - - - -def build_model(model_cfg, checkpoint_path, device): - - data_preprocessor = MODELS.build(data_preprocessor_cfg) - - - # load a sample - runner = RUNNERS.build(model_cfg) - runner.load_or_resume() - - data = runner.test_dataloader.dataset[args.sample_idx] - - # create model an inputs - task_processor = build_task_processor(model_cfg, deploy_cfg, device) - - torch_model = task_processor.build_pytorch_model(checkpoint_path) - data, model_inputs = task_processor.create_input(data, data_preprocessor=data_preprocessor, model=torch_model) - - if isinstance(model_inputs, list) and len(model_inputs) == 1: - model_inputs = model_inputs[0] - data_samples = data["data_samples"] - 
input_metas = {"data_samples": data_samples, "mode": "predict", "data_preprocessor": data_preprocessor} - - ( - voxels, - coors, - num_points_per_voxel, - points, - camera_mask, - imgs, - lidar2img, - cam2image, - camera2lidar, - geom_feats, - kept, - ranks, - indices, - ) = model_inputs From f4b02c6ae24d7c51a707850c16fd0fbeaa579b7a Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Mon, 9 Feb 2026 17:06:45 +0900 Subject: [PATCH 04/22] Fix broken bevfusion exporter script --- projects/BEVFusion/deploy/builder.py | 238 +++++++++++++++++++++ projects/BEVFusion/deploy/data_classes.py | 53 +++++ projects/BEVFusion/deploy/exporter.py | 244 ++++++++++++++++++++++ 3 files changed, 535 insertions(+) create mode 100644 projects/BEVFusion/deploy/builder.py create mode 100644 projects/BEVFusion/deploy/data_classes.py create mode 100644 projects/BEVFusion/deploy/exporter.py diff --git a/projects/BEVFusion/deploy/builder.py b/projects/BEVFusion/deploy/builder.py new file mode 100644 index 000000000..e7fb3798e --- /dev/null +++ b/projects/BEVFusion/deploy/builder.py @@ -0,0 +1,238 @@ +import logging +import os.path as osp +from typing import Any + +import numpy as np +import torch +from containers import TrtBevFusionCameraOnlyContainer, TrtBevFusionImageBackboneContainer, TrtBevFusionMainContainer +from data_classes import BackendConfigs, BuilderData, ModelData, ModelInputs, SetupConfigs +from mmdeploy.apis import build_task_processor +from mmdeploy.apis.onnx.passes import optimize_onnx +from mmdeploy.core import RewriterContext, patch_model +from mmdeploy.utils import ( + IR, + Backend, + get_backend, + get_dynamic_axes, + get_ir_config, + get_onnx_config, + get_root_logger, + load_config, +) +from mmdet3d.registry import MODELS +from mmengine.registry import RUNNERS + +from projects.BEVFusion.deploy.torch2onnx import backend + + +class ExportBuilder: + + def __init__(self, setup_configs: SetupConfigs): + self.setup_configs = setup_configs + + def build(self): + """Build the 
model. + + Returns: + Model data. + """ + # Build the model data + model_data = self._build_model_data() + + # Build the backend configs + backend = self._build_backend() + + # Build the optimize configs + optimize = self._build_optimize_configs(backend) + + # Build the IR configs + ir_configs = self._build_ir_configs() + + # Update the deploy config + self._update_dpeloy_cfg(ir_configs, backend) + + # Build the intermediate representations + ir = self._build_intermediate_representations(ir_configs) + + # Build the context info + context_info = self._build_context_info(ir, ir_configs, backend, optimize) + + # Patch the model + patched_model = self._build_patched_model(model_data, backend, ir) + + return BuilderData( + model_data=model_data, + ir_configs=ir_configs, + context_info=context_info, + patched_model=patched_model, + ) + + def _build_model_data(self): + """Build the model. + + Args: + setup_config: Setup configuration for the model. + + Returns: + Model data. + """ + data_preprocessor = MODELS.build(self.setup_configs.data_preprocessor_cfg) + + # load a sample + runner = RUNNERS.build(self.setup_configs.model_cfg) + runner.load_or_resume() + data = runner.test_dataloader.dataset[self.setup_configs.sample_idx] + + # create model an inputs + task_processor = build_task_processor( + self.setup_configs.model_cfg, self.setup_configs.deploy_cfg, self.setup_configs.device + ) + + torch_model = task_processor.build_pytorch_model(self.setup_configs.checkpoint_path) + data, model_inputs = task_processor.create_input(data, data_preprocessor=data_preprocessor, model=torch_model) + + if isinstance(model_inputs, list) and len(model_inputs) == 1: + model_inputs = model_inputs[0] + + data_samples = data["data_samples"] + input_metas = {"data_samples": data_samples, "mode": "predict", "data_preprocessor": data_preprocessor} + + ( + voxels, + coors, + num_points_per_voxel, + points, + camera_mask, + imgs, + lidar2img, + cam2image, + camera2lidar, + geom_feats, + kept, + 
ranks, + indices, + ) = model_inputs + + return ModelData( + model_inputs=ModelInputs( + voxels=voxels, + coors=coors, + num_points_per_voxel=num_points_per_voxel, + points=points, + camera_mask=camera_mask, + imgs=imgs, + lidar2img=lidar2img, + cam2image=cam2image, + camera2lidar=camera2lidar, + geom_feats=geom_feats, + kept=kept, + ranks=ranks, + indices=indices, + ), + torch_model=torch_model, + input_metas=input_metas, + ) + + @staticmethod + def _add_or_update(cfg: dict, key: str, val: Any) -> None: + if key in cfg and isinstance(cfg[key], dict) and isinstance(val, dict): + cfg[key].update(val) + else: + cfg[key] = val + + def update_deploy_cfg(self, ir_configs: dict, backend: Backend) -> None: + """Update the deploy config. + + Args: + ir_configs: IR configs. + backend_configs: Backend configs. + """ + self._add_or_update(self.setup_configs.deploy_cfg, "ir_config", ir_configs) + self._add_or_update(self.setup_configs.deploy_cfg, "backend_config", dict(type=backend)) + + def _build_patched_model(self, model_data: ModelData, backend: str, ir: IR) -> torch.nn.Module: + """Build the patched model. + + Returns: + Patched model. + """ + patched_model = patch_model(model_data.torch_model, cfg=self.setup_configs.deploy_cfg, backend=backend, ir=ir) + # Set Patched model to eval() for inference status + patched_model.eval() + patched_model.to(self.setup_configs.device) + return patched_model + + def _build_backend(self) -> str: + """Build the backend configs. + + Returns: + Backend configs. + """ + return get_backend(self.setup_configs.deploy_cfg).value + + def _build_optimize_configs(self, backend: str) -> dict: + """Build the optimize configs. + + Returns: + Optimize configs. 
+ """ + optimize = self.setup_configs.onnx_cfg.get("optimize", False) + if backend == Backend.NCNN.value: + """NCNN backend needs a precise blob counts, while using onnx optimizer + will merge duplicate initilizers without reference count.""" + optimize = False + return optimize + + def _build_ir_configs(self) -> dict: + """Build the IR configs. + + Returns: + IR configs. + """ + onnx_cfg = self.setup_configs.onnx_cfg + input_names = onnx_cfg["input_names"] + output_names = onnx_cfg["output_names"] + axis_names = input_names + output_names + dynamic_axes = get_dynamic_axes(self.setup_configs.deploy_cfg, axis_names) + verbose = not onnx_cfg.get("strip_doc_string", True) or onnx_cfg.get("verbose", False) + keep_initializers_as_inputs = onnx_cfg.get("keep_initializers_as_inputs", True) + opset_version = onnx_cfg.get("opset_version", 11) + + ir_configs = dict( + type="onnx", + input_names=input_names, + output_names=output_names, + opset_version=opset_version, + dynamic_axes=dynamic_axes, + verbose=verbose, + keep_initializers_as_inputs=keep_initializers_as_inputs, + ) + return ir_configs + + def _build_intermediate_representations(self) -> IR: + """Build the intermediate representations (IR). + + Returns: + Intermediate representation (IR). + """ + return IR.get(get_ir_config(self.setup_configs.deploy_cfg)["type"]) + + def _build_context_info(self, ir: IR, ir_configs: dict, backend: str, optimize: bool) -> dict: + """Build the context info. + + Returns: + Context info. 
+ """ + if optimize: + onnx_custom_passes = optimize_onnx + else: + onnx_custom_passes = None + + return dict( + deploy_cfg=self.setup_configs.deploy_cfg, + ir=ir, + backend=backend, + opset=ir_configs["opset_version"], + cfg=self.setup_configs.deploy_cfg, + onnx_custom_passes=onnx_custom_passes, + ) diff --git a/projects/BEVFusion/deploy/data_classes.py b/projects/BEVFusion/deploy/data_classes.py new file mode 100644 index 000000000..f65970f03 --- /dev/null +++ b/projects/BEVFusion/deploy/data_classes.py @@ -0,0 +1,53 @@ +from dataclasses import dataclass + +import torch + + +@dataclass(frozen=True) +class SetupConfigs: + deploy_cfg: dict + model_cfg: dict + checkpoint_path: str + device: str + data_preprocessor_cfg: dict + sample_idx: int + module: str + onnx_cfg: dict + + +@dataclass(frozen=True) +class ModelInputs: + voxels: torch.Tensor + coors: torch.Tensor + num_points_per_voxel: torch.Tensor + points: torch.Tensor + camera_mask: torch.Tensor + imgs: torch.Tensor + lidar2img: torch.Tensor + cam2image: torch.Tensor + camera2lidar: torch.Tensor + geom_feats: torch.Tensor + kept: torch.Tensor + ranks: torch.Tensor + indices: torch.Tensor + + +@dataclass(frozen=True) +class ModelData: + model_inputs: ModelInputs + torch_model: torch.nn.Module + input_metas: dict + + +@dataclass(frozen=True) +class BackendConfigs: + type: str + optimize: bool + + +@dataclass(frozen=True) +class BuilderData: + model_data: ModelData + ir_configs: dict + context_info: dict + patched_model: torch.nn.Module diff --git a/projects/BEVFusion/deploy/exporter.py b/projects/BEVFusion/deploy/exporter.py new file mode 100644 index 000000000..c18480b66 --- /dev/null +++ b/projects/BEVFusion/deploy/exporter.py @@ -0,0 +1,244 @@ +import logging +import os.path as osp +from typing import Optional + +import numpy as np +import onnx +import onnx_graphsurgeon as gs +import torch +from builder import ExportBuilder +from containers import TrtBevFusionCameraOnlyContainer, 
TrtBevFusionImageBackboneContainer, TrtBevFusionMainContainer +from data_classes import ModelData, SetupConfigs +from mmdeploy.core import RewriterContext +from mmdeploy.utils import ( + get_root_logger, +) + + +class Torch2OnnxExporter: + + def __init__(self, setup_configs: SetupConfigs, log_level: str): + self.setup_configs = setup_configs + log_level = logging.getLevelName(log_level) + self.logger = get_root_logger() + self.logger.setLevel(log_level) + self.output_prefix = osp.join( + self.setup_configs.work_dir, + osp.splitext(osp.basename(self.setup_configs.deploy_cfg.onnx_config.save_file))[0], + ) + self.output_path = self.output_prefix + ".onnx" + self.builder = ExportBuilder(self.setup_configs) + + def export(self): + self.logger.info(f"Export PyTorch model to ONNX: {self.output_path}.") + + # Build the model data and configs + builder_data = self.builder.build() + + # Export the model + self._export_model( + model_data=builder_data.model_data, + context_info=builder_data.context_info, + patched_model=builder_data.patched_model, + ir_configs=builder_data.ir_configs, + ) + + # Fix the ONNX graph + self._fix_onnx_graph() + + self.logger.info(f"ONNX exported to {self.output_path}") + + def _export_model( + self, model_data: ModelData, context_info: dict, patched_model: torch.nn.Module, ir_configs: dict + ) -> None: + """Rewrite the context info. + + Returns: + Context info. 
+ """ + with RewriterContext(**context_info), torch.no_grad(): + image_feats = None + if "img_backbone" in self.setup_configs.model_cfg.model: + image_feats = self._export_image_backbone(model_data, ir_configs, patched_model) + # If the image backbone feat is None, it's exported to ONNX and exit + if image_feats is None: + return + + # Export the camera bev only network + if self.setup_configs.module == "camera_bev_only_network": + self._export_camera_bev_only_network( + model_data=model_data, ir_configs=ir_configs, patched_model=patched_model, image_feats=image_feats + ) + + # Export the main network with camera or lidar-only + elif self.setup_configs.module == "main_body": + self._export_main_body(model_data=model_data, ir_configs=ir_configs, patched_model=patched_model) + + def _export_image_backbone(self, model_data: ModelData, ir_configs: dict, patched_model: torch.nn.Module) -> None: + """Export the image backbone. + + Returns: + Image backbone. + """ + data_preprocessor = model_data.input_metas["data_preprocessor"] + model_inputs_data = model_data.model_inputs + device = self.setup_configs.device + + imgs = model_inputs_data.imgs + images_mean = data_preprocessor.mean.to(device) + images_std = data_preprocessor.std.to(device) + image_backbone_container = TrtBevFusionImageBackboneContainer(patched_model, images_mean, images_std) + model_inputs = (imgs.to(device=device, dtype=torch.uint8),) + + if self.setup_configs.module == "image_backbone": + torch.onnx.export( + image_backbone_container, + model_inputs, + self.output_path, + export_params=True, + input_names=ir_configs["input_names"], + output_names=ir_configs["output_names"], + opset_version=ir_configs["opset_version"], + dynamic_axes=ir_configs["dynamic_axes"], + keep_initializers_as_inputs=ir_configs["keep_initializers_as_inputs"], + verbose=ir_configs["verbose"], + ) + self.logger.info(f"Image backbone exported to {self.output_path}") + return + + image_feats = image_backbone_container(*model_inputs) + 
self.logger.info(f"Converted Image backbone") + return image_feats + + def _export_camera_bev_only_network( + self, + model_data: ModelData, + ir_configs: dict, + patched_model: torch.nn.Module, + image_feats: Optional[torch.Tensor], + ) -> None: + """Export the camera bev only network. + + Returns: + Camera bev only network. + """ + main_container = TrtBevFusionCameraOnlyContainer(patched_model) + data_samples = model_data.input_metas["data_samples"] + imgs = model_data.model_inputs.imgs + lidar2img = model_data.model_inputs.lidar2img + geom_feats = model_data.model_inputs.geom_feats + kept = model_data.model_inputs.kept + ranks = model_data.model_inputs.ranks + indices = model_data.model_inputs.indices + points = model_data.model_inputs.points + img_aug_matrix = imgs.new_tensor(np.stack(data_samples[0].img_aug_matrix)) + device = self.setup_configs.device + + model_inputs = ( + lidar2img.to(device).float(), + img_aug_matrix.to(device).float(), + geom_feats.to(device).float(), + kept.to(device), + ranks.to(device).long(), + indices.to(device).long(), + image_feats, + ) + + if "points" in ir_configs["input_names"]: + model_inputs += (points.to(device).float(),) + + torch.onnx.export( + main_container, + model_inputs, + self.output_path.replace(".onnx", "_temp_to_be_fixed.onnx"), + export_params=True, + input_names=ir_configs["input_names"], + output_names=ir_configs["output_names"], + opset_version=ir_configs["opset_version"], + dynamic_axes=ir_configs["dynamic_axes"], + keep_initializers_as_inputs=ir_configs["keep_initializers_as_inputs"], + verbose=ir_configs["verbose"], + ) + self.logger.info(f"Camera bev only network exported to {self.output_path}") + + def _export_main_body( + self, + model_data: ModelData, + ir_configs: dict, + patched_model: torch.nn.Module, + image_feats: Optional[torch.Tensor], + ) -> None: + """Export the main body. + + Returns: + Main body. 
+ """ + main_container = TrtBevFusionMainContainer(patched_model) + voxels = model_data.model_inputs.voxels + coors = model_data.model_inputs.coors + num_points_per_voxel = model_data.model_inputs.num_points_per_voxel + device = self.setup_configs.device + model_inputs = ( + voxels.to(device), + coors.to(device), + num_points_per_voxel.to(device), + ) + + if image_feats is not None: + points = model_data.model_inputs.points + lidar2img = model_data.model_inputs.lidar2img + img_aug_matrix = model_data.model_inputs.img_aug_matrix + geom_feats = model_data.model_inputs.geom_feats + kept = model_data.model_inputs.kept + ranks = model_data.model_inputs.ranks + indices = model_data.model_inputs.indices + model_inputs += ( + points.to(device).float(), + lidar2img.to(device).float(), + img_aug_matrix.to(device).float(), + geom_feats.to(device).float(), + kept.to(device), + ranks.to(device).long(), + indices.to(device).long(), + image_feats, + ) + + torch.onnx.export( + main_container, + model_inputs, + self.output_path.replace(".onnx", "_temp_to_be_fixed.onnx"), + export_params=True, + input_names=ir_configs["input_names"], + output_names=ir_configs["output_names"], + opset_version=ir_configs["opset_version"], + dynamic_axes=ir_configs["dynamic_axes"], + keep_initializers_as_inputs=ir_configs["keep_initializers_as_inputs"], + verbose=ir_configs["verbose"], + ) + self.logger.info(f"Camera bev only network exported to {self.output_path}") + + def _fix_onnx_graph(self) -> None: + """Fix the ONNX graph. + + Returns: + ONNX graph. 
+ """ + self.logger.info("Attempting to fix the graph (TopK's K becoming a tensor)") + model = onnx.load(self.output_path.replace(".onnx", "_temp_to_be_fixed.onnx")) + graph = gs.import_onnx(model) + + # Fix TopK + topk_nodes = [node for node in graph.nodes if node.op == "TopK"] + assert len(topk_nodes) == 1 + topk = topk_nodes[0] + k = self.setup_configs.model_cfg.num_proposals + topk.inputs[1] = gs.Constant("K", values=np.array([k], dtype=np.int64)) + topk.outputs[0].shape = [1, k] + topk.outputs[0].dtype = topk.inputs[0].dtype if topk.inputs[0].dtype else np.float32 + topk.outputs[1].shape = [1, k] + topk.outputs[1].dtype = np.int64 + + graph.cleanup().toposort() + onnx.save_model(gs.export_onnx(graph), self.output_path) + + self.logger.info(f"(Fixed) ONNX exported to {self.output_path}") From aa8c322095814e7e92fac26e8982debe789441bd Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Mon, 9 Feb 2026 17:08:19 +0900 Subject: [PATCH 05/22] Fix broken bevfusion exporter script --- projects/BEVFusion/deploy/builder.py | 12 +++--------- projects/BEVFusion/deploy/data_classes.py | 12 ++++++++++++ projects/BEVFusion/deploy/exporter.py | 2 ++ projects/BEVFusion/deploy/utils.py | 2 ++ 4 files changed, 19 insertions(+), 9 deletions(-) diff --git a/projects/BEVFusion/deploy/builder.py b/projects/BEVFusion/deploy/builder.py index e7fb3798e..9c1a66d01 100644 --- a/projects/BEVFusion/deploy/builder.py +++ b/projects/BEVFusion/deploy/builder.py @@ -1,23 +1,17 @@ -import logging -import os.path as osp +# Copyright (c) OpenMMLab. All rights reserved. 
from typing import Any -import numpy as np import torch -from containers import TrtBevFusionCameraOnlyContainer, TrtBevFusionImageBackboneContainer, TrtBevFusionMainContainer -from data_classes import BackendConfigs, BuilderData, ModelData, ModelInputs, SetupConfigs +from data_classes import BuilderData, ModelData, ModelInputs, SetupConfigs from mmdeploy.apis import build_task_processor from mmdeploy.apis.onnx.passes import optimize_onnx -from mmdeploy.core import RewriterContext, patch_model +from mmdeploy.core import patch_model from mmdeploy.utils import ( IR, Backend, get_backend, get_dynamic_axes, get_ir_config, - get_onnx_config, - get_root_logger, - load_config, ) from mmdet3d.registry import MODELS from mmengine.registry import RUNNERS diff --git a/projects/BEVFusion/deploy/data_classes.py b/projects/BEVFusion/deploy/data_classes.py index f65970f03..7d3c41a71 100644 --- a/projects/BEVFusion/deploy/data_classes.py +++ b/projects/BEVFusion/deploy/data_classes.py @@ -1,3 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+ from dataclasses import dataclass import torch @@ -5,6 +7,8 @@ @dataclass(frozen=True) class SetupConfigs: + """Setup configurations for the model.""" + deploy_cfg: dict model_cfg: dict checkpoint_path: str @@ -17,6 +21,8 @@ class SetupConfigs: @dataclass(frozen=True) class ModelInputs: + """Model inputs for the model.""" + voxels: torch.Tensor coors: torch.Tensor num_points_per_voxel: torch.Tensor @@ -34,6 +40,8 @@ class ModelInputs: @dataclass(frozen=True) class ModelData: + """Model data for the model.""" + model_inputs: ModelInputs torch_model: torch.nn.Module input_metas: dict @@ -41,12 +49,16 @@ class ModelData: @dataclass(frozen=True) class BackendConfigs: + """Backend configurations for the model.""" + type: str optimize: bool @dataclass(frozen=True) class BuilderData: + """Builder data for the model.""" + model_data: ModelData ir_configs: dict context_info: dict diff --git a/projects/BEVFusion/deploy/exporter.py b/projects/BEVFusion/deploy/exporter.py index c18480b66..ac2692905 100644 --- a/projects/BEVFusion/deploy/exporter.py +++ b/projects/BEVFusion/deploy/exporter.py @@ -1,3 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. + import logging import os.path as osp from typing import Optional diff --git a/projects/BEVFusion/deploy/utils.py b/projects/BEVFusion/deploy/utils.py index 587789343..5a5f86686 100644 --- a/projects/BEVFusion/deploy/utils.py +++ b/projects/BEVFusion/deploy/utils.py @@ -1,3 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+ import os from copy import deepcopy From a4bb4d615e7535f91558ca8c624e71509e63cbd9 Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Mon, 9 Feb 2026 17:59:46 +0900 Subject: [PATCH 06/22] Fix import statements --- projects/BEVFusion/deploy/builder.py | 2 -- projects/BEVFusion/deploy/utils.py | 3 +-- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/projects/BEVFusion/deploy/builder.py b/projects/BEVFusion/deploy/builder.py index 9c1a66d01..d8183a0de 100644 --- a/projects/BEVFusion/deploy/builder.py +++ b/projects/BEVFusion/deploy/builder.py @@ -16,8 +16,6 @@ from mmdet3d.registry import MODELS from mmengine.registry import RUNNERS -from projects.BEVFusion.deploy.torch2onnx import backend - class ExportBuilder: diff --git a/projects/BEVFusion/deploy/utils.py b/projects/BEVFusion/deploy/utils.py index 5a5f86686..393658925 100644 --- a/projects/BEVFusion/deploy/utils.py +++ b/projects/BEVFusion/deploy/utils.py @@ -3,13 +3,12 @@ import os from copy import deepcopy +from data_classes import SetupConfigs from mmdeploy.utils import ( get_onnx_config, load_config, ) -from .data_classes import SetupConfigs - def setup_configs( deploy_cfg_path: str, From 7fadaf24404ded20cbad26f9c04d317d04509097 Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Mon, 9 Feb 2026 18:03:37 +0900 Subject: [PATCH 07/22] Fix import statements --- projects/BEVFusion/deploy/data_classes.py | 1 + projects/BEVFusion/deploy/torch2onnx.py | 4 ++-- projects/BEVFusion/deploy/utils.py | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/projects/BEVFusion/deploy/data_classes.py b/projects/BEVFusion/deploy/data_classes.py index 7d3c41a71..4e244b441 100644 --- a/projects/BEVFusion/deploy/data_classes.py +++ b/projects/BEVFusion/deploy/data_classes.py @@ -17,6 +17,7 @@ class SetupConfigs: sample_idx: int module: str onnx_cfg: dict + work_dir: str @dataclass(frozen=True) diff --git a/projects/BEVFusion/deploy/torch2onnx.py b/projects/BEVFusion/deploy/torch2onnx.py index 
9ec01ce1f..9ea60197b 100644 --- a/projects/BEVFusion/deploy/torch2onnx.py +++ b/projects/BEVFusion/deploy/torch2onnx.py @@ -32,8 +32,8 @@ def parse_args(): args = parse_args() set_start_method("spawn", force=True) setup_config = setup_configs( - args.deploy_cfg_path, - args.model_cfg_path, + args.deploy_cfg, + args.model_cfg, args.checkpoint, args.device, args.work_dir, diff --git a/projects/BEVFusion/deploy/utils.py b/projects/BEVFusion/deploy/utils.py index 393658925..c92d97152 100644 --- a/projects/BEVFusion/deploy/utils.py +++ b/projects/BEVFusion/deploy/utils.py @@ -74,4 +74,5 @@ def setup_configs( sample_idx=sample_idx, module=module, onnx_cfg=onnx_cfg, + work_dir=work_dir, ) From a39a1417cdec4289cead02afb9e1cec23b54678c Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Mon, 9 Feb 2026 18:03:47 +0900 Subject: [PATCH 08/22] Fix import statements --- ...y_lidar_only_intensity_tensorrt_dynamic.py | 49 +++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 projects/BEVFusion/configs/deploy/bevfusion_main_body_lidar_only_intensity_tensorrt_dynamic.py diff --git a/projects/BEVFusion/configs/deploy/bevfusion_main_body_lidar_only_intensity_tensorrt_dynamic.py b/projects/BEVFusion/configs/deploy/bevfusion_main_body_lidar_only_intensity_tensorrt_dynamic.py new file mode 100644 index 000000000..9e089fe05 --- /dev/null +++ b/projects/BEVFusion/configs/deploy/bevfusion_main_body_lidar_only_intensity_tensorrt_dynamic.py @@ -0,0 +1,49 @@ +codebase_config = dict(type="mmdet3d", task="VoxelDetection", model_type="end2end") + +custom_imports = dict( + imports=[ + "projects.BEVFusion.deploy", + "projects.BEVFusion.bevfusion", + "projects.SparseConvolution", + ], + allow_failed_imports=False, +) + +backend_config = dict( + type="tensorrt", + common_config=dict(max_workspace_size=1 << 32), + model_inputs=[ + dict( + input_shapes=dict( + voxels=dict( + min_shape=[1, 10, 5], opt_shape=[64000, 10, 5], max_shape=[256000, 10, 5] + ), # [M, maximum number of points, 
features] features=5 when using intensity + coors=dict(min_shape=[1, 3], opt_shape=[64000, 3], max_shape=[256000, 3]), + num_points_per_voxel=dict(min_shape=[1], opt_shape=[64000], max_shape=[256000]), + ) + ) + ], +) + +onnx_config = dict( + type="onnx", + export_params=True, + keep_initializers_as_inputs=False, + opset_version=17, + save_file="main_body.onnx", + input_names=["voxels", "coors", "num_points_per_voxel"], + output_names=["bbox_pred", "score", "label_pred"], + dynamic_axes={ + "voxels": { + 0: "voxels_num", + }, + "coors": { + 0: "voxels_num", + }, + "num_points_per_voxel": { + 0: "voxels_num", + }, + }, + input_shape=None, + verbose=True, +) From cc67c6573700556631086cb06dc6bc196aa59d6a Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Mon, 9 Feb 2026 18:30:05 +0900 Subject: [PATCH 09/22] Fix import statements --- projects/BEVFusion/bevfusion/bevfusion.py | 2 +- ...ar_voxel_second_secfpn_30e_4xb8_j6gen2_base_120m.py | 4 ++-- projects/BEVFusion/deploy/__init__.py | 5 ++--- projects/BEVFusion/deploy/builder.py | 6 +++--- projects/BEVFusion/deploy/exporter.py | 10 ++++++++-- projects/BEVFusion/deploy/torch2onnx.py | 8 ++++++-- 6 files changed, 22 insertions(+), 13 deletions(-) diff --git a/projects/BEVFusion/bevfusion/bevfusion.py b/projects/BEVFusion/bevfusion/bevfusion.py index a1589355f..243b3beb5 100644 --- a/projects/BEVFusion/bevfusion/bevfusion.py +++ b/projects/BEVFusion/bevfusion/bevfusion.py @@ -355,7 +355,7 @@ def extract_feat( ) features.append(img_feature) - if points is not None and self.pts_middle_encoder is not None: + if self.pts_middle_encoder is not None: pts_feature = self.extract_pts_feat( batch_inputs_dict.get("voxels", {}).get("voxels", None), batch_inputs_dict.get("voxels", {}).get("coors", None), diff --git a/projects/BEVFusion/configs/t4dataset/BEVFusion-L/bevfusion_lidar_voxel_second_secfpn_30e_4xb8_j6gen2_base_120m.py 
b/projects/BEVFusion/configs/t4dataset/BEVFusion-L/bevfusion_lidar_voxel_second_secfpn_30e_4xb8_j6gen2_base_120m.py index 178f5ff3d..e698e9eb1 100644 --- a/projects/BEVFusion/configs/t4dataset/BEVFusion-L/bevfusion_lidar_voxel_second_secfpn_30e_4xb8_j6gen2_base_120m.py +++ b/projects/BEVFusion/configs/t4dataset/BEVFusion-L/bevfusion_lidar_voxel_second_secfpn_30e_4xb8_j6gen2_base_120m.py @@ -12,8 +12,8 @@ custom_imports["imports"] += ["autoware_ml.detection3d.datasets.transforms"] # user setting -data_root = "data/t4dataset/" -info_directory_path = "info/user_name/" +data_root = "data/t4datasets/" +info_directory_path = "info/kokseang_2_5/" experiment_group_name = "bevfusion_lidar_intensity/j6gen2_base/" + _base_.dataset_type experiment_name = "lidar_voxel_second_secfpn_30e_4xb8_j6gen2_base_120m" diff --git a/projects/BEVFusion/deploy/__init__.py b/projects/BEVFusion/deploy/__init__.py index 5eee97a02..ea641c12e 100644 --- a/projects/BEVFusion/deploy/__init__.py +++ b/projects/BEVFusion/deploy/__init__.py @@ -1,5 +1,4 @@ +from .exporter import Torch2OnnxExporter from .voxel_detection import VoxelDetection -__all__ = [ - "VoxelDetection", -] +__all__ = ["VoxelDetection", "Torch2OnnxExporter"] diff --git a/projects/BEVFusion/deploy/builder.py b/projects/BEVFusion/deploy/builder.py index d8183a0de..92d96cd0d 100644 --- a/projects/BEVFusion/deploy/builder.py +++ b/projects/BEVFusion/deploy/builder.py @@ -41,10 +41,10 @@ def build(self): ir_configs = self._build_ir_configs() # Update the deploy config - self._update_dpeloy_cfg(ir_configs, backend) + self._update_deploy_cfg(ir_configs, backend) # Build the intermediate representations - ir = self._build_intermediate_representations(ir_configs) + ir = self._build_intermediate_representations() # Build the context info context_info = self._build_context_info(ir, ir_configs, backend, optimize) @@ -132,7 +132,7 @@ def _add_or_update(cfg: dict, key: str, val: Any) -> None: else: cfg[key] = val - def update_deploy_cfg(self, 
ir_configs: dict, backend: Backend) -> None: + def _update_deploy_cfg(self, ir_configs: dict, backend: Backend) -> None: """Update the deploy config. Args: diff --git a/projects/BEVFusion/deploy/exporter.py b/projects/BEVFusion/deploy/exporter.py index ac2692905..1ff26d777 100644 --- a/projects/BEVFusion/deploy/exporter.py +++ b/projects/BEVFusion/deploy/exporter.py @@ -74,7 +74,9 @@ def _export_model( # Export the main network with camera or lidar-only elif self.setup_configs.module == "main_body": - self._export_main_body(model_data=model_data, ir_configs=ir_configs, patched_model=patched_model) + self._export_main_body( + model_data=model_data, ir_configs=ir_configs, patched_model=patched_model, image_feats=image_feats + ) def _export_image_backbone(self, model_data: ModelData, ir_configs: dict, patched_model: torch.nn.Module) -> None: """Export the image backbone. @@ -217,7 +219,11 @@ def _export_main_body( keep_initializers_as_inputs=ir_configs["keep_initializers_as_inputs"], verbose=ir_configs["verbose"], ) - self.logger.info(f"Camera bev only network exported to {self.output_path}") + if image_feats is None: + model_name = "lidar-only" + else: + model_name = "camera-lidar" + self.logger.info(f"Main body network with {model_name} exported to {self.output_path}") def _fix_onnx_graph(self) -> None: """Fix the ONNX graph. 
diff --git a/projects/BEVFusion/deploy/torch2onnx.py b/projects/BEVFusion/deploy/torch2onnx.py index 9ea60197b..87085b875 100644 --- a/projects/BEVFusion/deploy/torch2onnx.py +++ b/projects/BEVFusion/deploy/torch2onnx.py @@ -3,6 +3,10 @@ import logging import os +from mmdet3d.utils import register_all_modules + +register_all_modules(init_default_scope=True) + from exporter import Torch2OnnxExporter from torch.multiprocessing import set_start_method from utils import setup_configs @@ -40,8 +44,8 @@ def parse_args(): args.sample_idx, args.module, ) - # Build the exporter + # # Build the exporter exporter = Torch2OnnxExporter(setup_config, args.log_level) - # Export the model + # # Export the model exporter.export() From 3e3e60a044f638d067729021afba74720c8b0a63 Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Mon, 9 Feb 2026 18:30:14 +0900 Subject: [PATCH 10/22] Fix import statements --- projects/BEVFusion/deploy/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/projects/BEVFusion/deploy/__init__.py b/projects/BEVFusion/deploy/__init__.py index ea641c12e..a1662c4b8 100644 --- a/projects/BEVFusion/deploy/__init__.py +++ b/projects/BEVFusion/deploy/__init__.py @@ -1,4 +1,3 @@ -from .exporter import Torch2OnnxExporter from .voxel_detection import VoxelDetection -__all__ = ["VoxelDetection", "Torch2OnnxExporter"] +__all__ = ["VoxelDetection"] From 1b4b1f25d1dce6a3b43aefea4172f524c458dbc2 Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Mon, 9 Feb 2026 19:02:19 +0900 Subject: [PATCH 11/22] Fix camera outputs --- Dockerfile | 5 ++++- ...sion_camera_swin_fpn_30e_4xb8_j6gen2_base_120m.py | 2 +- ..._voxel_second_secfpn_30e_4xb8_j6gen2_base_120m.py | 4 ++-- projects/BEVFusion/deploy/containers.py | 12 ++++++++++++ projects/BEVFusion/deploy/exporter.py | 4 +++- 5 files changed, 22 insertions(+), 5 deletions(-) diff --git a/Dockerfile b/Dockerfile index 582582c76..0239d863e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -67,7 +67,10 @@ RUN python3 
-m pip install git+https://github.com/tier4/autoware_perception_eval RUN python3 -m pip --no-cache-dir install \ setuptools==60.2.0 \ transformers==4.51.3 \ - polars==1.37.1 + polars==1.37.1 \ + onnx_graphsurgeon \ + spconv-cu120 + # NOTE(knzo25): this patch is needed to use numpy versions over 1.23.5 (version used in mmdet3d 1.4.0) # It can be safely deleted when mmdet3d updates the numpy version diff --git a/projects/BEVFusion/configs/t4dataset/BEVFusion-C/bevfusion_camera_swin_fpn_30e_4xb8_j6gen2_base_120m.py b/projects/BEVFusion/configs/t4dataset/BEVFusion-C/bevfusion_camera_swin_fpn_30e_4xb8_j6gen2_base_120m.py index b781e2c71..4c992c945 100644 --- a/projects/BEVFusion/configs/t4dataset/BEVFusion-C/bevfusion_camera_swin_fpn_30e_4xb8_j6gen2_base_120m.py +++ b/projects/BEVFusion/configs/t4dataset/BEVFusion-C/bevfusion_camera_swin_fpn_30e_4xb8_j6gen2_base_120m.py @@ -13,7 +13,7 @@ # user setting data_root = "data/t4dataset/" -info_directory_path = "info/user_name/" +info_directory_path = "info/kokseang/" experiment_group_name = "bevfusion_camera/j6gen2_base/" + _base_.dataset_type experiment_name = "bevfusion_camera_swin_fpn_30e_4xb8_j6gen2_base_120m" diff --git a/projects/BEVFusion/configs/t4dataset/BEVFusion-L/bevfusion_lidar_voxel_second_secfpn_30e_4xb8_j6gen2_base_120m.py b/projects/BEVFusion/configs/t4dataset/BEVFusion-L/bevfusion_lidar_voxel_second_secfpn_30e_4xb8_j6gen2_base_120m.py index e698e9eb1..178f5ff3d 100644 --- a/projects/BEVFusion/configs/t4dataset/BEVFusion-L/bevfusion_lidar_voxel_second_secfpn_30e_4xb8_j6gen2_base_120m.py +++ b/projects/BEVFusion/configs/t4dataset/BEVFusion-L/bevfusion_lidar_voxel_second_secfpn_30e_4xb8_j6gen2_base_120m.py @@ -12,8 +12,8 @@ custom_imports["imports"] += ["autoware_ml.detection3d.datasets.transforms"] # user setting -data_root = "data/t4datasets/" -info_directory_path = "info/kokseang_2_5/" +data_root = "data/t4dataset/" +info_directory_path = "info/user_name/" experiment_group_name = 
"bevfusion_lidar_intensity/j6gen2_base/" + _base_.dataset_type experiment_name = "lidar_voxel_second_secfpn_30e_4xb8_j6gen2_base_120m" diff --git a/projects/BEVFusion/deploy/containers.py b/projects/BEVFusion/deploy/containers.py index 046934ee8..85bdd1704 100644 --- a/projects/BEVFusion/deploy/containers.py +++ b/projects/BEVFusion/deploy/containers.py @@ -21,6 +21,7 @@ def forward(self, imgs): class TrtBevFusionMainContainer(torch.nn.Module): + def __init__(self, mod, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self.mod = mod @@ -72,7 +73,18 @@ def forward( ) outputs = mod._forward(batch_inputs_dict, using_image_features=True) + bbox_pred, score, label_pred = self.postprocessing(outputs) + return bbox_pred, score, label_pred + + def postprocessing(self, outputs: dict): + """Postprocess the outputs of the model to get the final predictions. + + Args: + outputs (dict): The outputs of the model. + Returns: + dict: The final predictions. + """ # The following code is taken from # projects/BEVFusion/bevfusion/bevfusion_head.py # It is used to simplify the post process in deployment diff --git a/projects/BEVFusion/deploy/exporter.py b/projects/BEVFusion/deploy/exporter.py index 1ff26d777..47f46553a 100644 --- a/projects/BEVFusion/deploy/exporter.py +++ b/projects/BEVFusion/deploy/exporter.py @@ -178,6 +178,7 @@ def _export_main_body( Main body. 
""" main_container = TrtBevFusionMainContainer(patched_model) + data_samples = model_data.input_metas["data_samples"] voxels = model_data.model_inputs.voxels coors = model_data.model_inputs.coors num_points_per_voxel = model_data.model_inputs.num_points_per_voxel @@ -189,9 +190,10 @@ def _export_main_body( ) if image_feats is not None: + imgs = model_data.model_inputs.imgs points = model_data.model_inputs.points lidar2img = model_data.model_inputs.lidar2img - img_aug_matrix = model_data.model_inputs.img_aug_matrix + img_aug_matrix = imgs.new_tensor(np.stack(data_samples[0].img_aug_matrix)) geom_feats = model_data.model_inputs.geom_feats kept = model_data.model_inputs.kept ranks = model_data.model_inputs.ranks From a485b79b774298861e5c1ee32e9baded91997357 Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Mon, 9 Feb 2026 19:04:02 +0900 Subject: [PATCH 12/22] Fix camera outputs --- .../bevfusion_camera_swin_fpn_30e_4xb8_j6gen2_base_120m.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/projects/BEVFusion/configs/t4dataset/BEVFusion-C/bevfusion_camera_swin_fpn_30e_4xb8_j6gen2_base_120m.py b/projects/BEVFusion/configs/t4dataset/BEVFusion-C/bevfusion_camera_swin_fpn_30e_4xb8_j6gen2_base_120m.py index 4c992c945..b781e2c71 100644 --- a/projects/BEVFusion/configs/t4dataset/BEVFusion-C/bevfusion_camera_swin_fpn_30e_4xb8_j6gen2_base_120m.py +++ b/projects/BEVFusion/configs/t4dataset/BEVFusion-C/bevfusion_camera_swin_fpn_30e_4xb8_j6gen2_base_120m.py @@ -13,7 +13,7 @@ # user setting data_root = "data/t4dataset/" -info_directory_path = "info/kokseang/" +info_directory_path = "info/user_name/" experiment_group_name = "bevfusion_camera/j6gen2_base/" + _base_.dataset_type experiment_name = "bevfusion_camera_swin_fpn_30e_4xb8_j6gen2_base_120m" From 69590d2a140f8ad28fcb1cc70f3fe586e6422a91 Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Mon, 9 Feb 2026 19:10:58 +0900 Subject: [PATCH 13/22] Add docstring --- projects/BEVFusion/deploy/builder.py | 11 ++++-- 
projects/BEVFusion/deploy/exporter.py | 53 ++++++++++++++++++--------- 2 files changed, 42 insertions(+), 22 deletions(-) diff --git a/projects/BEVFusion/deploy/builder.py b/projects/BEVFusion/deploy/builder.py index 92d96cd0d..56acfcd95 100644 --- a/projects/BEVFusion/deploy/builder.py +++ b/projects/BEVFusion/deploy/builder.py @@ -22,11 +22,11 @@ class ExportBuilder: def __init__(self, setup_configs: SetupConfigs): self.setup_configs = setup_configs - def build(self): - """Build the model. + def build(self) -> BuilderData: + """Build configs and a PyTorch model for ONNX export. Returns: - Model data. + BuilderData with ModelData, ir_configs, context_info, and patched_model (Pytorch model). """ # Build the model data model_data = self._build_model_data() @@ -59,7 +59,7 @@ def build(self): patched_model=patched_model, ) - def _build_model_data(self): + def _build_model_data(self) -> ModelData: """Build the model. Args: @@ -127,6 +127,9 @@ def _build_model_data(self): @staticmethod def _add_or_update(cfg: dict, key: str, val: Any) -> None: + """ + Update key with the values to cfg. + """ if key in cfg and isinstance(cfg[key], dict) and isinstance(val, dict): cfg[key].update(val) else: diff --git a/projects/BEVFusion/deploy/exporter.py b/projects/BEVFusion/deploy/exporter.py index 47f46553a..0ce9469d9 100644 --- a/projects/BEVFusion/deploy/exporter.py +++ b/projects/BEVFusion/deploy/exporter.py @@ -20,6 +20,7 @@ class Torch2OnnxExporter: def __init__(self, setup_configs: SetupConfigs, log_level: str): + """Initialization of Torch2OnnxExporter.""" self.setup_configs = setup_configs log_level = logging.getLevelName(log_level) self.logger = get_root_logger() @@ -31,7 +32,10 @@ def __init__(self, setup_configs: SetupConfigs, log_level: str): self.output_path = self.output_prefix + ".onnx" self.builder = ExportBuilder(self.setup_configs) - def export(self): + def export(self) -> None: + """ + Export Pytorch Model to ONNX. 
+ """ self.logger.info(f"Export PyTorch model to ONNX: {self.output_path}.") # Build the model data and configs @@ -53,10 +57,13 @@ def export(self): def _export_model( self, model_data: ModelData, context_info: dict, patched_model: torch.nn.Module, ir_configs: dict ) -> None: - """Rewrite the context info. - - Returns: - Context info. + """ + Export torch model to ONNX. + Args: + model_data (ModelData): Dataclass with data inputs. + context_info (dict): Context when deploying to rewrite some configs. + patched_model (torch.nn.Module): Patched Pytorch model. + ir_configs (dict): Configs for intermediate representations in ONNX. """ with RewriterContext(**context_info), torch.no_grad(): image_feats = None @@ -78,11 +85,19 @@ def _export_model( model_data=model_data, ir_configs=ir_configs, patched_model=patched_model, image_feats=image_feats ) - def _export_image_backbone(self, model_data: ModelData, ir_configs: dict, patched_model: torch.nn.Module) -> None: + def _export_image_backbone( + self, model_data: ModelData, ir_configs: dict, patched_model: torch.nn.Module + ) -> Optional[torch.Tensor]: """Export the image backbone. + Args: + model_data (ModelData): Dataclass with data inputs. + context_info (dict): Context when deploying to rewrite some configs. + patched_model (torch.nn.Module): Patched Pytorch model. + ir_configs (dict): Configs for intermediate representations in ONNX. + Returns: - Image backbone. + Image feats. """ data_preprocessor = model_data.input_metas["data_preprocessor"] model_inputs_data = model_data.model_inputs @@ -121,10 +136,13 @@ def _export_camera_bev_only_network( patched_model: torch.nn.Module, image_feats: Optional[torch.Tensor], ) -> None: - """Export the camera bev only network. + """Export the camera bev only network to an ONNX file. - Returns: - Camera bev only network. + Args: + model_data (ModelData): Dataclass with data inputs. + context_info (dict): Context when deploying to rewrite some configs. 
+ patched_model (torch.nn.Module): Patched Pytorch model. + ir_configs (dict): Configs for intermediate representations in ONNX. """ main_container = TrtBevFusionCameraOnlyContainer(patched_model) data_samples = model_data.input_metas["data_samples"] @@ -172,10 +190,13 @@ def _export_main_body( patched_model: torch.nn.Module, image_feats: Optional[torch.Tensor], ) -> None: - """Export the main body. + """Export the main body (lidar-only or camera-lidar) to an ONNX file. - Returns: - Main body. + Args: + model_data (ModelData): Dataclass with data inputs. + context_info (dict): Context when deploying to rewrite some configs. + patched_model (torch.nn.Module): Patched Pytorch model. + ir_configs (dict): Configs for intermediate representations in ONNX. """ main_container = TrtBevFusionMainContainer(patched_model) data_samples = model_data.input_metas["data_samples"] @@ -228,11 +249,7 @@ def _export_main_body( self.logger.info(f"Main body network with {model_name} exported to {self.output_path}") def _fix_onnx_graph(self) -> None: - """Fix the ONNX graph. - - Returns: - ONNX graph. 
- """ + """Fix the ONNX graph with an ONNX file.""" self.logger.info("Attempting to fix the graph (TopK's K becoming a tensor)") model = onnx.load(self.output_path.replace(".onnx", "_temp_to_be_fixed.onnx")) graph = gs.import_onnx(model) From 1c0a307a4e1bee5a15526daeba5a6d96060e9218 Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Mon, 9 Feb 2026 19:11:25 +0900 Subject: [PATCH 14/22] Add docstring --- Dockerfile | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 0239d863e..165ea1336 100644 --- a/Dockerfile +++ b/Dockerfile @@ -67,9 +67,7 @@ RUN python3 -m pip install git+https://github.com/tier4/autoware_perception_eval RUN python3 -m pip --no-cache-dir install \ setuptools==60.2.0 \ transformers==4.51.3 \ - polars==1.37.1 \ - onnx_graphsurgeon \ - spconv-cu120 + polars==1.37.1 # NOTE(knzo25): this patch is needed to use numpy versions over 1.23.5 (version used in mmdet3d 1.4.0) From 29a00afec989d966df66775859abb162b6b8b36b Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Mon, 9 Feb 2026 19:11:48 +0900 Subject: [PATCH 15/22] Add docstring --- Dockerfile | 1 - 1 file changed, 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 165ea1336..582582c76 100644 --- a/Dockerfile +++ b/Dockerfile @@ -69,7 +69,6 @@ RUN python3 -m pip --no-cache-dir install \ transformers==4.51.3 \ polars==1.37.1 - # NOTE(knzo25): this patch is needed to use numpy versions over 1.23.5 (version used in mmdet3d 1.4.0) # It can be safely deleted when mmdet3d updates the numpy version # NOTE(amadeuszsz): patches for torch.load can be removed after mmlab's PyTorch 2.6+ support From 5132269156b4c754fbbeffafe22633c9c0ef4807 Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Mon, 9 Feb 2026 19:32:49 +0900 Subject: [PATCH 16/22] Update configs --- .../deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py b/projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py index e14f34597..731da107e 100644 --- a/projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py +++ b/projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py @@ -12,7 +12,7 @@ ) depth_bins = 129 -feature_dims = (60, 80) +feature_dims = (48, 96) # image_dims = (640, 576) backend_config = dict( From 1c9b1c016e46641b41cc78952bf687b3cd84a72e Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Fri, 13 Feb 2026 19:23:02 +0900 Subject: [PATCH 17/22] Add TODO for feature_dims --- .../deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py b/projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py index 731da107e..1b3945e27 100644 --- a/projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py +++ b/projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py @@ -1,4 +1,11 @@ # Deploy camera with lidar inputs +_base_ = [ + "../default/pipelines/default_camera_lidar_intensity_120m.py", + "../default/models/default_camera_swin_fpn_120m.py", + "../default/schedulers/default_30e_4xb8_adamw_linear_cosine.py", + "../default/default_misc.py", +] + codebase_config = dict(type="mmdet3d", task="VoxelDetection", model_type="end2end") @@ -12,6 +19,7 @@ ) depth_bins = 129 +# TODO(KokSeang): Read this parameter from a base config feature_dims = (48, 96) # image_dims = (640, 576) From b38a970f710e0187e91b2ac78e3a187977063468 Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Fri, 13 Feb 2026 19:26:00 +0900 Subject: [PATCH 18/22] Raise ValueError if num_proposal not found --- projects/BEVFusion/deploy/exporter.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git 
a/projects/BEVFusion/deploy/exporter.py b/projects/BEVFusion/deploy/exporter.py index 0ce9469d9..83fa58e9b 100644 --- a/projects/BEVFusion/deploy/exporter.py +++ b/projects/BEVFusion/deploy/exporter.py @@ -258,7 +258,9 @@ def _fix_onnx_graph(self) -> None: topk_nodes = [node for node in graph.nodes if node.op == "TopK"] assert len(topk_nodes) == 1 topk = topk_nodes[0] - k = self.setup_configs.model_cfg.num_proposals + k = self.setup_configs.model_cfg.get("num_proposals", None) + if k is None: + raise ValueError(f"num_proposals is not found in the model configs!") topk.inputs[1] = gs.Constant("K", values=np.array([k], dtype=np.int64)) topk.outputs[0].shape = [1, k] topk.outputs[0].dtype = topk.inputs[0].dtype if topk.inputs[0].dtype else np.float32 From 82e1e2948cc773c02928293a45bcd21663f59a73 Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Fri, 13 Feb 2026 19:27:35 +0900 Subject: [PATCH 19/22] Remove _network from param --- .../deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py | 2 +- projects/BEVFusion/deploy/exporter.py | 6 +++--- projects/BEVFusion/deploy/torch2onnx.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py b/projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py index 1b3945e27..e947dbd92 100644 --- a/projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py +++ b/projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py @@ -68,7 +68,7 @@ export_params=True, keep_initializers_as_inputs=False, opset_version=17, - save_file="camera_point_bev_network.onnx", + save_file="camera_point_bev.onnx", input_names=["points", "lidar2image", "img_aug_matrix", "geom_feats", "kept", "ranks", "indices", "image_feats"], output_names=["bbox_pred", "score", "label_pred"], dynamic_axes={ diff --git a/projects/BEVFusion/deploy/exporter.py b/projects/BEVFusion/deploy/exporter.py index 
83fa58e9b..1cccf22bc 100644 --- a/projects/BEVFusion/deploy/exporter.py +++ b/projects/BEVFusion/deploy/exporter.py @@ -74,8 +74,8 @@ def _export_model( return # Export the camera bev only network - if self.setup_configs.module == "camera_bev_only_network": - self._export_camera_bev_only_network( + if self.setup_configs.module == "camera_bev_only": + self._export_camera_bev_only( model_data=model_data, ir_configs=ir_configs, patched_model=patched_model, image_feats=image_feats ) @@ -129,7 +129,7 @@ def _export_image_backbone( self.logger.info(f"Converted Image backbone") return image_feats - def _export_camera_bev_only_network( + def _export_camera_bev_only( self, model_data: ModelData, ir_configs: dict, diff --git a/projects/BEVFusion/deploy/torch2onnx.py b/projects/BEVFusion/deploy/torch2onnx.py index 87085b875..fd2bb4d2e 100644 --- a/projects/BEVFusion/deploy/torch2onnx.py +++ b/projects/BEVFusion/deploy/torch2onnx.py @@ -26,7 +26,7 @@ def parse_args(): help="module to export", required=True, default="main_body", - choices=["main_body", "image_backbone", "camera_bev_only_network"], + choices=["main_body", "image_backbone", "camera_bev_only"], ) args = parser.parse_args() return args From 8b60d109cfb843a1827778a4f8c5d3a7f5ddb58b Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Fri, 13 Feb 2026 19:28:35 +0900 Subject: [PATCH 20/22] Remove double commenting --- projects/BEVFusion/deploy/torch2onnx.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/projects/BEVFusion/deploy/torch2onnx.py b/projects/BEVFusion/deploy/torch2onnx.py index fd2bb4d2e..1a12aa5dc 100644 --- a/projects/BEVFusion/deploy/torch2onnx.py +++ b/projects/BEVFusion/deploy/torch2onnx.py @@ -44,8 +44,8 @@ def parse_args(): args.sample_idx, args.module, ) - # # Build the exporter + # Build the exporter exporter = Torch2OnnxExporter(setup_config, args.log_level) - # # Export the model + # Export the model exporter.export() From 78f43374339c833c7fa41944dfd70f5febdb4596 Mon Sep 17 
00:00:00 2001 From: Kok Seang Tan Date: Fri, 13 Feb 2026 19:32:53 +0900 Subject: [PATCH 21/22] Update readme --- projects/BEVFusion/README.md | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/projects/BEVFusion/README.md b/projects/BEVFusion/README.md index 248cb2515..6586b923c 100644 --- a/projects/BEVFusion/README.md +++ b/projects/BEVFusion/README.md @@ -173,6 +173,35 @@ python projects/BEVFusion/deploy/torch2onnx.py \ ``` +To export a camera-only model, please use the following command: + +```bash +DEPLOY_CFG_MAIN_BODY=configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py +DEPLOY_CFG_IMAGE_BACKBONE=configs/deploy/bevfusion_camera_backbone_tensorrt_dynamic.py + +MODEL_CFG=... +CHECKPOINT_PATH=... +WORK_DIR=... + +python projects/BEVFusion/deploy/torch2onnx.py \ + ${DEPLOY_CFG_MAIN_BODY} \ + ${MODEL_CFG} \ + ${CHECKPOINT_PATH} \ + --device cuda:0 \ + --work-dir ${WORK_DIR} \ + --module camera_bev_only + + +python projects/BEVFusion/deploy/torch2onnx.py \ + ${DEPLOY_CFG_IMAGE_BACKBONE} \ + ${MODEL_CFG} \ + ${CHECKPOINT_PATH} \ + --device cuda:0 \ + --work-dir ${WORK_DIR} \ + --module image_backbone +``` +Note that this camera-only model takes lidar pointclouds as an input for a depth map, and we will release a model without lidar pointclouds in another release. + This will generate two models in the `WORK_DIR` folder. `end2end.onnx` corresponds to the standard exported model ,whereas `end2end_fixed.onnx` contains a fix for the `TopK` operator (compatibility issues between `mmdeploy` and `TensorRT`). 
## TODO From 7e3a271a498b88603f1e282358f3fcc751645a6f Mon Sep 17 00:00:00 2001 From: Kok Seang Tan Date: Fri, 13 Feb 2026 19:37:36 +0900 Subject: [PATCH 22/22] Remove unnecessary model import --- .../bevfusion_camera_point_bev_tensorrt_dynamic.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py b/projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py index e947dbd92..c9b1a9475 100644 --- a/projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py +++ b/projects/BEVFusion/configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py @@ -1,12 +1,3 @@ -# Deploy camera with lidar inputs -_base_ = [ - "../default/pipelines/default_camera_lidar_intensity_120m.py", - "../default/models/default_camera_swin_fpn_120m.py", - "../default/schedulers/default_30e_4xb8_adamw_linear_cosine.py", - "../default/default_misc.py", -] - - codebase_config = dict(type="mmdet3d", task="VoxelDetection", model_type="end2end") custom_imports = dict(