Merged
34 commits
9bb04ce
Update docker installation commit for perception_evaluation
KSeangTan Jan 14, 2026
ebee4a0
Merge branch 'tier4:main' into main
KSeangTan Jan 14, 2026
775f1d0
Resolve merge conflicts
KSeangTan Jan 20, 2026
edb373a
Merge branch 'tier4:main' into main
KSeangTan Jan 20, 2026
a358886
Merge branch 'tier4:main' into main
KSeangTan Jan 20, 2026
be329c4
Merge branch 'tier4:main' into main
KSeangTan Jan 21, 2026
6b81116
Merge branch 'tier4:main' into main
KSeangTan Jan 23, 2026
3e3c09d
Merge branch 'tier4:main' into main
KSeangTan Jan 29, 2026
9de5ca0
Add them temp
KSeangTan Feb 2, 2026
fdeaa33
Merge branch 'tier4:main' into main
KSeangTan Feb 9, 2026
33fc04c
Merge branch 'main' into feat/export_bevfusion_camera
KSeangTan Feb 9, 2026
e16ddee
Fix broken bevfusion exporter script
KSeangTan Feb 9, 2026
f4b02c6
Fix broken bevfusion exporter script
KSeangTan Feb 9, 2026
aa8c322
Fix broken bevfusion exporter script
KSeangTan Feb 9, 2026
a4bb4d6
Fix import statements
KSeangTan Feb 9, 2026
7fadaf2
Fix import statements
KSeangTan Feb 9, 2026
a39a141
Fix import statements
KSeangTan Feb 9, 2026
cc67c65
Fix import statements
KSeangTan Feb 9, 2026
3e3e60a
Fix import statements
KSeangTan Feb 9, 2026
1b4b1f2
Fix camera outputs
KSeangTan Feb 9, 2026
a485b79
Fix camera outputs
KSeangTan Feb 9, 2026
69590d2
Add docstring
KSeangTan Feb 9, 2026
1c0a307
Add docstring
KSeangTan Feb 9, 2026
29a00af
Add docstring
KSeangTan Feb 9, 2026
9b71536
Merge branch 'tier4:main' into main
KSeangTan Feb 9, 2026
b208d49
Merge branch 'main' into feat/export_bevfusion_camera
KSeangTan Feb 9, 2026
5132269
Update configs
KSeangTan Feb 9, 2026
1c9b1c0
Add TODO for feature_dims
KSeangTan Feb 13, 2026
b38a970
Raise ValueError if num_proposal not found
KSeangTan Feb 13, 2026
82e1e29
Remove _network from param
KSeangTan Feb 13, 2026
8b60d10
Remove double commenting
KSeangTan Feb 13, 2026
78f4337
Update readme
KSeangTan Feb 13, 2026
7e3a271
Remove unnecessary model import
KSeangTan Feb 13, 2026
e991644
Merge branch 'main' into feat/export_bevfusion_camera
KSeangTan Feb 13, 2026
29 changes: 29 additions & 0 deletions projects/BEVFusion/README.md
@@ -173,6 +173,35 @@ python projects/BEVFusion/deploy/torch2onnx.py \

```

To export a camera-only model, please use the following commands:

```bash
DEPLOY_CFG_MAIN_BODY=configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py
DEPLOY_CFG_IMAGE_BACKBONE=configs/deploy/bevfusion_camera_backbone_tensorrt_dynamic.py

MODEL_CFG=...
CHECKPOINT_PATH=...
WORK_DIR=...

python projects/BEVFusion/deploy/torch2onnx.py \
${DEPLOY_CFG_MAIN_BODY} \
${MODEL_CFG} \
${CHECKPOINT_PATH} \
--device cuda:0 \
--work-dir ${WORK_DIR} \
--module camera_bev_only_network


python projects/BEVFusion/deploy/torch2onnx.py \
${DEPLOY_CFG_IMAGE_BACKBONE} \
${MODEL_CFG} \
${CHECKPOINT_PATH} \
--device cuda:0 \
--work-dir ${WORK_DIR} \
--module image_backbone
```
Note that this camera-only model still takes lidar point clouds as an input to generate its depth map; a model that does not require lidar point clouds will be provided in a future release.

This will generate two models in the `WORK_DIR` folder. `end2end.onnx` corresponds to the standard exported model, whereas `end2end_fixed.onnx` contains a fix for the `TopK` operator (a compatibility issue between `mmdeploy` and `TensorRT`).
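As a quick follow-up, a minimal sketch (assuming the `onnx` Python package is installed) of how the generated graphs can be sanity-checked before building TensorRT engines; the filenames below are assumptions based on the `save_file` entries in the deploy configs, so adjust them to whatever actually lands in `WORK_DIR`:

```python
# Hedged sketch: sanity-check the exported ONNX graphs (assumes `pip install onnx`).
# The filenames below are assumptions; adjust to the actual contents of WORK_DIR.
import os

import onnx

work_dir = os.environ.get("WORK_DIR", ".")
for name in ("camera_point_bev.onnx", "end2end_fixed.onnx"):
    path = os.path.join(work_dir, name)
    if not os.path.isfile(path):
        print(f"skipping {name}: not found")
        continue
    model = onnx.load(path)
    onnx.checker.check_model(model)
    print(name, "inputs:", [inp.name for inp in model.graph.input])
```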

## TODO
2 changes: 1 addition & 1 deletion projects/BEVFusion/bevfusion/bevfusion.py
@@ -355,7 +355,7 @@ def extract_feat(
)
features.append(img_feature)

-if points is not None and self.pts_middle_encoder is not None:
+if self.pts_middle_encoder is not None:
pts_feature = self.extract_pts_feat(
batch_inputs_dict.get("voxels", {}).get("voxels", None),
batch_inputs_dict.get("voxels", {}).get("coors", None),
93 changes: 93 additions & 0 deletions configs/deploy/bevfusion_camera_point_bev_tensorrt_dynamic.py
@@ -0,0 +1,93 @@
codebase_config = dict(type="mmdet3d", task="VoxelDetection", model_type="end2end")

custom_imports = dict(
imports=[
"projects.BEVFusion.deploy",
"projects.BEVFusion.bevfusion",
"projects.SparseConvolution",
],
allow_failed_imports=False,
)

depth_bins = 129
# TODO(KokSeang): Read this parameter from a base config
feature_dims = (48, 96)
# image_dims = (640, 576)

backend_config = dict(
type="tensorrt",
common_config=dict(max_workspace_size=1 << 32),
model_inputs=[
dict(
input_shapes=dict(
# TODO(TIERIV): Optimize. Now, using points will increase latency significantly
points=dict(min_shape=[5000, 4], opt_shape=[50000, 4], max_shape=[200000, 4]),
lidar2image=dict(min_shape=[1, 4, 4], opt_shape=[6, 4, 4], max_shape=[6, 4, 4]),
img_aug_matrix=dict(min_shape=[1, 4, 4], opt_shape=[6, 4, 4], max_shape=[6, 4, 4]),
geom_feats=dict(
min_shape=[0 * depth_bins * feature_dims[0] * feature_dims[1], 4],
opt_shape=[6 * depth_bins * feature_dims[0] * feature_dims[1] // 2, 4],
max_shape=[6 * depth_bins * feature_dims[0] * feature_dims[1], 4],
),
kept=dict(
min_shape=[0 * depth_bins * feature_dims[0] * feature_dims[1]],
opt_shape=[6 * depth_bins * feature_dims[0] * feature_dims[1]],
max_shape=[6 * depth_bins * feature_dims[0] * feature_dims[1]],
),
ranks=dict(
min_shape=[0 * depth_bins * feature_dims[0] * feature_dims[1]],
opt_shape=[6 * depth_bins * feature_dims[0] * feature_dims[1] // 2],
max_shape=[6 * depth_bins * feature_dims[0] * feature_dims[1]],
),
indices=dict(
min_shape=[0 * depth_bins * feature_dims[0] * feature_dims[1]],
opt_shape=[6 * depth_bins * feature_dims[0] * feature_dims[1] // 2],
max_shape=[6 * depth_bins * feature_dims[0] * feature_dims[1]],
),
image_feats=dict(
min_shape=[0, 256, feature_dims[0], feature_dims[1]],
opt_shape=[6, 256, feature_dims[0], feature_dims[1]],
max_shape=[6, 256, feature_dims[0], feature_dims[1]],
),
)
)
],
)

onnx_config = dict(
type="onnx",
export_params=True,
keep_initializers_as_inputs=False,
opset_version=17,
save_file="camera_point_bev.onnx",
input_names=["points", "lidar2image", "img_aug_matrix", "geom_feats", "kept", "ranks", "indices", "image_feats"],
output_names=["bbox_pred", "score", "label_pred"],
dynamic_axes={
"points": {
0: "num_points",
},
"lidar2image": {
0: "num_imgs",
},
"img_aug_matrix": {
0: "num_imgs",
},
"geom_feats": {
0: "num_kept",
},
"kept": {
0: "num_geom_feats",
},
"ranks": {
0: "num_kept",
},
"indices": {
0: "num_kept",
},
"image_feats": {
0: "num_imgs",
},
},
input_shape=None,
verbose=True,
)
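For reference, every min/opt/max bound above is a product of `depth_bins`, `feature_dims`, and the camera count; a small sketch of that arithmetic (plain Python mirroring the expressions already in the config, nothing beyond them):

```python
# Sketch of the dynamic-shape arithmetic used in the config above.
depth_bins = 129
feature_dims = (48, 96)  # feature-map height and width
num_cams = 6             # camera count used for the opt/max profiles

cells_per_cam = depth_bins * feature_dims[0] * feature_dims[1]  # 594,432 frustum cells per camera
max_elems = num_cams * cells_per_cam                            # 3,566,592 -> max_shape for geom_feats/kept/ranks/indices
opt_elems = max_elems // 2                                      # 1,783,296 -> opt_shape for geom_feats/ranks/indices
print(cells_per_cam, opt_elems, max_elems)
```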
@@ -0,0 +1,49 @@
codebase_config = dict(type="mmdet3d", task="VoxelDetection", model_type="end2end")

custom_imports = dict(
imports=[
"projects.BEVFusion.deploy",
"projects.BEVFusion.bevfusion",
"projects.SparseConvolution",
],
allow_failed_imports=False,
)

backend_config = dict(
type="tensorrt",
common_config=dict(max_workspace_size=1 << 32),
model_inputs=[
dict(
input_shapes=dict(
voxels=dict(
min_shape=[1, 10, 5], opt_shape=[64000, 10, 5], max_shape=[256000, 10, 5]
), # [M, maximum number of points, features] features=5 when using intensity
coors=dict(min_shape=[1, 3], opt_shape=[64000, 3], max_shape=[256000, 3]),
num_points_per_voxel=dict(min_shape=[1], opt_shape=[64000], max_shape=[256000]),
)
)
],
)

onnx_config = dict(
type="onnx",
export_params=True,
keep_initializers_as_inputs=False,
opset_version=17,
save_file="main_body.onnx",
input_names=["voxels", "coors", "num_points_per_voxel"],
output_names=["bbox_pred", "score", "label_pred"],
dynamic_axes={
"voxels": {
0: "voxels_num",
},
"coors": {
0: "voxels_num",
},
"num_points_per_voxel": {
0: "voxels_num",
},
},
input_shape=None,
verbose=True,
)
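The min/opt/max triples in `model_inputs` are what `mmdeploy` turns into a TensorRT optimization profile when building the engine. As a rough illustration only (mmdeploy performs this step internally; it is not something added here), the equivalent native TensorRT calls would look roughly like this:

```python
# Illustrative sketch only: how the voxel bounds above map onto a native
# TensorRT optimization profile (mmdeploy performs this step internally).
import tensorrt as trt

logger = trt.Logger(trt.Logger.WARNING)
builder = trt.Builder(logger)
config = builder.create_builder_config()

profile = builder.create_optimization_profile()
profile.set_shape("voxels", min=(1, 10, 5), opt=(64000, 10, 5), max=(256000, 10, 5))
profile.set_shape("coors", min=(1, 3), opt=(64000, 3), max=(256000, 3))
profile.set_shape("num_points_per_voxel", min=(1,), opt=(64000,), max=(256000,))
config.add_optimization_profile(profile)
```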
6 changes: 1 addition & 5 deletions projects/BEVFusion/deploy/__init__.py
@@ -1,7 +1,3 @@
-from . import base
from .voxel_detection import VoxelDetection

-__all__ = [
-    "base",
-    "VoxelDetection",
-]
+__all__ = ["VoxelDetection"]