diff --git a/projects/BEVFusion/Dockerfile b/projects/BEVFusion/Dockerfile new file mode 100644 index 000000000..589bbc5ee --- /dev/null +++ b/projects/BEVFusion/Dockerfile @@ -0,0 +1,27 @@ +ARG AWML_BASE_IMAGE="ghcr.io/tier4/autoware-ml-base:latest" +FROM ${AWML_BASE_IMAGE} + +# Install SparseConv +RUN python3 -m pip --no-cache-dir install \ + spconv-cu120 + +# Install the libraries for onnx deployment +RUN python3 -m pip --no-cache-dir install \ + onnx==1.17.0 \ + onnx_simplifier==0.4.8 \ + onnxsim==0.4.10 \ + onnxruntime==1.13.1 \ + shapely==1.8.0 + +RUN python3 -m pip install --no-cache-dir \ + --index-url https://pypi.org/simple \ + --extra-index-url https://pypi.ngc.nvidia.com \ + --extra-index-url https://pypi.nvidia.com \ + onnx-graphsurgeon \ + pytorch-quantization + +RUN python3 -m pip --no-cache-dir install \ + numpy==1.22 + +WORKDIR /workspace +RUN pip install --no-cache-dir -e . diff --git a/projects/BEVFusion/README.md b/projects/BEVFusion/README.md index bbe2697a8..ba7bfd17c 100644 --- a/projects/BEVFusion/README.md +++ b/projects/BEVFusion/README.md @@ -20,9 +20,19 @@ - BEVFusion-L - v0 - [BEVFusion-L base/0.X](./docs/BEVFusion-L/v0/base.md) + - v1 + - [BEVFusion-L base/1.X](./docs/BEVFusion-L/v1/base.md) + - v2 + - [BEVFusion-L base/2.X](./docs/BEVFusion-L/v2/base.md) + - [BEVFusion-L j6gen2/2.X](./docs/BEVFusion-L/v2/j6gen2.md) - BEVFusion-CL - v0 - [BEVFusion-CL base/0.X](./docs/BEVFusion-CL/v0/base.md) + - v1 + - [BEVFusion-CL base/1.X](./docs/BEVFusion-CL/v1/base.md) + - v2 + - [BEVFusion-CL base/2.X](./docs/BEVFusion-CL/v2/base.md) + - [BEVFusion-CL j6gen2/2.X](./docs/BEVFusion-CL/v2/j6gen2.md) - BEVFusion-L-offline - v0 - [BEVFusion-L-offline base/0.X](./docs/BEVFusion-L-offline/v0/base.md) @@ -30,33 +40,28 @@ - v0 - [BEVFusion-CL-offline base/0.X](./docs/BEVFusion-CL-offline/v0/base.md) - ## Get started ### 1. Setup - Please follow the [installation tutorial](/docs/tutorial/tutorial_detection_3d.md)to set up the environment. 
-- Run docker +- Docker build for BEVFusion ```sh -docker run -it --rm --gpus all --shm-size=64g --name awml -p 6006:6006 -v $PWD/:/workspace -v $PWD/data:/workspace/data autoware-ml +docker build -t awml-bevfusion projects/BEVFusion/ ``` -- Build and install dependencies (required only the first time) +- Run docker ```sh -python projects/BEVFusion/setup.py develop +docker run -it --rm --gpus all --shm-size=64g --name awml -p 6006:6006 -v $PWD/:/workspace -v $PWD/data:/workspace/data awml-bevfusion ``` -- (Choice) Install traveller59's sparse convolutions backend - -By default, mmcv's backend will be used, but the commonly adopted backend is traveller59's, which is also includes deployment concerns in its design such as memory allocation. For this reason it is highly recommended to install it: +- Build and install dependencies (required only the first time) -```bash -pip install spconv-cu120 +```sh +python projects/BEVFusion/setup.py develop ``` -`AWML` will automatically select this implementation if the dependency is installed. - ### 2. Train #### 2.1. Train the LiDAR-only model first @@ -126,7 +131,6 @@ bash tools/detection3d/dist_script.sh projects/BEVFusion/configs/t4dataset/bevfu ``` ### 4. Deployment - #### 4.1. Sparse convolutions support Sparse convolutions are not deployable by default. In the [deployment](configs/deploy/bevfusion_lidar_tensorrt_dynamic.py) we follow the instructions found in the [SparseConvolution](../SparseConvolution/README.md) project to enable this feature. 
@@ -142,8 +146,6 @@ We provide three general deploy config files: - [main-body](configs/deploy/bevfusion_main_body_with_image_tensorrt_dynamic.py) - [image-backbone](configs/deploy/bevfusion_camera_backbone_tensorrt_dynamic.py) - - To export an ONNX, use the following command: ```bash @@ -162,7 +164,6 @@ python projects/BEVFusion/deploy/torch2onnx.py \ ${DEPLOY_CFG_IMAGE_BACKBONE} \ ${MODEL_CFG} \ @@ -182,6 +183,12 @@ This will generate two models in the `WORK_DIR` folder. `end2end.onnx` correspon - Fix BEVFusion ROIs for t4dataset - Add self-supervised loss +## Troubleshooting +### Sparse convolutions + +While mmcv provides its own sparse convolution backend, the commonly adopted backend is traveller59's, which also includes deployment concerns in its design, such as memory allocation. +For this reason, our Dockerfile installs `spconv-cu120`. + ## Reference - [BEVFusion of mmdetection3d](https://github.com/open-mmlab/mmdetection3d/tree/v1.4.0/projects/BEVFusion) diff --git a/projects/BEVFusion/requirements.txt b/projects/BEVFusion/requirements.txt deleted file mode 100644 index e78d1ba1d..000000000 --- a/projects/BEVFusion/requirements.txt +++ /dev/null @@ -1,10 +0,0 @@ -# Onnx export -onnx==1.17.0 -onnx_simplifier==0.4.8 -onnxsim==0.4.10 -onnxruntime==1.13.1 ---extra-index-url https://pypi.ngc.nvidia.com -onnx-graphsurgeon -pytorch-quantization -shapely==1.8.0 -numpy==1.22 diff --git a/projects/BEVFusion/setup.py b/projects/BEVFusion/setup.py index 41d24eb09..647a72178 100644 --- a/projects/BEVFusion/setup.py +++ b/projects/BEVFusion/setup.py @@ -42,7 +42,6 @@ def make_cuda_ext(name, module, sources, sources_cuda=[], extra_args=[], extra_i name="bev_pool", install_requires=[ "onnx_graphsurgeon==0.5.8", - "spconv-cu120==2.3.6", ], ext_modules=[ make_cuda_ext(