diff --git a/.clang-format b/.clang-format
index 1bd3afb..8960d1c 100644
--- a/.clang-format
+++ b/.clang-format
@@ -10,7 +10,7 @@ IndentWidth: 2
# 连续的空行保留几行
MaxEmptyLinesToKeep: 1
# 圆括号的换行方式
-BreakBeforeBraces: Custom
+BreakBeforeBraces: Custom
# 是否允许短方法单行
AllowShortFunctionsOnASingleLine: false
# 支持一行的if
@@ -81,4 +81,4 @@ BraceWrapping:
# 分离空语句
SplitEmptyRecord: false
# 分离空命名空间
- SplitEmptyNamespace: false
\ No newline at end of file
+ SplitEmptyNamespace: false
diff --git a/.github/workflows/Compilation.yml b/.github/workflows/Compilation.yml
index 00d17ad..ce4854b 100644
--- a/.github/workflows/Compilation.yml
+++ b/.github/workflows/Compilation.yml
@@ -12,7 +12,7 @@ jobs:
name: 'NvidiaGpuTensorRT'
runs-on: ubuntu-latest
container:
- image: ghcr.io/teddywesside1/easy_deploy_base_dev:nvidia_gpu_tensorrt_u2204
+ image: ghcr.io/zz990099/easy_deploy_tool:nvidia_gpu_trt10_u2204
options:
--privileged
--ipc host
@@ -20,11 +20,14 @@ jobs:
-w /workspace
steps:
- uses: actions/checkout@v4
+ with:
+ submodules: recursive
+
- name: Compile Codes
run:
- cd /workspace &&
- mkdir build && cd build &&
- cmake .. -DBUILD_TESTING=ON -DENABLE_TENSORRT=ON -DENABLE_ORT=ON &&
+ cd /workspace &&
+ mkdir build && cd build &&
+ cmake .. -DBUILD_TESTING=ON -DENABLE_TENSORRT=ON -DENABLE_ORT=ON &&
make -j
rk3588:
@@ -33,10 +36,12 @@ jobs:
permissions:
contents: read
packages: read # 访问容器镜像仓库的权限
-
+
steps:
- name: Checkout code
uses: actions/checkout@v4
+ with:
+ submodules: recursive
# 启用 QEMU 虚拟化支持
- name: Set up QEMU
@@ -62,7 +67,7 @@ jobs:
--name easy_deploy_container \
--platform linux/arm64 \
-v "$PWD:/workspace" \
- ghcr.io/teddywesside1/easy_deploy_base_dev:rknn_u2204 \
+ ghcr.io/zz990099/easy_deploy_tool:rknn_230_u2204 \
tail -f /dev/null
# ========== 编译执行阶段 ==========
diff --git a/.github/workflows/Lint.yml b/.github/workflows/Lint.yml
new file mode 100644
index 0000000..8ae99c2
--- /dev/null
+++ b/.github/workflows/Lint.yml
@@ -0,0 +1,26 @@
+name: pre-commit Checks
+
+on:
+ pull_request: # 在 PR 时触发
+ push: # 在推送代码到 main/master 分支时触发
+ branches: [main, master]
+
+jobs:
+ pre-commit:
+ name: Run pre-commit checks
+ runs-on: ubuntu-latest # 使用 Ubuntu 环境
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4 # 检出代码
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.10" # 指定 Python 版本
+
+ - name: Install pre-commit
+ run: pip install pre-commit # 安装 pre-commit
+
+ - name: Run pre-commit checks
+ run: pre-commit run --all-files # 运行所有检查
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..8a66214
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "easy_deploy_tool"]
+ path = easy_deploy_tool
+	url = https://github.com/zz990099/EasyDeployTool.git
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..9f43cc2
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,28 @@
+repos:
+ # 官方仓库中的基础钩子
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v5.0.0
+ hooks:
+ - id: check-added-large-files
+ - id: check-ast
+ - id: check-case-conflict
+ - id: check-merge-conflict
+ - id: check-symlinks
+ - id: check-xml
+ - id: check-yaml
+ args: ["--allow-multiple-documents"]
+ - id: debug-statements
+ - id: end-of-file-fixer
+ - id: mixed-line-ending
+ - id: trailing-whitespace
+ exclude_types: [rst]
+ - id: fix-byte-order-marker
+
+ # 运行 Uncrustify 格式化 C/C++ 代码
+ - repo: https://github.com/pre-commit/mirrors-clang-format
+ rev: "v20.1.0" # 指定 clang-format 版本
+ hooks:
+ - id: clang-format
+ name: clang-format (check)
+ args: [--style=file, --dry-run, --Werror] # 检查模式
+ types: [c, c++]
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d8a9759..28df3a8 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -2,18 +2,11 @@ cmake_minimum_required(VERSION 3.8)
project(easy_deployment)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
-set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
-set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
-
-add_subdirectory(deploy_core)
-add_subdirectory(deploy_utils)
+if (BUILD_TESTING)
+ enable_testing()
+endif()
-add_subdirectory(inference_core)
+add_subdirectory(easy_deploy_tool)
add_subdirectory(detection_2d)
-
add_subdirectory(sam)
-
-if (BUILD_TESTING)
- add_subdirectory(simple_tests)
-endif()
\ No newline at end of file
diff --git a/README.md b/README.md
index 0b181c3..1fc4372 100644
--- a/README.md
+++ b/README.md
@@ -1,49 +1,20 @@
# EasyDeploy
-
-
-
## About The Project
-The engineering deployment of deep learning algorithms relies on various inference frameworks, which often differ significantly from one another. These differences lead to low deployment and migration efficiency, especially when there is a need to support multiple hardware platforms.
+This project implements several common detection and segmentation algorithms using the [EasyDeployTool](https://github.com/zz990099/EasyDeployTool) library, supporting `TensorRT`, `OnnxRuntime`, and `RKNN` inference frameworks.
-The `EasyDeploy` project aims to address these challenges in two key ways:
+### Models and Inference Frameworks Supported
-1. **Abstracting inference framework functionalities**: By decoupling the pre-processing and post-processing procedures of algorithms from the inference process of deep learning models, `EasyDeploy` enables rapid deployment and migration of algorithms across multiple inference frameworks and hardware platforms.
-
-2. **Asynchronous inference pipeline**: The project implements an asynchronous inference workflow, which significantly improves model inference throughput on platforms that support multi-core parallel inference.
-
-### Features
-
-1. Abstracting inference framework (hardware platform) characteristics to enable efficient algorithm deployment and migration.
-
-2. Asynchronous inference pipeline to improve workflow throughput.
-
-3. Supporting segmented and distributed model inference, enabling asynchronous inference across devices such as CPU, GPU and NPU.
-
-### Models and Inference Frameworks Supported
-
-- **Deployed Inference Frameworks**:
- 1. TensorRT
- 2. ONNX-Runtime
+- **Deployed Inference Frameworks**:
+ 1. TensorRT
+ 2. ONNX-Runtime
3. RKNN
-- **Deployed Algorithms**:
- 1. YOLOv8
- 2. RT-DETR
- 3. MobileSAM
+- **Deployed Algorithms**:
+ 1. YOLOv8
+ 2. RT-DETR
+ 3. MobileSAM
4. NanoSAM
## Demo test Results
@@ -105,46 +76,63 @@ The following test results showcase the visualized reasoning outcomes of the alg
## Getting Started
-### Dependency
-
-- The `EasyDeploy` project is entirely written in C++ and built using the CMake tool. It relies on the following dependencies:
- - **OpenCV**
- - **CMake**
- - **glog**
- - **GoogleTest**
- - Specific dependencies for each **inference framework**
-
-### Environment Build
-- Follow [EnvironmentSetup](doc/EnviromentSetup.md) to setup enviroment with scripts quickly.
-
-## What You Could Do With This Project
-
-EasyDeploy aims to minimize the impact of inference framework-specific characteristics on the deployment of deep learning algorithms. To achieve this, we have developed an abstract base class named BaseInferCore and created specialized base classes for certain types of algorithms, such as 2D detection and instance segmentation.
-
-Additionally, EasyDeploy provides an asynchronous inference pipeline to further enhance deployment efficiency.
-
-With these features, EasyDeploy offers the following capabilities:
-
-- **Direct use of pre-implemented algorithms**:
- - If you need to directly use algorithms such as YOLOv8, RT-DETR, MobileSAM or NanoSAM, EasyDeploy has already implemented and optimized their deployment.
- - [QuickStart](doc/QuickStart.md) may help.
-
-- **Deploying a new algorithm efficiently**:
- - If you need to deploy a new algorithm without worrying about the specific implementation details of inference frameworks, or if you want to easily migrate your algorithm to other inference frameworks, the BaseInferCore abstract base class can help you quickly implement and migrate the algorithm.
- - [HowToDeployModels](doc/HowToDeployModels.md) may help.
-
-- **Migrating algorithms to a new inference framework**:
- - If you want to migrate algorithms based on BaseInferCore to a new inference framework, implementing a subclass of BaseInferCore will allow you to migrate all algorithms to the new framework with ease.
- - [HowToDeployModels](doc/HowToDeployModels.md) may help.
-
-- **Improving inference throughput**:
- - If you need to increase the throughput of algorithm inference, EasyDeploy provides an asynchronous inference pipeline. For certain algorithm types (e.g., 2D detection, SAM), asynchronous base classes are already available, enabling you to boost the throughput of your models with minimal effort.
-
-- **Segmented distributed asynchronous inference**:
- - If you need to implement simple segmented, distributed, asynchronous inference for algorithms, the abstract base classes and asynchronous pipeline features provided in EasyDeploy make it easy to achieve this functionality.
-
-## Todo
-
-- [ ] Use `rga` library on rk3588 to do image pre-process.
-- [x] Update ONNX-Runtime inference core code and test it out.
-- [x] Test TensorRT inference framework on jetson platform.
+### Download Project
+
+Clone the git repository:
+```bash
+git clone git@github.com:zz990099/EasyDeploy.git
+cd EasyDeploy
+git submodule init && git submodule update
+```
+
+### Build Environment
+
+Set up the working environment using Docker:
+```bash
+cd EasyDeploy
+bash easy_deploy_tool/docker/easy_deploy_startup.sh
+# Select `jetson` -> `trt10_u2204`/`trt8_u2204`
+bash easy_deploy_tool/docker/into_docker.sh
+```
+
+### Compile Codes
+
+Inside the Docker container, compile the project. Use the `-DENABLE_*` flags to enable specific inference frameworks. Available options: `-DENABLE_TENSORRT=ON`, `-DENABLE_ORT=ON`, `-DENABLE_RKNN=ON` (these options can be enabled together).
+```bash
+cd /workspace
+mkdir build && cd build
+cmake .. -DBUILD_TESTING=ON -DENABLE_TENSORRT=ON # -DENABLE_RKNN=ON
+make -j
+```
+
+### Convert Model
+
+1. Download models from [Google Drive](https://drive.google.com/drive/folders/1yVEOzo59aob_1uXwv343oeh0dTKuHT58?usp=drive_link) and place them in `/workspace/models/`.
+
+2. Inside the Docker container, run the model conversion script:
+```bash
+cd /workspace
+bash tools/cvt_onnx2trt.sh
+# bash tools/cvt_onnx2rknn_all.sh
+```
+
+### Run Test Cases
+
+1. Execute test cases (refer to the code for details):
+```bash
+cd /workspace/build
+# List available test cases
+ctest -N
+# Run all test cases
+ctest
+```
+
+## References
+
+- [ultralytics](https://github.com/ultralytics/ultralytics)
+- [ultralytics-rknn](https://github.com/airockchip/ultralytics_yolov8/blob/main/RKOPT_README.md)
+- [rt-detr](https://github.com/lyuwenyu/RT-DETR)
+- [sam](https://github.com/facebookresearch/segment-anything)
+- [mobilesam](https://github.com/ChaoningZhang/MobileSAM)
+- [nanosam](https://github.com/NVIDIA-AI-IOT/nanosam)
+- [EasyDeployTool](https://github.com/zz990099/EasyDeployTool)
diff --git a/deploy_core/CMakeLists.txt b/deploy_core/CMakeLists.txt
deleted file mode 100644
index 6b92bec..0000000
--- a/deploy_core/CMakeLists.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-cmake_minimum_required(VERSION 3.8)
-project(deploy_core)
-
-add_compile_options(-std=c++17)
-add_compile_options(-O3 -Wextra -Wdeprecated -fPIC)
-set(CMAKE_CXX_STANDARD 17)
-
-
-find_package(OpenCV REQUIRED)
-find_package(glog REQUIRED)
-
-include_directories(
- include
- ${OpenCV_INCLUDE_DIRS}
-)
-
-set(source_file src/base_infer_core.cpp
- src/base_detection.cpp
- src/base_sam.cpp
-)
-
-add_library(${PROJECT_NAME} SHARED ${source_file})
-
-
-target_link_libraries(${PROJECT_NAME} PUBLIC
- ${OpenCV_LIBS}
- glog::glog
-)
-
-install(TARGETS ${PROJECT_NAME}
- LIBRARY DESTINATION lib)
-
-target_include_directories(${PROJECT_NAME} PUBLIC ${PROJECT_SOURCE_DIR}/include)
diff --git a/deploy_core/README.md b/deploy_core/README.md
deleted file mode 100644
index 6f71793..0000000
--- a/deploy_core/README.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# Deploy Core
-
-The `DeployCore` module defines the abstract functionalities for all components, including core inference capabilities, 2D detection features, SAM functionalities, and more. Beyond merely defining abstract functions, DeployCore also provides external encapsulations for certain algorithms. When implementing an algorithm, developers only need to focus on completing the key processes outlined in these definitions to achieve seamless algorithm deployment.
-
-## Functionality
-
-`DeployCore` is designed to provide abstract interface definitions for the functionalities of all modules, as well as abstract base classes containing reusable code.
-
-- Abstract core inference functionality: `BaseInferCore`
-- Abstract 2D detection functionality: `BaseDetection2DModel`
-- Abstract SAM functionality: `BaseSamModel`
-- Plug-and-play asynchronous pipeline base class: `BaseAsyncPipeline`
-
-## Structure
-
-The entire project code is divided into three parts:
- 1. Abstract interface classes for functional modules
- 2. Abstract base classes for certain functional modules
- 3. Base classes for the asynchronous inference pipeline framework
-
-code structure:
- ```bash
- deploy_core
- |-- CMakeLists.txt
- |-- README.md
- |-- include
- | `-- deploy_core
- | |-- base_infer_core.h
- | |-- base_detection.h
- | |-- base_sam.h
- | |-- async_pipeline.h
- | |-- async_pipeline_impl.h
- | |-- block_queue.h
- | |-- common_defination.h
- | `-- wrapper.h
- `-- src
- |-- base_detection.cpp
- |-- base_infer_core.cpp
- `-- base_sam.cpp
- ```
-
-
- - Abstract interface classes for functional modules
- ```bash
- |-- base_infer_core.h
- |-- base_detection.h
- |-- base_sam.h
- ```
- 1. **`base_infer_core.h`**: Defines the core inference functionalities and related abstract classes, while also providing an abstract base class for the foundational features of the inference core module.
- 2. **`base_detection.h`**: Defines the abstract base class for 2D detection functionalities.
- 3. **`base_sam.h`**: Defines the abstract base class for SAM functionalities.
-
- - Base classes for the asynchronous inference pipeline framework
- ```bash
- |-- async_pipeline.h
- |-- async_pipeline_impl.h
- |-- block_queue.h
- |-- common_defination.h
- `-- wrapper.h
- ```
- 1. **`async_pipeline.h`** and **`async_pipeline_impl.h`**: Define the asynchronous inference framework and its implementation.
- 2. **`block_queue.h`**: Implements the blocking queue.
- 3. **`common_defination.h`**: Contains common definitions, such as 2D bounding boxes.
- 4. **`wrapper.h`**: Provides wrappers for certain classes, such as the encapsulation of OpenCV's `cv::Mat` format.
-
-
-## TODO
-
diff --git a/deploy_core/include/deploy_core/async_pipeline.h b/deploy_core/include/deploy_core/async_pipeline.h
deleted file mode 100644
index 8593c78..0000000
--- a/deploy_core/include/deploy_core/async_pipeline.h
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * @Description:
- * @Author: Teddywesside 18852056629@163.com
- * @Date: 2024-11-25 14:00:38
- * @LastEditTime: 2024-11-26 22:29:22
- * @FilePath: /easy_deploy/deploy_core/include/deploy_core/async_pipeline.h
- */
-#ifndef ___DEPLOY_CORE_ASYNC_PIPELINE_H
-#define ___DEPLOY_CORE_ASYNC_PIPELINE_H
-
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-
-#include "deploy_core/async_pipeline_impl.h"
-#include "deploy_core/blob_buffer.h"
-#include "deploy_core/block_queue.h"
-
-namespace async_pipeline {
-
-/**
- * @brief A abstract class of image data. Needed by pipeline processing. Useful when data is
- * on device or other location which host cant read/write data directly. Could enable the
- * zero-copy feature if needed.
- *
- */
-class IPipelineImageData {
-public:
- struct ImageDataInfo {
- uint8_t *data_pointer;
- int image_height;
- int image_width;
- int image_channels;
- DataLocation location;
- ImageDataFormat format;
- };
- virtual const ImageDataInfo &GetImageDataInfo() const = 0;
-
-protected:
- virtual ~IPipelineImageData() = default;
-};
-
-/**
- * @brief The basic unit pointer which is parsed in the pipeline processing.
- *
- */
-class IPipelinePackage {
-public:
- /**
- * @brief `GetInferBuffer` provides the interface to get the blobs buffer instance
- * which will be used to deploy inference. Case the algorithm may need multiple inference
- * core and multiple blobs buffer to complete the whole processing.
- *
- * @return std::shared_ptr
- */
- virtual std::shared_ptr GetInferBuffer() = 0;
-
-protected:
- virtual ~IPipelinePackage() = default;
-};
-
-/**
- * @brief This base class provides a simple implementation of the asynchronous inference
- * pipeline which could be plug-and-play.
- *
- * `BaseAsyncPipeline` takes function instance as a basic unit `Block` of the pipeline. User should
- * call the static method `BuildPipelineBlock` to construct a `Block`. Multiple `Block`s make up
- * a `Context`, which pipeline deploys the whole process on.
- *
- * @tparam ResultType
- * @tparam GenResult
- */
-template
-class BaseAsyncPipeline {
- using ParsingType = std::shared_ptr;
- using Block_t = AsyncPipelineBlock;
- using Context_t = AsyncPipelineContext;
-
-protected:
- BaseAsyncPipeline() = default;
-
- ~BaseAsyncPipeline()
- {
- ClosePipeline();
- }
-
- /**
- * @brief The `Block` in pipeline is constructed with a function and its name. Call this method
- * in the derived class to get `Block_t` instance which is used to configure the whole pipeline.
- *
- * @param func
- * @param block_name
- * @return Block_t
- */
- static Block_t BuildPipelineBlock(const std::function &func,
- const std::string &block_name)
- {
- return Block_t(func, block_name);
- }
-
- /**
- * @brief Configure the pipelien with a `pipeline_name` and multiple `Context_t` instances. One
- * derived class intance could have sereral pipelines by calling `ConfigPipeline`.
- *
- * @param pipeline_name
- * @param block_list
- */
- void ConfigPipeline(const std::string &pipeline_name, const std::vector &block_list)
- {
- map_name2instance_.emplace(pipeline_name, block_list);
- }
-
-public:
- /**
- * @brief Get the default pipeline context. Multiple instances derived from `BaseAsyncPipeline`
- * could use this method to get the context from the other to generate a more complex pipeline.
- * For example, in detection_2d_yolov8, we combine the algorithm process and inference_core
- * process to make a integral processing pipeline.
- *
- * @return const Context_t&
- */
- const Context_t &GetPipelineContext() const
- {
- if (map_name2instance_.size() != 1)
- {
- throw std::runtime_error("[BaseAsyncPipeline] expect one pipeline, got " +
- std::to_string(map_name2instance_.size()));
- }
- return map_name2instance_.begin()->second.GetContext();
- }
-
- /**
- * @brief `PushPipeline` allow user to asynchronously push the package into pipeline and wait on
- * the `future` in another thread. The instance of template type `Result` is generated by functor
- * `GenResult`.
- *
- * @param pipeline_name
- * @param package
- * @return std::future
- */
- [[nodiscard]] std::future PushPipeline(const std::string &pipeline_name,
- const ParsingType &package) noexcept
- {
- if (map_name2instance_.find(pipeline_name) == map_name2instance_.end())
- {
- LOG(ERROR) << "[BaseAsyncPipeline] `PushPipeline` pipeline {" << pipeline_name
- << "} is not valid !!!";
- return std::future();
- }
-
- map_index2result_[package_index_] = std::promise();
- auto ret = map_index2result_[package_index_].get_future();
-
- auto callback = [this, package_index = package_index_](const ParsingType &package) -> bool {
- ResultType result = gen_result_from_package_(package);
- map_index2result_[package_index].set_value(std::move(result));
- map_index2result_.erase(package_index);
- return true;
- };
- map_name2instance_[pipeline_name].PushPipeline(package, callback);
-
- package_index_++;
-
- return std::move(ret);
- }
-
- /**
- * @brief Return if the pipeline is initialized.
- *
- * @param pipeline_name
- * @return true
- * @return false
- */
- bool IsPipelineInitialized(const std::string &pipeline_name) noexcept
- {
- if (map_name2instance_.find(pipeline_name) == map_name2instance_.end())
- {
- return false;
- }
- return map_name2instance_[pipeline_name].IsInitialized();
- }
-
- /**
- * @brief Close all pipeline. The un-finished packages will be dropped.
- *
- */
- void ClosePipeline()
- {
- for (auto &p_name_ins : map_name2instance_)
- {
- p_name_ins.second.ClosePipeline();
- }
- }
-
- /**
- * @brief Stop all pipeline. The un-finished packages will not be dropped.
- *
- */
- void StopPipeline()
- {
- for (auto &p_name_ins : map_name2instance_)
- {
- p_name_ins.second.StopPipeline();
- }
- }
-
- /**
- * @brief Initialize all configured pipeline. Call this function before push packages into
- * pipeline.
- *
- */
- void InitPipeline()
- {
- for (auto &p_name_ins : map_name2instance_)
- {
- p_name_ins.second.Init();
- }
- }
-
-private:
- std::unordered_map> map_name2instance_;
-
- size_t package_index_ = 0;
- std::unordered_map> map_index2result_;
- GenResult gen_result_from_package_;
-};
-
-} // namespace async_pipeline
-
-#endif
\ No newline at end of file
diff --git a/deploy_core/include/deploy_core/async_pipeline_impl.h b/deploy_core/include/deploy_core/async_pipeline_impl.h
deleted file mode 100644
index a7692c1..0000000
--- a/deploy_core/include/deploy_core/async_pipeline_impl.h
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- * @Description:
- * @Author: Teddywesside 18852056629@163.com
- * @Date: 2024-11-25 14:00:38
- * @LastEditTime: 2024-11-26 21:50:48
- * @FilePath: /easy_deploy/deploy_core/include/deploy_core/async_pipeline_impl.h
- */
-#ifndef __EASY_DEPLOY_ASYNC_PIPELINE_IMPL_H
-#define __EASY_DEPLOY_ASYNC_PIPELINE_IMPL_H
-
-#include
-#include
-#include
-
-#include
-#include
-
-#include "deploy_core/block_queue.h"
-
-namespace async_pipeline {
-
-/**
- * @brief Async Pipeline Block
- *
- * @tparam ParsingType
- */
-template
-class AsyncPipelineBlock {
-public:
- AsyncPipelineBlock() = default;
- AsyncPipelineBlock(const AsyncPipelineBlock &block)
- : func_(block.func_), block_name_(block.block_name_)
- {}
-
- AsyncPipelineBlock &operator=(const AsyncPipelineBlock &block)
- {
- func_ = block.func_;
- block_name_ = block.block_name_;
- return *this;
- }
-
- AsyncPipelineBlock(const std::function &func) : func_(func)
- {}
-
- AsyncPipelineBlock(const std::function &func, const std::string &block_name)
- : func_(func), block_name_(block_name)
- {}
-
- const std::string &GetName() const
- {
- return block_name_;
- }
-
- bool operator()(const ParsingType &pipeline_unit) const
- {
- return func_(pipeline_unit);
- }
-
-private:
- std::function func_;
- std::string block_name_;
-};
-
-/**
- * @brief Async Pipeline Context
- *
- * @tparam ParsingType
- */
-template
-class AsyncPipelineContext {
- using Block_t = AsyncPipelineBlock;
- using Context_t = AsyncPipelineContext;
-
-public:
- AsyncPipelineContext() = default;
-
- AsyncPipelineContext(const Block_t &block) : blocks_({block})
- {}
-
- AsyncPipelineContext(const std::vector &block_vec)
- {
- for (const auto &block : block_vec)
- {
- blocks_.push_back(block);
- }
- }
-
- AsyncPipelineContext &operator=(const std::vector &block_vec)
- {
- for (const auto &block : block_vec)
- {
- blocks_.push_back(block);
- }
- return *this;
- }
-
- AsyncPipelineContext(const Context_t &context) : blocks_(context.blocks_)
- {}
-
- AsyncPipelineContext(const std::vector &context_vec)
- {
- for (const auto &context : context_vec)
- {
- for (const auto &block : context.blocks_)
- {
- blocks_.push_back(block);
- }
- }
- }
-
- AsyncPipelineContext &operator=(const std::vector &context_vec)
- {
- for (const auto &context : context_vec)
- {
- for (const auto &block : context.blocks_)
- {
- blocks_.push_back(block);
- }
- }
- return *this;
- }
-
- AsyncPipelineContext &operator=(const Context_t &context)
- {
- for (const auto &block : context.blocks_)
- {
- blocks_.push_back(block);
- }
- return *this;
- }
-
-public:
- std::vector blocks_;
-};
-
-/**
- * @brief Async Pipeline Processing Instance
- *
- * @tparam ParsingType
- */
-template
-class PipelineInstance {
- using Block_t = AsyncPipelineBlock;
- using Context_t = AsyncPipelineContext;
- using Callback_t = std::function;
-
- // for inner processing
- struct _InnerPackage {
- ParsingType package;
- Callback_t callback;
- };
- using InnerParsingType = std::shared_ptr<_InnerPackage>;
- using InnerBlock_t = AsyncPipelineBlock;
- using InnerContext_t = AsyncPipelineContext;
-
-public:
- PipelineInstance() = default;
-
- PipelineInstance(const std::vector &block_list) : context_(block_list)
- {
- // initialize inner context
- std::vector inner_block_list;
- for (const auto &block : context_.blocks_)
- {
- auto func = [&](InnerParsingType p) -> bool { return block(p->package); };
- InnerBlock_t inner_block(func, block.GetName());
- inner_block_list.push_back(inner_block);
- }
- inner_context_ = InnerContext_t(inner_block_list);
- }
-
- ~PipelineInstance()
- {
- ClosePipeline();
- }
-
- void Init(int bq_max_size = 100)
- {
- // 1. for `n` blocks, construct `n+1` block queues
- const auto blocks = inner_context_.blocks_;
- const int n = blocks.size();
- LOG(INFO) << "[AsyncPipelineInstance] Total {" << n << "} Pipeline Blocks";
- for (int i = 0; i < n + 1; ++i)
- {
- block_queue_.emplace_back(std::make_shared>(bq_max_size));
- }
- pipeline_close_flag_.store(false);
-
- async_futures_.resize(n + 1);
- // 2. open `n` async threads to execute blocks
- for (int i = 0; i < n; ++i)
- {
- async_futures_[i] = std::async(&PipelineInstance::ThreadExcuteEntry, this, block_queue_[i],
- block_queue_[i + 1], blocks[i]);
- }
- // 3. open output threads to execute callback
- async_futures_[n] = std::async(&PipelineInstance::ThreadOutputEntry, this, block_queue_[n]);
-
- pipeline_initialized_.store(true);
- }
-
- void ClosePipeline()
- {
- if (pipeline_initialized_)
- {
- LOG(INFO) << "[AsyncPipelineInstance] Closing pipeline ...";
- for (const auto &bq : block_queue_)
- {
- bq->DisableAndClear();
- }
- LOG(INFO) << "[AsyncPipelineInstance] Disabled all block queue ...";
- pipeline_close_flag_.store(true);
-
- for (auto &future : async_futures_)
- {
- auto res = future.get();
- }
- LOG(INFO) << "[AsyncPipelineInstance] Join all block queue ...";
- block_queue_.clear();
- LOG(INFO) << "[AsyncPipelineInstance] Async pipeline is released successfully!!";
- pipeline_initialized_ = false;
- pipeline_close_flag_.store(true);
- pipeline_no_more_input_.store(true);
- }
- }
-
- void StopPipeline()
- {
- if (pipeline_initialized_)
- {
- pipeline_no_more_input_.store(true);
- block_queue_[0]->SetNoMoreInput();
- }
- }
-
- bool IsInitialized() const
- {
- return pipeline_initialized_;
- }
-
- const Context_t &GetContext() const
- {
- return context_;
- }
-
- void PushPipeline(const ParsingType &obj, const Callback_t &callback)
- {
- auto inner_pack = std::make_shared<_InnerPackage>();
- inner_pack->package = obj;
- inner_pack->callback = callback;
-
- block_queue_[0]->BlockPush(inner_pack);
- }
-
-private:
- bool ThreadExcuteEntry(std::shared_ptr> bq_input,
- std::shared_ptr> bq_output,
- const InnerBlock_t &pipeline_block)
- {
- LOG(INFO) << "[AsyncPipelineInstance] {" << pipeline_block.GetName() << "} thread start!";
- while (!pipeline_close_flag_)
- {
- auto data = bq_input->Take();
- if (!data.has_value())
- {
- if (pipeline_no_more_input_)
- {
- LOG(INFO) << "[AsyncPipelineInstance] {" << pipeline_block.GetName()
- << "} set no more output ...";
- bq_output->SetNoMoreInput();
- break;
- } else
- {
- continue;
- }
- }
- auto start = std::chrono::high_resolution_clock::now();
- bool status = pipeline_block(data.value());
- auto end = std::chrono::high_resolution_clock::now();
- LOG(INFO) << "[AsyncPipelineInstance] {" << pipeline_block.GetName() << "} cost (us) : "
- << std::chrono::duration_cast(end - start).count();
-
- if (!status)
- {
- LOG(WARNING) << "[AsyncPipelineInstance] {" << pipeline_block.GetName()
- << "}, excute block function failed! Drop package.";
- continue;
- }
-
- bq_output->BlockPush(data.value());
- }
- LOG(INFO) << "[AsyncPipelineInstance] {" << pipeline_block.GetName() << "} thread quit!";
- return true;
- }
-
- bool ThreadOutputEntry(std::shared_ptr> bq_input)
- {
- LOG(INFO) << "[AsyncPipelineInstance] {Output} thread start!";
- while (!pipeline_close_flag_)
- {
- auto data = bq_input->Take();
- if (!data.has_value())
- {
- if (pipeline_no_more_input_)
- {
- LOG(INFO) << "[AsyncPipelineInstance] {Output} set no more output ...";
- break;
- } else
- {
- continue;
- }
- }
- const auto &inner_pack = data.value();
- if (inner_pack != nullptr && inner_pack->callback != nullptr)
- {
- inner_pack->callback(inner_pack->package);
- } else
- {
- LOG(WARNING)
- << "[AsyncPipelineInstance] {Output} package without valid callback will be dropped!!!";
- }
- }
- LOG(INFO) << "[AsyncPipelineInstance] {Output} thread quit!";
-
- return true;
- }
-
-private:
- Context_t context_;
-
- InnerContext_t inner_context_;
-
- std::vector>> block_queue_;
- std::vector> async_futures_;
-
- std::atomic pipeline_close_flag_{true};
- std::atomic pipeline_no_more_input_{true};
- std::atomic pipeline_initialized_{false};
-};
-
-} // namespace async_pipeline
-
-#endif
diff --git a/deploy_core/include/deploy_core/base_detection.h b/deploy_core/include/deploy_core/base_detection.h
deleted file mode 100644
index fa999ff..0000000
--- a/deploy_core/include/deploy_core/base_detection.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * @Description:
- * @Author: Teddywesside 18852056629@163.com
- * @Date: 2024-11-25 14:24:19
- * @LastEditTime: 2024-12-02 20:03:34
- * @FilePath: /easy_deploy/deploy_core/include/deploy_core/base_detection.h
- */
-#ifndef __DEPLOY_CORE_BASE_DETECTION_H
-#define __DEPLOY_CORE_BASE_DETECTION_H
-
-#include
-#include
-#include
-#include
-
-#include
-
-#include "deploy_core/async_pipeline.h"
-#include "deploy_core/base_infer_core.h"
-
-namespace detection_2d {
-
-/**
- * @brief A abstract interface class which defines the preprocess interface of detection_2d
- * algorithms.
- *
- */
-class IDetectionPreProcess {
-public:
- virtual float Preprocess(std::shared_ptr input_image_data,
- std::shared_ptr blob_buffer,
- const std::string &blob_name,
- int dst_height,
- int dst_width) = 0;
-};
-
-/**
- * @brief A abstract interface class which defines the postprocess interface of detection_2d
- * algorithms.
- *
- */
-class IDetectionPostProcess {
-public:
- virtual void Postprocess(const std::vector &output_blobs_ptr,
- std::vector &results,
- float conf_threshold,
- float transform_scale) = 0;
-};
-
-/**
- * @brief The common detection_2d pipeline package wrapper.
- *
- */
-struct DetectionPipelinePackage : public async_pipeline::IPipelinePackage {
- // the wrapped pipeline image data
- std::shared_ptr input_image_data;
- // confidence used in postprocess
- float conf_thresh;
- // record the transform factor during image preprocess
- float transform_scale;
- // the detection result
- std::vector results;
-
- // maintain the blobs buffer instance
- std::shared_ptr infer_buffer;
-
- // override from `IPipelinePakcage`, to provide the blobs buffer to inference_core
- std::shared_ptr GetInferBuffer() override
- {
- if (infer_buffer == nullptr)
- {
- LOG(ERROR) << "[DetectionPipelinePackage] returned nullptr of infer_buffer!!!";
- }
- return infer_buffer;
- }
-};
-
-/**
- * @brief A abstract class defines two pure virtual methods -- `PreProcess` and `PostProcess`.
- * The derived class could only override these methods to make it work.
- *
- */
-class IDetectionModel {
-public:
- IDetectionModel() = default;
-
-protected:
- virtual ~IDetectionModel() = default;
-
- /**
- * @brief PreProcess-Stage. Inside the method, you should cast the `pipeline_unit` pointer to
- * `DetectionPipelinePackage` type pointer, and check if the convertion works. If the package
- * pointer is not valid or anything goes wrong, it should return `false` to mention the inference
- * processing to drop the package.
- *
- * @param pipeline_unit
- * @return true
- * @return false
- */
- virtual bool PreProcess(std::shared_ptr pipeline_unit) = 0;
-
- /**
- * @brief PostProcess-Stage. Inside the method, you should cast the `pipeline_unit` pointer to
- * `DetectionPipelinePackage` type pointer, and check if the convertion works. If the package
- * pointer is not valid or anything goes wrong, it should return `false` to mention the inference
- * processing to drop the package.
- *
- * @param pipeline_unit
- * @return true
- * @return false
- */
- virtual bool PostProcess(std::shared_ptr pipeline_unit) = 0;
-};
-
-/**
- * @brief A functor to generate detection results from `DetectionPipelinePackage`. Used in async
- * pipeline.
- *
- */
-class DetectionGenResultType {
-public:
- std::vector operator()(const std::shared_ptr &package)
- {
- auto detection_package = std::dynamic_pointer_cast(package);
- if (detection_package == nullptr)
- {
- LOG(ERROR) << "[DetectionGenResult] Got INVALID package ptr!!!";
- return {};
- }
- return std::move(detection_package->results);
- }
-};
-
-/**
- * @brief The base class of detection_2d algorithms. It implements `Detect` which is the synchronous
- * version of detection and `DetectAsync` which is the asynchronous version of detection.
- *
- * @note Call `InitPipeline()` before you intend to use `DetectAsync`. And Check if `DetectAsync`
- * returns a valid `std::future<>` instance before involke `get()` method.
- *
- */
-class BaseDetectionModel
- : public IDetectionModel,
- public async_pipeline::BaseAsyncPipeline, DetectionGenResultType> {
- typedef std::shared_ptr ParsingType;
-
-public:
- BaseDetectionModel(std::shared_ptr infer_core);
-
- /**
- * @brief Run the detection processing in synchronous mode.
- *
- * @param input_image input image in cv::Mat format.
- * @param det_results the output results
- * @param conf_thresh confidence threshold
- * @param isRGB if the input is rgb format. Will flip channels if `isRGB` == false.
- * @return true
- * @return false
- */
- bool Detect(const cv::Mat &input_image,
- std::vector &det_results,
- float conf_thresh,
- bool isRGB = false) noexcept;
-
- /**
- * @brief Run the detection processing in asynchronous mode.
- *
- * @param input_image input image in cv::Mat format.
- * @param conf_thresh confidence threshold
- * @param isRGB if the input is rgb format. Will flip channels if `isRGB` == false. default=false.
- * @param cover_oldest whether cover the oldest package if the pipeline queue is full.
- * default=false.
- * @return std::future>
- */
- [[nodiscard]] std::future> DetectAsync(const cv::Mat &input_image,
- float conf_thresh,
- bool isRGB = false,
- bool cover_oldest = false) noexcept;
-
-protected:
- // forbidden the access from outside to `BaseAsyncPipeline::PushPipeline`
- using BaseAsyncPipeline::PushPipeline;
-
- virtual ~BaseDetectionModel();
-
- std::shared_ptr infer_core_{nullptr};
-
- static std::string detection_pipeline_name_;
-};
-
-/**
- * @brief Abstract factory class of detection_2d model.
- *
- */
-class BaseDetection2DFactory {
-public:
- virtual std::shared_ptr Create() = 0;
-};
-
-class BaseDetectionPreprocessFactory {
-public:
- virtual std::shared_ptr Create() = 0;
-};
-
-class BaseDetectionPostprocessFactory {
-public:
- virtual std::shared_ptr Create() = 0;
-};
-
-} // namespace detection_2d
-
-#endif
\ No newline at end of file
diff --git a/deploy_core/include/deploy_core/base_infer_core.h b/deploy_core/include/deploy_core/base_infer_core.h
deleted file mode 100644
index 37e9124..0000000
--- a/deploy_core/include/deploy_core/base_infer_core.h
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * @Description:
- * @Author: Teddywesside 18852056629@163.com
- * @Date: 2024-11-26 08:42:05
- * @LastEditTime: 2024-12-02 19:03:37
- * @FilePath: /easy_deploy/deploy_core/include/deploy_core/base_infer_core.h
- */
-#ifndef __EASY_DEPLOY_BASE_INFER_CORE_H
-#define __EASY_DEPLOY_BASE_INFER_CORE_H
-
-#include
-#include
-#include
-#include
-
-#include "deploy_core/block_queue.h"
-#include "deploy_core/async_pipeline.h"
-
-namespace inference_core {
-
-enum InferCoreType { ONNXRUNTIME, TENSORRT, RKNN, NOT_PROVIDED };
-
-/**
- * @brief `IRotInferCore` is abstract interface class which defines all pure virtual functions
- * that the derived class should implement, e.g., `PreProcess`, `Inference` and `PostProcess`.
- *
- */
-class IRotInferCore {
-public:
- /**
- * @brief `AllocBlobsBuffer` is a common interface that user could get a brand new buffer
- * instance by. This pure virtual function is implemented by actual inference core, which
- * may take a while to process. Use pre-allocated buffer instance in mem buffer pool could
- * get better performance. See `BaseInferCore`.
- *
- * @return std::shared_ptr A brand new buffer instance allocated by inference
- * core.
- */
- virtual std::shared_ptr AllocBlobsBuffer() = 0;
-
- /**
- * @brief Get the core type.
- *
- * @return InferCoreType
- */
- virtual InferCoreType GetType()
- {
- return InferCoreType::NOT_PROVIDED;
- }
-
- /**
- * @brief Return the name of inference core.
- *
- * @return std::string
- */
- virtual std::string GetName()
- {
- return "";
- }
-
-protected:
- virtual ~IRotInferCore() = default;
-
- /**
- * @brief `PreProcess` stage of the inference process. Return true if this is stage is not
- * needed in the actual inference core implementation. Return false if something went wrong
- * while doing processing. The pipeline will drop the package if `PreProcess` returns false.
- *
- * @param buffer a common "pipeline" package ptr.
- * @return true
- * @return false
- */
- virtual bool PreProcess(std::shared_ptr buffer) = 0;
-
- /**
- * @brief `Inference` stage of the inference process. Return false if something went wrong
- * while doing processing. The pipeline will drop the package if `Inference` returns false.
- *
- * @param buffer a common "pipeline" package ptr.
- * @return true
- * @return false
- */
- virtual bool Inference(std::shared_ptr buffer) = 0;
-
- /**
- * @brief `PostProcess` stage of the inference process. Return false if something went wrong
- * while doing processing. The pipeline will drop the package if `PostProcess` returns false.
- *
- * @param buffer a common "pipeline" package ptr.
- * @return true
- * @return false
- */
- virtual bool PostProcess(std::shared_ptr buffer) = 0;
-};
-
-/**
- * @brief A simple implementation of mem buffer pool. Using `BlockQueue` to deploy a producer-
- * consumer model. It will allocate buffer using `AllocBlobsBuffer` method of `IRotInferCore`
- * and provides `IBlobsBuffer` ptr when `Alloc` method is called. The "Alloced" buffer will
- * return back to mem buffer pool while the customed deconstruction method of shared_ptr ptr
- * is called.
- *
- */
-class MemBufferPool {
-public:
- MemBufferPool(IRotInferCore *infer_core, const int pool_size)
- : pool_size_(pool_size), dynamic_pool_(pool_size)
- {
- for (int i = 0; i < pool_size; ++i)
- {
- auto blob_buffer = infer_core->AllocBlobsBuffer();
- dynamic_pool_.BlockPush(blob_buffer.get());
- static_pool_.insert({blob_buffer.get(), blob_buffer});
- }
- }
-
- std::shared_ptr Alloc(bool block)
- {
- // customed deconstruction method
- auto func_dealloc = [&](IBlobsBuffer *buf) {
- buf->Reset();
- this->dynamic_pool_.BlockPush(buf);
- };
-
- auto buf = block ? dynamic_pool_.Take() : dynamic_pool_.TryTake();
- return buf.has_value() ? std::shared_ptr(buf.value(), func_dealloc) : nullptr;
- }
-
- void Release()
- {
- if (dynamic_pool_.Size() != pool_size_)
- {
- LOG(WARNING) << "[MemBufPool] does not maintain all bufs when release func called!";
- }
- static_pool_.clear();
- }
-
- int RemainSize()
- {
- return dynamic_pool_.Size();
- }
-
- ~MemBufferPool()
- {
- Release();
- }
-
-private:
- const int pool_size_;
- BlockQueue dynamic_pool_;
- std::unordered_map> static_pool_;
-};
-
-/**
- * @brief A dummy class to help `BaseInferCore` inherit from `BaseAsyncPipeline` to generate
- * async pipeline framework.
- *
- */
-class _DummyInferCoreGenReulstType {
-public:
- bool operator()(const std::shared_ptr & /*package*/)
- {
- return true;
- }
-};
-
-/**
- * @brief `BaseInferCore` inherits `IRotInferCore` and `BaseAsyncPipeline`. `IRotInferCore`
- * defines all pure virtual methods of the abstract function of the inference core.
- * `BaseAsyncPipeline` provides a set of methods to help user build and utilize a async
- * inference pipeline. See `BaseAsyncPipeline` defination.
- *
- * @note The inheritance relationship between class A and class B is modified by protected.
- * And `BaseInferCore` only makes the `GetPipelineContext` method public, which means the
- * derived class of `BaseInferCore` is not supported to deploy async pipeline inference
- * process. It should be used by specific algorithms in its entirety.
- *
- */
-class BaseInferCore : public IRotInferCore,
- protected async_pipeline::BaseAsyncPipeline {
-protected:
- BaseInferCore();
- typedef std::shared_ptr ParsingType;
-
-public:
- using BaseAsyncPipeline::GetPipelineContext;
-
- /**
- * @brief This function provides a sync inference process which is completely independent
- * of the async inference pipeline. Through, it depends on the three stage virtual methods
- * defined in `IRotInferCore`. Return false if something went wrong while inference.
- *
- * @param buffer
- * @param batch_size default=1, multi-batch inference may not be supported.
- * @return true
- * @return false
- */
- bool SyncInfer(std::shared_ptr buffer, const int batch_size = 1);
-
- /**
- * @brief Get the pre-allocated blobs buffer shared pointer. The returned pointer is a
- * smart pointer which will automatically return to the pool when it is released.
- *
- * @param block whether to block the thread if the pool is empty.
- * @return std::shared_ptr
- */
- std::shared_ptr GetBuffer(bool block);
-
- /**
- * @brief Release the sources in base class.
- *
- * @warning The derived class should call `BaseInferCore::Release()` in its deconstruct
- * function in order to release the blobs buffer before the enviroment is destroyed.
- * Things go wrong if allocated memory released after their enviroment released on some
- * hardware.
- *
- */
- virtual void Release();
-
-protected:
- virtual ~BaseInferCore();
-
- /**
- * @brief Init the base class memory pool.
- *
- * @warning Please call `Init()` at the derived class construct function`s end when the
- * runtime enviroment is setup successfully. This method will call `AllocBlobsBuffer`
- * to create a memory pool. Temporary we manually call this method to init the memory pool.
- *
- * @param mem_buf_size number of blobs buffers pre-allocated.
- */
- void Init(int mem_buf_size = 5);
-
-private:
- std::unique_ptr mem_buf_pool_{nullptr};
-};
-
-/**
- * @brief Abstract factory class of infer_core.
- *
- */
-class BaseInferCoreFactory {
-public:
- virtual std::shared_ptr Create() = 0;
-};
-
-} // namespace inference_core
-
-#endif
\ No newline at end of file
diff --git a/deploy_core/include/deploy_core/base_sam.h b/deploy_core/include/deploy_core/base_sam.h
deleted file mode 100644
index e309f65..0000000
--- a/deploy_core/include/deploy_core/base_sam.h
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * @Description:
- * @Author: Teddywesside 18852056629@163.com
- * @Date: 2024-11-25 18:38:34
- * @LastEditTime: 2024-12-02 19:03:30
- * @FilePath: /easy_deploy/deploy_core/include/deploy_core/base_sam.h
- */
-#ifndef __EASY_DEPLOY_BASE_SAM_H
-#define __EASY_DEPLOY_BASE_SAM_H
-
-#include "deploy_core/base_infer_core.h"
-#include "deploy_core/common_defination.h"
-
-#include
-
-namespace sam {
-
-/**
- * @brief The common sam pipeline package wrapper.
- *
- */
-struct SamPipelinePackage : public async_pipeline::IPipelinePackage {
- // maintain image-encoder's blobs buffer
- std::shared_ptr image_encoder_blobs_buffer;
- // maintain mask-decoder's blobs buffer
- std::shared_ptr mask_decoder_blobs_buffer;
-
- // the wrapped pipeline image data
- std::shared_ptr input_image_data;
- // input boxes prompt
- std::vector boxes;
- // input points prompt
- std::vector> points;
- // input points labels
- std::vector labels;
- // record the transform factor in image preprocessing
- float transform_scale;
- // mask results
- cv::Mat mask;
-
- // the blobs buffer used in inference core processing
- std::shared_ptr infer_buffer;
- std::shared_ptr GetInferBuffer() override
- {
- return infer_buffer;
- }
-};
-
-/**
- * @brief The abstract interface class of `Segment Anything Model`(SAM) which defines
- * image-preprocess、prompt-preprocess、mask-postprocess interfaces. Any SAM algorithms
- * implementation could override these pure virtual methods to make up a sync/async
- * inference supported pipeline.
- *
- * workflow:
- *
- * `ImagePreProcess` --> `ImageEncoderInfer` --> `PromptBoxPreProcess`/`PromptPointPreProcess`
- * --> `MaskDecoderInfer` --> `MaskPostProcess`
- *
- */
-class ISamModel {
-protected:
- typedef std::shared_ptr ParsingType;
- virtual ~ISamModel() = default;
- /**
- * @brief The `ImagePreProcess` stage. Inside the method, you should cast the `pipeline_unit`
- * pointer to `SamPipelinePackage` type pointer, and check if the convertion works. If the
- * package pointer is not valid or anything goes wrong, it should return `false` to mention
- * the inference pipelinee to drop the package.
- *
- * @param pipeline_unit
- * @return true
- * @return false
- */
- virtual bool ImagePreProcess(ParsingType pipeline_unit) = 0;
-
- /**
- * @brief The `PromptBoxPreProcess` stage. Inside the method, you should cast the `pipeline_unit`
- * pointer to `SamPipelinePackage` type pointer, and check if the convertion works. If the
- * package pointer is not valid or anything goes wrong, it should return `false` to mention
- * the inference pipelinee to drop the package.
- *
- * @param pipeline_unit
- * @return true
- * @return false
- */
- virtual bool PromptBoxPreProcess(ParsingType pipeline_unit) = 0;
-
- /**
- * @brief The `PromptPointPreProcess` stage. Inside the method, you should cast the
- * `pipeline_unit` pointer to `SamPipelinePackage` type pointer, and check if the convertion
- * works. If the package pointer is not valid or anything goes wrong, it should return `false` to
- * mention the inference pipelinee to drop the package.
- *
- * @param pipeline_unit
- * @return true
- * @return false
- */
- virtual bool PromptPointPreProcess(ParsingType pipeline_unit) = 0;
-
- /**
- * @brief The `MaskPostProcess` stage. Inside the method, you should cast the `pipeline_unit`
- * pointer to `SamPipelinePackage` type pointer, and check if the convertion works. If the
- * package pointer is not valid or anything goes wrong, it should return `false` to mention
- * the inference pipelinee to drop the package.
- *
- * @param pipeline_unit
- * @return true
- * @return false
- */
- virtual bool MaskPostProcess(ParsingType pipeline_unit) = 0;
-};
-
-/**
- * @brief A functor to generate sam results from `SamPipelinePackage`. Used in async pipeline.
- *
- */
-class SamGenResultType {
-public:
- cv::Mat operator()(const std::shared_ptr &package)
- {
- auto sam_package = std::dynamic_pointer_cast(package);
- if (sam_package == nullptr)
- {
- LOG(ERROR) << "[SamGenResultType] Got INVALID package ptr!!!";
- return {};
- }
- return std::move(sam_package->mask);
- }
-};
-
-/**
- * @brief The base class of SAM model. It implements `GenerateMask` and `GenerateMaskAsync`
- * both with `box` prompts or `points` prompts. In the asynchronous pipeline inference mode,
- * the `box` pipeline and `point` pipeline could been used in the same time, cause they are
- * independent.
- *
- */
-class BaseSamModel : public ISamModel,
- public async_pipeline::BaseAsyncPipeline {
-protected:
- using ParsingType = std::shared_ptr;
- /**
- * @brief Construct `BaseSamModel` with `image_encoder_core` and at least one of `mask_points_
- * decoder_core` or `mask_boxes_decoder_core`. Will throw exception if both decoders with points
- * and boxes are nullptr.
- *
- * @param model_name
- * @param image_encoder_core
- * @param mask_points_decoder_core
- * @param mask_boxes_decoder_core
- */
- BaseSamModel(const std::string &model_name,
- std::shared_ptr image_encoder_core,
- std::shared_ptr mask_points_decoder_core,
- std::shared_ptr mask_boxes_decoder_core);
-
- virtual ~BaseSamModel();
-
-public:
- /**
- * @brief Generate the mask with points as prompts in sync mode.
- *
- * @param image input image
- * @param points points coords
- * @param labels points labels, 0 - background; 1 - foreground
- * @param cv::Mat reference to the result. 0 - background; 255 - foreground
- * @param isRGB if the input image is RGB format. default=false
- * @return true
- * @return false
- */
- bool GenerateMask(const cv::Mat &image,
- const std::vector> &points,
- const std::vector &labels,
- cv::Mat &result,
- bool isRGB = false);
- /**
- * @brief Generate the mask with boxes as prompts in sync mode.
- *
- * @note SAM model with boxes only support one box as its prompts. More boxes wont make any
- * exception, but also will not take effect.
- *
- * @param image input image
- * @param boxes boxes coords
- * @param cv::Mat reference to the result. 0 - background; 255 - foreground
- * @param isRGB if the input image is RGB format. default=false
- * @return true
- * @return false
- */
- bool GenerateMask(const cv::Mat &image,
- const std::vector &boxes,
- cv::Mat &result,
- bool isRGB = false);
-
- /**
- * @brief Generate the mask with points as prompts in async mode.
- *
- * @warning The returned `std::future<>` instance could be invalid. Please make sure it is
- * valid before you call `get()`.
- *
- * @param image input image
- * @param points points coords
- * @param labels points labels, 0 - background; 1 - foreground
- * @param isRGB if the input image is RGB format. default=false
- * @param cover_oldest whether cover the oldest package if the pipeline queue is full.
- * default=false.
- * @return std::future A std::future instance of the result.
- */
- [[nodiscard]] std::future GenerateMaskAsync(
- const cv::Mat &image,
- const std::vector> &points,
- const std::vector &labels,
- bool isRGB = false,
- bool cover_oldest = false);
-
- /**
- * @brief Generate the mask with boxes as prompts in async mode.
- *
- * @note SAM model with boxes only support one box as its prompts. More boxes wont make any
- * exception, but also will not take effect.
- *
- * @warning The returned `std::future<>` instance could be invalid. Please make sure it is
- * valid before you call `get()`.
- *
- * @param image input image
- * @param boxes boxes coords
- * @param callback callback function if needed. default=nullptr.
- * @param isRGB if the input image is RGB format. default=false
- * @param cover_oldest whether cover the oldest package if the pipeline queue is full.
- * default=false.
- * @return std::future A std::future instance of the result.
- */
- [[nodiscard]] std::future GenerateMaskAsync(const cv::Mat &image,
- const std::vector &boxes,
- bool isRGB = false,
- bool cover_oldest = false);
-
-private:
- // forbidden the access from outside to `BaseAsyncPipeline::PushPipeline`
- using BaseAsyncPipeline::PushPipeline;
-
- void ConfigureBoxPipeline();
-
- void ConfigurePointPipeline();
-
-protected:
- std::shared_ptr image_encoder_core_;
- std::shared_ptr mask_points_decoder_core_;
- std::shared_ptr mask_boxes_decoder_core_;
-
- const std::string box_pipeline_name_;
- const std::string point_pipeline_name_;
- const std::string model_name_;
-};
-
-
-/**
- * @brief Abstract factory base class of Sam model.
- *
- */
-class BaseSamFactory {
-public:
- virtual std::shared_ptr Create() = 0;
-};
-
-} // namespace sam
-
-#endif
\ No newline at end of file
diff --git a/deploy_core/include/deploy_core/blob_buffer.h b/deploy_core/include/deploy_core/blob_buffer.h
deleted file mode 100644
index c503bbf..0000000
--- a/deploy_core/include/deploy_core/blob_buffer.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * @Description:
- * @Author: Teddywesside 18852056629@163.com
- * @Date: 2024-11-25 15:27:59
- * @LastEditTime: 2024-11-26 21:57:59
- * @FilePath: /easy_deploy/deploy_core/include/deploy_core/blob_buffer.h
- */
-#ifndef __EASY_DEPLOY_BLOB_BUFFER_H
-#define __EASY_DEPLOY_BLOB_BUFFER_H
-
-#include
-#include
-
-#include "deploy_core/common_defination.h"
-
-namespace inference_core {
-
-/**
- * @brief The key to abstracting and shielding the inference framework and hardware characteristics
- * lies in how the management of inference buffer is abstracted. Considering the requirements
- * of asynchronous inference framework, we encapsulated the buffer used during inference into a
- * dedicated class and abstracted its functionality by developing the `IBlobsBuffer` interface
- * class. The implementation of this interface must provide the following functionalities:
- *
- * 1. Set the buffer address to be used for inference.
- *
- * 2. Get the default buffer address.
- *
- * 3. Set the shape of the model blob.
- *
- * 4. Get the default blob shape.
- *
- * And Other base functionalities as declared below.
- *
- */
-class IBlobsBuffer {
-public:
- /**
- * @brief The `BlobsBuffer` instance should provide the buffer ptr which will be used in the
- * inference process. This buffer is allocated by certain inference_core by default. User could
- * customize the buffer ptr by calling `SetBlobBuffer`.
- *
- * @param blob_name The name of the blob.
- * @return std::pair Will return {nullptr, UNKOWN} if `blob_name` is
- * invalid.
- */
- virtual std::pair GetOuterBlobBuffer(
- const std::string &blob_name) noexcept = 0;
-
- /**
- * @brief The `BlobsBuffer` instance should provide the functionality to accept a customized
- * data buffer ptr which could be on host or device. Some inference frameworks based on
- * heterogeneous architecture hardware (e.g. CUDA) use buffer on device to deploy inference. There
- * is no need to copy data from host to device if the device buffer ptr is provided to
- * `BlobsBuffer`.
- *
- * @param blob_name The name of the blob.
- * @param data_ptr The ptr of the customized data buffer.
- * @param location Location of the customized data buffer.
- * @return true
- * @return false Will return false if `blob_name` is invalid.
- */
- virtual bool SetBlobBuffer(const std::string &blob_name,
- void *data_ptr,
- DataLocation location) noexcept = 0;
-
- /**
- * @brief `SetBlobBuffer` provides the functionality to change the default using data buffer
- * on host size or device side. After calling this method, `GetOuterBlobBuffer` will return
- * the buffer ptr on the certain side.
- *
- * @note Some inference frameworks (e.g. onnxruntime, rknn) do not distinguish buffer between
- * the host side and the device side. So this method will not change their default buffer ptr.
- *
- * @param blob_name The name of the blob.
- * @param location Location of the customized data buffer.
- * @return true
- * @return false Will return false if `blob_name` is invalid.
- */
- virtual bool SetBlobBuffer(const std::string &blob_name, DataLocation location) noexcept = 0;
-
- /**
- * @brief `SetBlobShape` provides the functionality to change the dynamic blob shape in the
- * inference processing if the model engine allows.
- *
- * @note Some inference framework (e.g. rknn) do not support dynamic blob shape. And make sure
- * your model supports dynamic blob shape before you call this method.
- *
- * @param blob_name The name of the blob.
- * @param shape The dynamic blob shape.
- * @return true
- * @return false Will return false if `blob_name` is invalid.
- */
- virtual bool SetBlobShape(const std::string &blob_name,
- const std::vector &shape) noexcept = 0;
-
- /**
- * @brief `GetBlobShape` provides the functionality to get the dynamic blob shape in the
- * inference processing. By default, this will return the max blob shape which is parsed
- * in `inference_core` construction.
- *
- * @param blob_name The name of the blob.
- * @return const std::vector& The const reference of blob shape vector maintained.
- */
- virtual const std::vector &GetBlobShape(const std::string &blob_name) const noexcept = 0;
-
- /**
- * @brief Return the total number of blobs.
- *
- * @return size_t
- */
- virtual size_t Size() const noexcept = 0;
-
- /**
- * @brief Reset the `BlobsBuffer` which will not release the buffer memory.
- *
- */
- virtual void Reset() noexcept = 0;
-
-protected:
- virtual ~IBlobsBuffer() noexcept = default;
-
- /**
- * @brief Release the whole `BlobsBuffer` instance.
- *
- */
- virtual void Release() noexcept = 0;
-};
-
-} // namespace inference_core
-
-#endif
\ No newline at end of file
diff --git a/deploy_core/include/deploy_core/block_queue.h b/deploy_core/include/deploy_core/block_queue.h
deleted file mode 100644
index c73f36f..0000000
--- a/deploy_core/include/deploy_core/block_queue.h
+++ /dev/null
@@ -1,294 +0,0 @@
-/*
- * @Description:
- * @Author: Teddywesside 18852056629@163.com
- * @Date: 2024-11-25 14:00:38
- * @LastEditTime: 2024-11-26 09:29:20
- * @FilePath: /EasyDeploy/deploy_core/include/deploy_core/block_queue.h
- */
-#ifndef __EASY_DEPLOY_BLOCK_QUEUE_H
-#define __EASY_DEPLOY_BLOCK_QUEUE_H
-
-#include
-#include
-#include
-#include
-
-/**
- * @brief A simple implementation of block queue.
- *
- * @tparam T
- */
-template
-class BlockQueue {
-public:
- BlockQueue(const size_t max_size) : max_size_(max_size)
- {}
-
- /**
- * @brief Push a obj into the queue. Will block the thread if the queue is full.
- *
- * @param obj
- * @return true
- * @return false
- */
- bool BlockPush(const T &obj) noexcept;
-
- /**
- * @brief Push a obj into the queue. Will cover the oldest element if the queue is full.
- *
- * @param obj
- * @return true
- * @return false
- */
- bool CoverPush(const T &obj) noexcept;
-
- /**
- * @brief Get and pop the oldest element in the queue. Will block the thread if the queue is
- * empty.
- *
- * @return std::optional
- */
- std::optional Take() noexcept;
-
- /**
- * @brief Get and pop the oldest element in the queue. Will return `nullopt` if the queue is
- * empty.
- *
- * @return std::optional
- */
- std::optional TryTake() noexcept;
-
- /**
- * @brief Get the size of the queue.
- *
- * @return int
- */
- int Size() noexcept;
-
- /**
- * @brief Return if the queue is empty.
- *
- * @return true
- * @return false
- */
- bool Empty() noexcept;
-
- /**
- * @brief Set the `push` process disabled. After called this method, all `push` calling will
- * return `false`, which means this block queue no longer accept new elements.
- *
- */
- void DisablePush() noexcept;
-
- /**
- * @brief Set the `push` process enabled.
- *
- */
- void EnablePush() noexcept;
-
- /**
- * @brief Set the `take` process disabled. After called this method, all `take` calling will
- * return `false`, which means this block queue no longer provides elements.
- *
- */
- void DisableTake() noexcept;
-
- /**
- * @brief Set the `take` process enabled.
- *
- */
- void EnableTake() noexcept;
-
- /**
- * @brief Set the `push` and `take` process disabled.
- *
- */
- void Disable() noexcept;
-
- /**
- * @brief Get the max size of the block queue.
- *
- * @return int
- */
- int GetMaxSize() const noexcept;
-
- /**
- * @brief Set the `push` and `take` process disabled, and clear all elements in it.
- *
- */
- void DisableAndClear() noexcept;
-
- /**
- * @brief Set the `push` process will no longer be called. The consumer threads which were
- * blocked will be notified and quit blocking, when this method is called.
- *
- */
- void SetNoMoreInput() noexcept;
-
- ~BlockQueue() noexcept;
-
-private:
- const size_t max_size_;
- std::queue q_;
- std::atomic push_enabled_{true};
- std::atomic take_enabled_{true};
- std::condition_variable producer_cv_;
- std::condition_variable consumer_cv_;
- std::mutex lck_;
-
- std::atomic no_more_input_{false};
-};
-
-template
-BlockQueue::~BlockQueue() noexcept
-{
- Disable();
-}
-
-template
-bool BlockQueue::BlockPush(const T &obj) noexcept
-{
- std::unique_lock u_lck(lck_);
- while (q_.size() >= max_size_ && push_enabled_.load())
- {
- producer_cv_.wait(u_lck);
- }
- if (!push_enabled_.load())
- {
- return false;
- }
- q_.push(obj);
- consumer_cv_.notify_one();
- return true;
-}
-
-template
-bool BlockQueue::CoverPush(const T &obj) noexcept
-{
- std::unique_lock u_lck(lck_);
- if (!push_enabled_.load())
- {
- return false;
- }
- if (q_.size() == max_size_)
- {
- q_.pop();
- }
- q_.push(obj);
- consumer_cv_.notify_one();
- return true;
-}
-
-template
-std::optional BlockQueue::Take() noexcept
-{
- std::unique_lock u_lck(lck_);
- // block until: 1. take disabled; 2. no more input set; 3. new elements
- while (q_.size() == 0 && take_enabled_ && no_more_input_ == false)
- {
- consumer_cv_.wait(u_lck);
- }
- if (!take_enabled_ || (no_more_input_ && q_.size() == 0))
- {
- return std::nullopt;
- }
- T ret = q_.front();
- q_.pop();
- producer_cv_.notify_one();
-
- if (no_more_input_)
- {
- consumer_cv_.notify_all();
- }
- return ret;
-}
-
-template
-std::optional BlockQueue::TryTake() noexcept
-{
- std::unique_lock u_lck(lck_);
- if (q_.size() == 0)
- {
- return std::nullopt;
- } else
- {
- T ret = q_.front();
- q_.pop();
- producer_cv_.notify_all();
- if (no_more_input_)
- {
- consumer_cv_.notify_all();
- }
- return ret;
- }
-}
-
-template
-int BlockQueue::Size() noexcept
-{
- std::unique_lock u_lck(lck_);
- return q_.size();
-}
-
-template
-bool BlockQueue::Empty() noexcept
-{
- std::unique_lock u_lck(lck_);
- return q_.size() == 0;
-}
-
-template
-int BlockQueue::GetMaxSize() const noexcept
-{
- return max_size_;
-}
-
-template
-void BlockQueue::Disable() noexcept
-{
- DisablePush();
- DisableTake();
-}
-
-template
-void BlockQueue::DisableAndClear() noexcept
-{
- Disable();
- std::unique_lock u_lck(lck_);
- while (!q_.empty()) q_.pop();
-}
-
-template
-void BlockQueue::DisablePush() noexcept
-{
- push_enabled_.store(false);
- producer_cv_.notify_all();
-}
-
-template
-void BlockQueue::EnablePush() noexcept
-{
- push_enabled_.store(true);
-}
-
-template
-void BlockQueue::DisableTake() noexcept
-{
- take_enabled_.store(false);
- consumer_cv_.notify_all();
-}
-
-template
-void BlockQueue::EnableTake() noexcept
-{
- take_enabled_.store(true);
-}
-
-template
-void BlockQueue::SetNoMoreInput() noexcept
-{
- no_more_input_.store(true);
- consumer_cv_.notify_all();
-}
-
-#endif
\ No newline at end of file
diff --git a/deploy_core/include/deploy_core/common_defination.h b/deploy_core/include/deploy_core/common_defination.h
deleted file mode 100644
index 612cde9..0000000
--- a/deploy_core/include/deploy_core/common_defination.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * @Description:
- * @Author: Teddywesside 18852056629@163.com
- * @Date: 2024-11-25 14:00:38
- * @LastEditTime: 2024-11-26 22:07:03
- * @FilePath: /easy_deploy/deploy_core/include/deploy_core/common_defination.h
- */
-#ifndef __EASY_DEPLOY_COMMON_DEFINATION_H
-#define __EASY_DEPLOY_COMMON_DEFINATION_H
-
-/**
- * @brief Defination of common 2D bounding box
- *
- * @param x center of bbox `x`
- * @param y center of bbox `y`
- * @param w width of bbox
- * @param h height of bbox
- * @param conf confidence of bbox
- * @param cls classification of bbox
- */
-struct BBox2D {
- float x;
- float y;
- float w;
- float h;
- float conf;
- float cls;
-};
-
-/**
- * @brief Enum of data loacation
- *
- * @param HOST data is host accessable
- * @param DEVICE data is device accessable, means host cant read/write the data buffer directly
- * @param UNKOWN some other condition
- *
- */
-enum DataLocation { HOST = 0, DEVICE = 1, UNKOWN = 2 };
-
-/**
- * @brief Defination of common image format.
- *
- */
-enum ImageDataFormat { YUV = 0, RGB = 1, BGR = 2, GRAY = 3 };
-
-// some macro
-#define CHECK_STATE(state, hint) \
- { \
- if (!(state)) \
- { \
- LOG(ERROR) << (hint); \
- return false; \
- } \
- }
-
-#define MESSURE_DURATION(run) \
- { \
- auto start = std::chrono::high_resolution_clock::now(); \
- (run); \
- auto end = std::chrono::high_resolution_clock::now(); \
- LOG(INFO) << #run << " cost(us): " \
- << std::chrono::duration_cast(end - start).count(); \
- }
-
-#define MESSURE_DURATION_AND_CHECK_STATE(run, hint) \
- { \
- auto start = std::chrono::high_resolution_clock::now(); \
- CHECK_STATE((run), hint); \
- auto end = std::chrono::high_resolution_clock::now(); \
- LOG(INFO) << #run << " cost(us): " \
- << std::chrono::duration_cast(end - start).count(); \
- }
-
-#endif
\ No newline at end of file
diff --git a/deploy_core/include/deploy_core/wrapper.h b/deploy_core/include/deploy_core/wrapper.h
deleted file mode 100644
index 6c9b5b0..0000000
--- a/deploy_core/include/deploy_core/wrapper.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * @Description:
- * @Author: Teddywesside 18852056629@163.com
- * @Date: 2024-11-25 14:00:38
- * @LastEditTime: 2024-11-26 21:58:32
- * @FilePath: /easy_deploy/deploy_core/include/deploy_core/wrapper.h
- */
-#ifndef __EASY_DEPLOY_WRAPPER_H
-#define __EASY_DEPLOY_WRAPPER_H
-
-#include "deploy_core/async_pipeline.h"
-
-#include
-
-#include
-
-/**
- * @brief A simple wrapper of cv::Mat. Used in pipeline.
- *
- */
-class PipelineCvImageWrapper : public async_pipeline::IPipelineImageData {
-public:
- PipelineCvImageWrapper(const cv::Mat &cv_image, bool isRGB = false) : inner_cv_image(cv_image)
- {
- image_data_info.data_pointer = cv_image.data;
- image_data_info.format = isRGB ? ImageDataFormat::RGB : ImageDataFormat::BGR;
- image_data_info.image_height = cv_image.rows;
- image_data_info.image_width = cv_image.cols;
- image_data_info.image_channels = cv_image.channels();
- image_data_info.location = DataLocation::HOST;
- }
-
- const ImageDataInfo &GetImageDataInfo() const
- {
- return image_data_info;
- }
-
-private:
- IPipelineImageData::ImageDataInfo image_data_info;
- const cv::Mat inner_cv_image;
-};
-
-#endif
\ No newline at end of file
diff --git a/deploy_core/src/base_detection.cpp b/deploy_core/src/base_detection.cpp
deleted file mode 100644
index 5e2628b..0000000
--- a/deploy_core/src/base_detection.cpp
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * @Description:
- * @Author: Teddywesside 18852056629@163.com
- * @Date: 2024-11-25 14:24:19
- * @LastEditTime: 2024-11-26 21:58:50
- * @FilePath: /easy_deploy/deploy_core/src/base_detection.cpp
- */
-#include "deploy_core/base_detection.h"
-
-#include "deploy_core/wrapper.h"
-
-namespace detection_2d {
-
-std::string BaseDetectionModel::detection_pipeline_name_ = "DetectionPipeline";
-
-/**
- * @brief construct a `DetectionPipelinePackage`
- *
- * @param input_image
- * @param conf_thresh
- * @param isRGB
- * @param blob_buffers
- * @return std::shared_ptr
- */
-static std::shared_ptr CreateDetectionPipelineUnit(
- const cv::Mat &input_image,
- float conf_thresh,
- bool isRGB,
- std::shared_ptr blob_buffers)
-{
- // 1. construct the image wrapper
- auto image_wrapper = std::make_shared(input_image, isRGB);
- // 2. construct `DetectionPipelinePakcage`
- auto package = std::make_shared();
- package->input_image_data = image_wrapper;
- package->conf_thresh = conf_thresh;
- package->infer_buffer = blob_buffers;
-
- return package;
-}
-
-BaseDetectionModel::BaseDetectionModel(std::shared_ptr infer_core)
- : infer_core_(infer_core)
-{
- // 1. check infer_core
- if (infer_core == nullptr)
- {
- throw std::invalid_argument("[BaseDetectionModel] Input argument `infer_core` is nullptr!!!");
- }
-
- // 2. configure pipeline
- auto preprocess_block = BaseAsyncPipeline::BuildPipelineBlock(
- [=](ParsingType unit) -> bool { return PreProcess(unit); }, "BaseDet PreProcess");
-
- auto infer_core_context = infer_core->GetPipelineContext();
-
- auto postprocess_block = BaseAsyncPipeline::BuildPipelineBlock(
- [=](ParsingType unit) -> bool { return PostProcess(unit); }, "BaseDet PostProcess");
-
- BaseAsyncPipeline::ConfigPipeline(detection_pipeline_name_,
- {preprocess_block, infer_core_context, postprocess_block});
-}
-
-bool BaseDetectionModel::Detect(const cv::Mat &input_image,
- std::vector &det_results, // todo
- float conf_thresh,
- bool isRGB) noexcept
-{
- // 1. Get blobs buffer
- auto blob_buffers = infer_core_->GetBuffer(false);
- if (blob_buffers == nullptr)
- {
- LOG(ERROR) << "[BaseDetectionModel] Inference Core run out buffer!!!";
- return false;
- }
-
- // 2. Create a dummy pipeline package
- auto package = CreateDetectionPipelineUnit(input_image, conf_thresh, isRGB, blob_buffers);
-
- // 3. preprocess by derived class
- MESSURE_DURATION_AND_CHECK_STATE(PreProcess(package),
- "[BaseDetectionModel] Preprocess execute failed!!!");
-
- // 4. network inference
- MESSURE_DURATION_AND_CHECK_STATE(infer_core_->SyncInfer(blob_buffers),
- "[BaseDetectionModel] SyncInfer execute failed!!!");
-
- // 5. postprocess by derived class
- MESSURE_DURATION_AND_CHECK_STATE(PostProcess(package),
- "[BaseDetectionModel] PostProcess execute failed!!!");
-
- // 6. take output
- det_results = std::move(package->results);
-
- return true;
-}
-
-std::future> BaseDetectionModel::DetectAsync(const cv::Mat &input_image,
- float conf_thresh,
- bool isRGB,
- bool cover_oldest) noexcept
-{
- // 1. check if the pipeline is initialized
- if (!IsPipelineInitialized(detection_pipeline_name_))
- {
- LOG(ERROR) << "[BaseDetectionModel] Async Pipeline is not init yet!!!";
- return std::future>();
- }
-
- // 2. get blob buffer
- auto blob_buffers = infer_core_->GetBuffer(true);
- if (blob_buffers == nullptr)
- {
- LOG(ERROR) << "[BaseDetectionModel] Failed to get buffer from inference core!!!";
- return std::future>();
- }
-
- // 3. create a pipeline package
- auto package = CreateDetectionPipelineUnit(input_image, conf_thresh, isRGB, blob_buffers);
-
- // 4. push package into pipeline and return `std::future`
- return PushPipeline(detection_pipeline_name_, package);
-}
-
-BaseDetectionModel::~BaseDetectionModel()
-{
- ClosePipeline();
- infer_core_->Release();
-}
-
-
-} // namespace detection_2d
\ No newline at end of file
diff --git a/deploy_core/src/base_infer_core.cpp b/deploy_core/src/base_infer_core.cpp
deleted file mode 100644
index 8981805..0000000
--- a/deploy_core/src/base_infer_core.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * @Description:
- * @Author: Teddywesside 18852056629@163.com
- * @Date: 2024-11-19 18:33:00
- * @LastEditTime: 2024-11-26 21:56:31
- * @FilePath: /easy_deploy/deploy_core/src/base_infer_core.cpp
- */
-#include "deploy_core/base_infer_core.h"
-
-namespace inference_core {
-
-// used in sync infer
-struct _InnerSyncInferPackage : public async_pipeline::IPipelinePackage {
-public:
- std::shared_ptr GetInferBuffer() override
- {
- return buffer;
- }
- std::shared_ptr buffer;
-};
-
-BaseInferCore::BaseInferCore()
-{
- auto preprocess_block = BuildPipelineBlock(
- [&](ParsingType unit) -> bool { return PreProcess(unit); }, "BaseInferCore PreProcess");
- auto inference_block = BuildPipelineBlock(
- [&](ParsingType unit) -> bool { return Inference(unit); }, "BaseInferCore Inference");
- auto postprocess_block = BuildPipelineBlock(
- [&](ParsingType unit) -> bool { return PostProcess(unit); }, "BaseInferCore PostProcess");
- ConfigPipeline("InferCore Pipieline", {preprocess_block, inference_block, postprocess_block});
-}
-
-bool BaseInferCore::SyncInfer(std::shared_ptr buffer, const int batch_size)
-{
- auto inner_package = std::make_shared<_InnerSyncInferPackage>();
- inner_package->buffer = buffer;
- CHECK_STATE(PreProcess(inner_package), "[BaseInferCore] SyncInfer Preprocess Failed!!!");
- CHECK_STATE(Inference(inner_package), "[BaseInferCore] SyncInfer Inference Failed!!!");
- CHECK_STATE(PostProcess(inner_package), "[BaseInferCore] SyncInfer PostProcess Failed!!!");
- return true;
-}
-
-std::shared_ptr BaseInferCore::GetBuffer(bool block)
-{
- return mem_buf_pool_->Alloc(block);
-}
-
-void BaseInferCore::Release()
-{
- BaseAsyncPipeline::ClosePipeline();
- mem_buf_pool_.reset();
-}
-
-void BaseInferCore::Init(int mem_buf_size)
-{
- if (mem_buf_size <= 0 || mem_buf_size > 100)
- {
- throw std::invalid_argument("mem_buf_size should be between [1,100], Got: " +
- std::to_string(mem_buf_size));
- }
- mem_buf_pool_ = std::make_unique(this, mem_buf_size);
- LOG(INFO) << "successfully init mem buf pool with pool_size : " << mem_buf_size;
-}
-
-BaseInferCore::~BaseInferCore()
-{
- Release();
-}
-
-} // namespace inference_core
\ No newline at end of file
diff --git a/deploy_core/src/base_sam.cpp b/deploy_core/src/base_sam.cpp
deleted file mode 100644
index 9ff533e..0000000
--- a/deploy_core/src/base_sam.cpp
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- * @Description:
- * @Author: Teddywesside 18852056629@163.com
- * @Date: 2024-11-24 20:05:41
- * @LastEditTime: 2024-11-26 21:57:33
- * @FilePath: /easy_deploy/deploy_core/src/base_sam.cpp
- */
-#include "deploy_core/base_sam.h"
-
-#include "deploy_core/wrapper.h"
-
-namespace sam {
-
-/**
- * @brief Check if the input arguments are valid
- *
- * @param image
- * @param infer_core
- * @param points
- * @param labels
- */
-static bool CheckValidArguments(const cv::Mat &image,
- const std::shared_ptr &infer_core,
- const std::vector> &points,
- const std::vector &labels) noexcept
-{
- if (image.empty())
- {
- LOG(ERROR) << "[BaseSamModel] Got empty image!!!";
- return false;
- } else if (infer_core == nullptr)
- {
- LOG(ERROR) << "[BaseSamModel] Infer_core with points as prompt is null!!!";
- return false;
- } else if (points.size() != labels.size() || points.size() < 1)
- {
- LOG(ERROR) << "[BaseSamModel] points/labels size is not valid!!! "
- << "points.size: " << points.size() << ", labels.size: " << labels.size();
- return false;
- }
-
- return true;
-}
-
-/**
- * @brief Check if the input arguments are valid
- *
- * @param image
- * @param infer_core
- * @param boxes
- */
-static bool CheckValidArguments(const cv::Mat &image,
- const std::shared_ptr &infer_core,
- const std::vector &boxes) noexcept
-{
- if (image.empty())
- {
- LOG(ERROR) << "[BaseSamModel] Got empty image!!!";
- return false;
- } else if (infer_core == nullptr)
- {
- LOG(ERROR) << "[BaseSamModel] Infer_core with boxes as prompt is null!!!";
- return false;
- } else if (boxes.size() < 1)
- {
- LOG(ERROR) << "[BaseSamModel] boxes size is not valid!!! "
- << "boxes.size: " << boxes.size();
- return false;
- } else if (boxes.size() > 1)
- {
- LOG(WARNING) << "[BaseSamModel] More than one boxes is not support in sam model!!";
- }
-
- return true;
-}
-
-BaseSamModel::BaseSamModel(const std::string &model_name,
- std::shared_ptr image_encoder_core,
- std::shared_ptr mask_points_decoder_core,
- std::shared_ptr mask_boxes_decoder_core)
- : model_name_(model_name),
- image_encoder_core_(image_encoder_core),
- mask_points_decoder_core_(mask_points_decoder_core),
- mask_boxes_decoder_core_(mask_boxes_decoder_core),
- box_pipeline_name_(model_name + "_SamWithBoxPipeline"),
- point_pipeline_name_(model_name + "_SamWithPointPipeline")
-{
- if (image_encoder_core == nullptr)
- {
- throw std::invalid_argument("`image_encoder_core` should not be null");
- }
-
- if (mask_points_decoder_core == nullptr && mask_boxes_decoder_core == nullptr)
- {
- throw std::invalid_argument("one of `point/box` decoder should be non-nullptr");
- }
-
- if (mask_points_decoder_core_ != nullptr)
- {
- ConfigurePointPipeline();
- }
- if (mask_boxes_decoder_core_ != nullptr)
- {
- ConfigureBoxPipeline();
- }
-}
-
-BaseSamModel::~BaseSamModel()
-{
- BaseAsyncPipeline::ClosePipeline();
-
- if (image_encoder_core_ != nullptr)
- {
- image_encoder_core_->Release();
- }
- if (mask_points_decoder_core_ != nullptr)
- {
- mask_points_decoder_core_->Release();
- }
- if (mask_boxes_decoder_core_ != nullptr)
- {
- mask_boxes_decoder_core_->Release();
- }
-}
-
-void BaseSamModel::ConfigureBoxPipeline()
-{
- auto image_preprocess_block = BaseAsyncPipeline::BuildPipelineBlock(
- [&](ParsingType unit) -> bool { return ImagePreProcess(unit); },
- "[MobileSam Image PreProcess]");
-
- auto prompt_preprocess_block = BaseAsyncPipeline::BuildPipelineBlock(
- [&](ParsingType unit) -> bool { return PromptBoxPreProcess(unit); },
- "[MobileSam Prompt PreProcess]");
-
- auto mask_postprocess_block = BaseAsyncPipeline::BuildPipelineBlock(
- [&](ParsingType unit) -> bool { return MaskPostProcess(unit); },
- "[MobileSam Mask PostProcess]");
-
- const auto &image_encoder_context = image_encoder_core_->GetPipelineContext();
-
- const auto &mask_decoder_context = mask_boxes_decoder_core_->GetPipelineContext();
-
- BaseAsyncPipeline::ConfigPipeline(
- box_pipeline_name_, {image_preprocess_block, image_encoder_context, prompt_preprocess_block,
- mask_decoder_context, mask_postprocess_block});
-}
-
-void BaseSamModel::ConfigurePointPipeline()
-{
- auto image_preprocess_block = BaseAsyncPipeline::BuildPipelineBlock(
- [&](ParsingType unit) -> bool { return ImagePreProcess(unit); },
- "[MobileSam Image PreProcess]");
-
- auto prompt_preprocess_block = BaseAsyncPipeline::BuildPipelineBlock(
- [&](ParsingType unit) -> bool { return PromptPointPreProcess(unit); },
- "[MobileSam Prompt PreProcess]");
-
- auto mask_postprocess_block = BaseAsyncPipeline::BuildPipelineBlock(
- [&](ParsingType unit) -> bool { return MaskPostProcess(unit); },
- "[MobileSam Mask PostProcess]");
-
- const auto &image_encoder_context = image_encoder_core_->GetPipelineContext();
-
- const auto &mask_decoder_context = mask_points_decoder_core_->GetPipelineContext();
-
- BaseAsyncPipeline::ConfigPipeline(
- point_pipeline_name_, {image_preprocess_block, image_encoder_context, prompt_preprocess_block,
- mask_decoder_context, mask_postprocess_block});
-}
-
-bool BaseSamModel::GenerateMask(const cv::Mat &image,
- const std::vector> &points,
- const std::vector &labels,
- cv::Mat &result,
- bool isRGB)
-{
- // 0. check
- CHECK_STATE(CheckValidArguments(image, mask_points_decoder_core_, points, labels),
- "[BaseSamModel] `GenerateMask` with points got invalid arguments");
-
- // 1. Get blobs buffers
- auto encoder_blob_buffers = image_encoder_core_->GetBuffer(true);
- auto decoder_blob_buffers = mask_points_decoder_core_->GetBuffer(true);
-
- // 2. Construct `SamPipelinePackage`
- auto package = std::make_shared();
- package->input_image_data = std::make_shared(image, isRGB);
- package->points = points;
- package->labels = labels;
- package->image_encoder_blobs_buffer = encoder_blob_buffers;
- package->mask_decoder_blobs_buffer = decoder_blob_buffers;
-
- // 3. Carry out workflow
- MESSURE_DURATION_AND_CHECK_STATE(ImagePreProcess(package),
- "[BaseSamModel] Image-Preprocess execute failed!!!");
-
- MESSURE_DURATION_AND_CHECK_STATE(image_encoder_core_->SyncInfer(package->GetInferBuffer()),
- "[BaseSamModel] Image-encoder sync infer execute failed!!!");
-
- MESSURE_DURATION_AND_CHECK_STATE(PromptPointPreProcess(package),
- "[BaseSamModel] Prompt-preprocess execute failed!!!");
-
- MESSURE_DURATION_AND_CHECK_STATE(mask_points_decoder_core_->SyncInfer(package->GetInferBuffer()),
- "[BaseSamModel] Prompt-decoder sync infer execute failed!!!");
-
- MESSURE_DURATION_AND_CHECK_STATE(MaskPostProcess(package),
- "[BaseSamModel] Mask-postprocess execute failed!!!");
-
- // 4. output the result
- result = package->mask;
- return true;
-}
-
-bool BaseSamModel::GenerateMask(const cv::Mat &image,
- const std::vector &boxes,
- cv::Mat &result,
- bool isRGB)
-{
- // 0. check
- CHECK_STATE(CheckValidArguments(image, mask_boxes_decoder_core_, boxes),
- "[BaseSamModel] `GenerateMask` with boxes got invalid arguments");
-
- // 1. Get blobs buffers
- auto encoder_blob_buffers = image_encoder_core_->GetBuffer(true);
- auto decoder_blob_buffers = mask_boxes_decoder_core_->GetBuffer(true);
-
- // 2. Construct `SamPipelinePackage`
- auto package = std::make_shared();
- package->input_image_data = std::make_shared(image, isRGB);
- package->boxes = boxes;
- package->image_encoder_blobs_buffer = encoder_blob_buffers;
- package->mask_decoder_blobs_buffer = decoder_blob_buffers;
-
- // 3. Carry out workflow
- MESSURE_DURATION_AND_CHECK_STATE(ImagePreProcess(package),
- "[BaseSamModel] Image-Preprocess execute failed!!!");
-
- MESSURE_DURATION_AND_CHECK_STATE(image_encoder_core_->SyncInfer(package->GetInferBuffer()),
- "[BaseSamModel] Image-encoder sync infer execute failed!!!");
-
- MESSURE_DURATION_AND_CHECK_STATE(PromptBoxPreProcess(package),
- "[BaseSamModel] Prompt-preprocess execute failed!!!");
-
- MESSURE_DURATION_AND_CHECK_STATE(mask_boxes_decoder_core_->SyncInfer(package->GetInferBuffer()),
- "[BaseSamModel] Prompt-decoder sync infer execute failed!!!");
-
- MESSURE_DURATION_AND_CHECK_STATE(MaskPostProcess(package),
- "[BaseSamModel] Mask-postprocess execute failed!!!");
-
- // 4. output the result
- result = package->mask;
- return true;
-}
-
-std::future BaseSamModel::GenerateMaskAsync(const cv::Mat &image,
- const std::vector> &points,
- const std::vector &labels,
- bool isRGB,
- bool cover_oldest)
-{
- // 0. Check
- if (!CheckValidArguments(image, mask_points_decoder_core_, points, labels))
- {
- LOG(ERROR) << "[BaseSamModel] `GenerateMask` with points got invalid arguments";
- return std::future();
- }
- if (!BaseAsyncPipeline::IsPipelineInitialized(point_pipeline_name_))
- {
- LOG(ERROR) << "[BaseSamModel] Async pipeline with points as prompt is not initialized yet!!!";
- return std::future();
- }
-
- // 1. Get blobs buffers
- auto encoder_blob_buffers = image_encoder_core_->GetBuffer(true);
- auto decoder_blob_buffers = mask_points_decoder_core_->GetBuffer(true);
-
- // 2. Construct `SamPipelinePackage`
- auto package = std::make_shared();
- package->input_image_data = std::make_shared(image, isRGB);
- package->points = points;
- package->labels = labels;
- package->image_encoder_blobs_buffer = encoder_blob_buffers;
- package->mask_decoder_blobs_buffer = decoder_blob_buffers;
-
- // 3. return `std::future` instance
- return BaseAsyncPipeline::PushPipeline(point_pipeline_name_, package);
-}
-
-std::future BaseSamModel::GenerateMaskAsync(const cv::Mat &image,
- const std::vector &boxes,
- bool isRGB,
- bool cover_oldest)
-{
- // 0. check
- if (!CheckValidArguments(image, mask_boxes_decoder_core_, boxes))
- {
- LOG(ERROR) << "[BaseSamModel] `GenerateMask` with boxes got invalid arguments";
- return std::future();
- }
-
- if (!BaseAsyncPipeline::IsPipelineInitialized(box_pipeline_name_))
- {
- LOG(ERROR) << "[BaseSamModel] Async pipeline with boxes as prompt is not initialized yet!!!";
- return std::future();
- }
-
- // 1. Get blobs buffers
- auto encoder_blob_buffers = image_encoder_core_->GetBuffer(true);
- auto decoder_blob_buffers = mask_boxes_decoder_core_->GetBuffer(true);
-
- // 2. Construct `SamPipelinePackage`
- auto package = std::make_shared();
- package->input_image_data = std::make_shared(image, isRGB);
- package->boxes = boxes;
- package->image_encoder_blobs_buffer = encoder_blob_buffers;
- package->mask_decoder_blobs_buffer = decoder_blob_buffers;
-
- // 3. return `std::future` instance
- return BaseAsyncPipeline::PushPipeline(box_pipeline_name_, package);
-}
-
-} // namespace sam
\ No newline at end of file
diff --git a/deploy_utils/CMakeLists.txt b/deploy_utils/CMakeLists.txt
deleted file mode 100644
index 6a0ed77..0000000
--- a/deploy_utils/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-cmake_minimum_required(VERSION 3.8)
-project(deploy_utils)
-
-
-add_subdirectory(image_processing_utils)
diff --git a/deploy_utils/image_processing_utils/CMakeLists.txt b/deploy_utils/image_processing_utils/CMakeLists.txt
deleted file mode 100644
index 6e359b5..0000000
--- a/deploy_utils/image_processing_utils/CMakeLists.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-cmake_minimum_required(VERSION 3.8)
-project(image_processing_utils)
-
-add_compile_options(-std=c++17)
-add_compile_options(-O3)
-set(CMAKE_CXX_STANDARD 17)
-
-if(ENABLE_TENSORRT)
- find_package(CUDA REQUIRED)
- set(CMAKE_CUDA_COMPILER /usr/local/cuda/bin/nvcc)
- enable_language(CUDA)
- include_directories(${CUDA_INCLUDE_DIRS})
-endif()
-
-find_package(OpenCV REQUIRED)
-find_package(glog REQUIRED)
-
-set(source_file
- src/detection_2d_preprocess_cpu.cpp
- src/detection_2d_postprocess_cpu.cpp
-)
-
-if(ENABLE_TENSORRT)
- list(APPEND source_file
- src/detection_2d_preprocess_cuda.cpp
- src/detection_2d_cuda_preprocess.cu
- )
-endif()
-
-include_directories(
- include
- ${OpenCV_INCLUDE_DIRS}
-)
-
-add_library(${PROJECT_NAME} SHARED ${source_file})
-
-target_link_libraries(${PROJECT_NAME} PUBLIC
- glog::glog
- ${OpenCV_LIBS}
- deploy_core
-)
-
-install(TARGETS ${PROJECT_NAME}
- LIBRARY DESTINATION lib)
-
-target_include_directories(${PROJECT_NAME} PUBLIC ${PROJECT_SOURCE_DIR}/include)
\ No newline at end of file
diff --git a/deploy_utils/image_processing_utils/include/detection_2d_util/detection_2d_util.h b/deploy_utils/image_processing_utils/include/detection_2d_util/detection_2d_util.h
deleted file mode 100644
index 4eeac9d..0000000
--- a/deploy_utils/image_processing_utils/include/detection_2d_util/detection_2d_util.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * @Description:
- * @Author: Teddywesside 18852056629@163.com
- * @Date: 2024-11-19 18:33:00
- * @LastEditTime: 2024-12-03 15:51:37
- * @FilePath: /EasyDeploy/deploy_utils/image_processing_utils/include/detection_2d_util/detection_2d_util.h
- */
-#ifndef __EASY_DEPLOY_DETECTION_2D_UTIL_H
-#define __EASY_DEPLOY_DETECTION_2D_UTIL_H
-
-#include "deploy_core/base_detection.h"
-#include "deploy_core/base_infer_core.h"
-
-namespace detection_2d {
-
-/**
- * @brief Create a Cpu based Det Pre Process object
- *
- */
-std::shared_ptr CreateCpuDetPreProcess(
- const std::vector &mean = {0, 0, 0},
- const std::vector &val = {255, 255, 255},
- bool do_transpose = true,
- bool do_norm = true);
-
-std::shared_ptr CreateCpuDetPreProcessFactory(
- const std::vector &mean = {0, 0, 0},
- const std::vector &val = {255, 255, 255},
- bool do_transpose = true,
- bool do_norm = true);
-
-/**
- * @brief Create a Cuda based Det Pre Process object
- *
- */
-std::shared_ptr CreateCudaDetPreProcess(const int max_src_height = 1920,
- const int max_src_width = 1920,
- const int max_src_channels = 3);
-
-std::shared_ptr CreateCudaDetPreProcessFactory(
- const int max_src_height = 1920,
- const int max_src_width = 1920,
- const int max_src_channels = 3);
-
-/**
- * @brief Refer to `ultralytics` official project.
- *
- */
-std::shared_ptr CreateYolov8PostProcessCpuOrigin(
- const int input_height,
- const int input_width,
- const int cls_number,
- const std::vector &downsample_scales = {8, 16, 32});
-
-std::shared_ptr