diff --git a/apps/conference-demos/rgb-depth-connections/README.md b/apps/conference-demos/rgb-depth-connections/README.md index 887a47581..39b06b70f 100644 --- a/apps/conference-demos/rgb-depth-connections/README.md +++ b/apps/conference-demos/rgb-depth-connections/README.md @@ -25,7 +25,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/apps/default-app/README.md b/apps/default-app/README.md index 4cfc2d426..7d479a21d 100644 --- a/apps/default-app/README.md +++ b/apps/default-app/README.md @@ -23,7 +23,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/camera-controls/depth-driven-focus/README.md b/camera-controls/depth-driven-focus/README.md index 88bd1efa9..3206ed577 100644 --- a/camera-controls/depth-driven-focus/README.md +++ b/camera-controls/depth-driven-focus/README.md @@ -23,7 +23,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/camera-controls/depth-driven-focus/main.py b/camera-controls/depth-driven-focus/main.py index 7ef3d818e..40f8a22d3 100644 --- a/camera-controls/depth-driven-focus/main.py +++ b/camera-controls/depth-driven-focus/main.py @@ -1,9 +1,12 @@ +from pathlib import Path + import depthai as dai from depthai_nodes.node import ParsingNeuralNetwork, ApplyColormap, DepthMerger from utils.arguments import initialize_argparser from utils.depth_driven_focus import DepthDrivenFocus _, args = initialize_argparser() +MODEL_DIR = Path(__file__).resolve().parent / "depthai_models" visualizer = dai.RemoteConnection(httpPort=8082) device = dai.Device(dai.DeviceInfo(args.device)) if args.device else dai.Device() @@ -15,7 +18,7 @@ platform = device.getPlatform() model_description = dai.NNModelDescription.fromYamlFile( - f"yunet.{platform.name}.yaml" + str(MODEL_DIR / f"yunet.{platform.name}.yaml") ) nn_archive = dai.NNArchive(dai.getModelFromZoo(model_description)) diff --git a/camera-controls/depth-driven-focus/requirements.txt b/camera-controls/depth-driven-focus/requirements.txt index df8d7aa85..0939d9a84 100644 --- a/camera-controls/depth-driven-focus/requirements.txt +++ b/camera-controls/depth-driven-focus/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/camera-controls/lossless-zooming/README.md b/camera-controls/lossless-zooming/README.md index cd8d72f29..87d2c376d 100644 --- a/camera-controls/lossless-zooming/README.md +++ b/camera-controls/lossless-zooming/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - 
[DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/camera-controls/lossless-zooming/main.py b/camera-controls/lossless-zooming/main.py index 9cbbaa4a1..da63a9cfa 100644 --- a/camera-controls/lossless-zooming/main.py +++ b/camera-controls/lossless-zooming/main.py @@ -25,7 +25,13 @@ else dai.ImgFrame.Type.BGR888p ) -model_description = dai.NNModelDescription.fromYamlFile(f"yunet.{platform.name}.yaml") +model_description = dai.NNModelDescription.fromYamlFile( + str( + Path(__file__).resolve().parent + / "depthai_models" + / f"yunet.{platform.name}.yaml" + ) +) nn_archive = dai.NNArchive(dai.getModelFromZoo(model_description)) model_width = nn_archive.getInputWidth() model_height = nn_archive.getInputHeight() diff --git a/camera-controls/lossless-zooming/requirements.txt b/camera-controls/lossless-zooming/requirements.txt index 8e4027fc1..e42562133 100644 --- a/camera-controls/lossless-zooming/requirements.txt +++ b/camera-controls/lossless-zooming/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update diff --git a/camera-controls/lossless-zooming/utils/crop_face.py b/camera-controls/lossless-zooming/utils/crop_face.py index 7d64e6e94..20b395fa9 100644 --- a/camera-controls/lossless-zooming/utils/crop_face.py +++ b/camera-controls/lossless-zooming/utils/crop_face.py @@ -1,6 +1,5 @@ import depthai as dai from typing import Tuple -from depthai_nodes import ImgDetectionsExtended AVG_MAX_NUM = 10 @@ -15,7 +14,7 @@ class CropFace(dai.node.HostNode): Attributes ---------- detections_input : dai.Input - The input link for the ImageDetectionsExtended message. + The input link for the dai.ImgDetections message. config_output : dai.Output The output link for the ImageManipConfig messages. 
source_size : Tuple[int, int] @@ -49,7 +48,7 @@ def build( Parameters ---------- detections_input : dai.Node.Output - The input link for the ImgDetectionsExtended message + The input link for the dai.ImgDetections message source_size : Tuple[int, int] The size of the source image (width, height). target_size : Optional[Tuple[int, int]] @@ -67,11 +66,11 @@ def build( def process(self, detection_message: dai.Buffer): """Process the input detections and create a crop config. This function is - ran every time a new ImgDetectionsExtended message is received. + ran every time a new dai.ImgDetections message is received. Sends one crop configuration to the config_output link. """ - assert isinstance(detection_message, ImgDetectionsExtended) + assert isinstance(detection_message, dai.ImgDetections) timestamp = detection_message.getTimestamp() sequence_num = detection_message.getSequenceNum() @@ -85,7 +84,7 @@ def process(self, detection_message: dai.Buffer): if len(dets) > 0: cfg.setSkipCurrentImage(False) coords = dets[0] - rect = coords.rotated_rect + rect = coords.getBoundingBox() x = rect.center.x y = rect.center.y diff --git a/camera-controls/manual-camera-control/README.md b/camera-controls/manual-camera-control/README.md index eb43bbcf3..aa0919898 100644 --- a/camera-controls/manual-camera-control/README.md +++ b/camera-controls/manual-camera-control/README.md @@ -56,7 +56,7 @@ The following controls can be selected and modified with `+` and `-` keys: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/camera-controls/manual-camera-control/requirements.txt b/camera-controls/manual-camera-control/requirements.txt index 04985657c..eae2117c0 100644 --- a/camera-controls/manual-camera-control/requirements.txt +++ 
b/camera-controls/manual-camera-control/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 +depthai==3.4.0 opencv-python-headless~=4.10.0 \ No newline at end of file diff --git a/custom-frontend/open-vocabulary-object-detection/backend/requirements.txt b/custom-frontend/open-vocabulary-object-detection/backend/requirements.txt index 88bd04809..09753579b 100644 --- a/custom-frontend/open-vocabulary-object-detection/backend/requirements.txt +++ b/custom-frontend/open-vocabulary-object-detection/backend/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.2.1 -depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@f40211e5665473b5db48457640bed18fd1f2cc8d #InstanceToSemanticMask +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless~=4.10.0 numpy>=1.22 tokenizers~=0.21.0 diff --git a/custom-frontend/open-vocabulary-object-detection/backend/src/nn/label_mapper_node.py b/custom-frontend/open-vocabulary-object-detection/backend/src/nn/label_mapper_node.py index 62fa463c3..b303edfe5 100644 --- a/custom-frontend/open-vocabulary-object-detection/backend/src/nn/label_mapper_node.py +++ b/custom-frontend/open-vocabulary-object-detection/backend/src/nn/label_mapper_node.py @@ -3,7 +3,6 @@ import depthai as dai -from depthai_nodes import ImgDetectionsExtended logger = logging.getLogger(__name__) @@ -13,7 +12,7 @@ class DetectionsLabelMapper(dai.node.HostNode): Adds label names to detections and aligns detections to a reference frame. 
Inputs: - - input_detections: dai.ImgDetections or ImgDetectionsExtended + - input_detections: dai.ImgDetections - input_frame: dai.ImgFrame (reference coordinate space) Output: @@ -50,17 +49,8 @@ def build( def process( self, detections_message: dai.Buffer, frame_message: dai.ImgFrame ) -> None: - if isinstance(detections_message, ImgDetectionsExtended): - # Align detections to frame coordinate space - detections_message.setTransformation(frame_message.getTransformation()) - for detection in detections_message.detections: - detection.label_name = self._label_encoding.get( - detection.label, "unknown" - ) - elif isinstance(detections_message, dai.ImgDetections): - detections_message.setTransformation(frame_message.getTransformation()) - for detection in detections_message.detections: - detection.labelName = self._label_encoding.get( - detection.label, "unknown" - ) + assert isinstance(detections_message, dai.ImgDetections) + detections_message.setTransformation(frame_message.getTransformation()) + for detection in detections_message.detections: + detection.labelName = self._label_encoding.get(detection.label, "unknown") self.out.send(detections_message) diff --git a/custom-frontend/open-vocabulary-object-detection/backend/src/nn/nn_detection_node.py b/custom-frontend/open-vocabulary-object-detection/backend/src/nn/nn_detection_node.py index 23e46bb69..58ba9a042 100644 --- a/custom-frontend/open-vocabulary-object-detection/backend/src/nn/nn_detection_node.py +++ b/custom-frontend/open-vocabulary-object-detection/backend/src/nn/nn_detection_node.py @@ -27,7 +27,7 @@ class NNDetectionNode(dai.node.ThreadedHostNode): -> LabelMapperNode (add label names for visualization) Exposes: - - detections_extended: ImgDetectionsExtended with label names (for visualizer) + - detections_extended: dai.ImgDetections with label names (for visualizer) - detections: dai.ImgDetections with label names (for snapping) - controller: PromptController for dynamic prompt updates (classes, 
confidence threshold) """ @@ -80,7 +80,7 @@ def build( # Detection filter self._det_filter.build(self._nn.out) - # Add label for visualization (ImgDetectionsExtended) + # Add label for visualization self._det_label_mapper.build( input_detections=self._det_filter.out, input_frame=input_frame ) diff --git a/custom-frontend/raw-stream/README.md b/custom-frontend/raw-stream/README.md index 78226d6f6..cdf50b7ed 100644 --- a/custom-frontend/raw-stream/README.md +++ b/custom-frontend/raw-stream/README.md @@ -29,7 +29,7 @@ Here is a list of all available parameters: #### BackEnd -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/custom-frontend/raw-stream/requirements.txt b/custom-frontend/raw-stream/requirements.txt index 877bcf059..e0918ed1d 100644 --- a/custom-frontend/raw-stream/requirements.txt +++ b/custom-frontend/raw-stream/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 +depthai==3.4.0 numpy>=1.22 \ No newline at end of file diff --git a/depth-measurement/3d-measurement/box-measurement/README.md b/depth-measurement/3d-measurement/box-measurement/README.md index db9cac3ee..314cc9600 100644 --- a/depth-measurement/3d-measurement/box-measurement/README.md +++ b/depth-measurement/3d-measurement/box-measurement/README.md @@ -25,7 +25,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/depth-measurement/3d-measurement/box-measurement/main.py b/depth-measurement/3d-measurement/box-measurement/main.py index c4e4c6eef..6fddcdcf0 100644 --- a/depth-measurement/3d-measurement/box-measurement/main.py +++ b/depth-measurement/3d-measurement/box-measurement/main.py @@ -1,3 +1,5 @@ +from pathlib import Path + import depthai as dai from depthai_nodes.node import ParsingNeuralNetwork from utils.box_processing_node import BoxProcessingNode @@ -6,6 +8,8 @@ _, args = initialize_argparser() +EXAMPLE_DIR = Path(__file__).resolve().parent +MODEL_DIR = EXAMPLE_DIR / "depthai_models" NN_WIDTH, NN_HEIGHT = 512, 320 INPUT_SHAPE = (NN_WIDTH, NN_HEIGHT) @@ -21,7 +25,7 @@ platform = device.getPlatform() model_description = dai.NNModelDescription.fromYamlFile( - f"box_instance_segmentation.{platform.name}.yaml" + str(MODEL_DIR / f"box_instance_segmentation.{platform.name}.yaml") ) nn_archive = dai.NNArchive( dai.getModelFromZoo( @@ -76,7 +80,7 @@ color_output.link(manip.inputImage) - nn = p.create(ParsingNeuralNetwork).build(nn_source=nn_archive, input=manip.out) + nn = p.create(ParsingNeuralNetwork).build(nnSource=nn_archive, input=manip.out) if platform == dai.Platform.RVC2: nn.setNNArchive(nn_archive, numShaves=7) diff --git a/depth-measurement/3d-measurement/box-measurement/requirements.txt b/depth-measurement/3d-measurement/box-measurement/requirements.txt index 3f3924ee0..1c236c7c2 100644 --- a/depth-measurement/3d-measurement/box-measurement/requirements.txt +++ b/depth-measurement/3d-measurement/box-measurement/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update numpy>=1.22 open3d~=0.18 opencv-python-headless==4.10.0.84 diff --git a/depth-measurement/3d-measurement/box-measurement/utils/box_processing_node.py b/depth-measurement/3d-measurement/box-measurement/utils/box_processing_node.py index 1221bbb5f..c6b26086e 100644 
--- a/depth-measurement/3d-measurement/box-measurement/utils/box_processing_node.py +++ b/depth-measurement/3d-measurement/box-measurement/utils/box_processing_node.py @@ -1,10 +1,6 @@ import depthai as dai import numpy as np import cv2 -from depthai_nodes.message.img_detections import ( - ImgDetectionExtended, - ImgDetectionsExtended, -) from .helper_functions import reverse_resize_and_pad import time @@ -146,11 +142,11 @@ def _fit_cuboid( corners3d = np.asarray(outline.points) self._draw_cuboid_outline(corners3d) - def _draw_box_and_label(self, det: ImgDetectionExtended) -> None: + def _draw_box_and_label(self, det: dai.ImgDetection) -> None: """Draws rotated rect and label""" # All annotation coordinates are normalized to the NN input size (512×320) - rr = det._rotated_rect + rr = det.getBoundingBox() cx, cy = rr.center.x, rr.center.y w, h = rr.size.width, rr.size.height angle = rr.angle @@ -173,18 +169,18 @@ def _draw_box_and_label(self, det: ImgDetectionExtended) -> None: if self.fit: label = ( - f"Box ({det._confidence:.2f}) " + f"Box ({det.confidence:.2f}) " f"{self.dimensions[0]:.1f} x {self.dimensions[1]:.1f} x {self.dimensions[2]:.1f} cm" ) elif self.dimensions_cache is not None and ( time.time() - self.last_successful_fit < self.cache_duration ): label = ( - f"Box ({det._confidence:.2f}) " + f"Box ({det.confidence:.2f}) " f"{self.dimensions_cache[0]:.1f} x {self.dimensions_cache[1]:.1f} x {self.dimensions_cache[2]:.1f} cm" ) else: - label = f"{'Box'} {det._confidence:.2f}" + label = f"{'Box'} {det.confidence:.2f}" self.helper_det.draw_text( label, @@ -195,7 +191,7 @@ def _draw_box_and_label(self, det: ImgDetectionExtended) -> None: ) def _annotate_detection( - self, det: ImgDetectionExtended, idx: int, mask: np.ndarray, pcl, pcl_colors + self, det: dai.ImgDetection, idx: int, mask: np.ndarray, pcl, pcl_colors ): """Draw all annotations (mask, 3D box fit, bounding box + label) for a single detection.""" self._draw_mask(mask, idx) @@ -217,10 +213,10 @@ def 
run(self): assert isinstance(pcl_msg, dai.PointCloudData) assert isinstance(rgb_msg, dai.ImgFrame) - assert isinstance(det_msg, ImgDetectionsExtended) + assert isinstance(det_msg, dai.ImgDetections) inPointCloud: dai.PointCloudData = pcl_msg inRGB: dai.ImgFrame = rgb_msg - parser_output: ImgDetectionsExtended = det_msg + parser_output: dai.ImgDetections = det_msg try: points, colors = inPointCloud.getPointsRGB() @@ -230,7 +226,7 @@ def run(self): rgba_img = colors.reshape(IMG_HEIGHT, IMG_WIDTH, 4) bgr_img = cv2.cvtColor(rgba_img, cv2.COLOR_BGRA2BGR) - mask = parser_output._masks._mask + mask = parser_output.getCvSegmentationMask() detections = parser_output.detections mask_full = reverse_resize_and_pad( mask, (IMG_WIDTH, IMG_HEIGHT), INPUT_SHAPE diff --git a/depth-measurement/3d-measurement/rgbd-pointcloud/README.md b/depth-measurement/3d-measurement/rgbd-pointcloud/README.md index 0c712fa4c..a48c33dda 100644 --- a/depth-measurement/3d-measurement/rgbd-pointcloud/README.md +++ b/depth-measurement/3d-measurement/rgbd-pointcloud/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/depth-measurement/3d-measurement/rgbd-pointcloud/requirements.txt b/depth-measurement/3d-measurement/rgbd-pointcloud/requirements.txt index 877bcf059..e0918ed1d 100644 --- a/depth-measurement/3d-measurement/rgbd-pointcloud/requirements.txt +++ b/depth-measurement/3d-measurement/rgbd-pointcloud/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 +depthai==3.4.0 numpy>=1.22 \ No newline at end of file diff --git a/depth-measurement/3d-measurement/tof-pointcloud/README.md b/depth-measurement/3d-measurement/tof-pointcloud/README.md index 135eaffab..ac56ad292 100644 --- 
a/depth-measurement/3d-measurement/tof-pointcloud/README.md +++ b/depth-measurement/3d-measurement/tof-pointcloud/README.md @@ -16,7 +16,7 @@ Running this example requires a **Luxonis device** connected to your computer. R ### Installation -You need to first prepare a **Python 3.10** environment (python versions 3.8 - 3.13 should work too) with the following packages installed: +You need to first prepare a **Python >= 3.10** environment (python versions 3.8 - 3.13 should work too) with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [Open3D](https://pypi.org/project/open3d/) diff --git a/depth-measurement/calc-spatial-on-host/README.md b/depth-measurement/calc-spatial-on-host/README.md index 5bf5bded8..df157e958 100644 --- a/depth-measurement/calc-spatial-on-host/README.md +++ b/depth-measurement/calc-spatial-on-host/README.md @@ -29,7 +29,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/depth-measurement/dynamic-calibration/README.md b/depth-measurement/dynamic-calibration/README.md index 2b7e6ba66..678996fe1 100644 --- a/depth-measurement/dynamic-calibration/README.md +++ b/depth-measurement/dynamic-calibration/README.md @@ -87,7 +87,7 @@ Use these keys while the app is running (focus the browser visualizer window): ## Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/depth-measurement/dynamic-calibration/main.py b/depth-measurement/dynamic-calibration/main.py index 92e49943e..b4e55f6fe 100644 --- a/depth-measurement/dynamic-calibration/main.py +++ b/depth-measurement/dynamic-calibration/main.py @@ -1,6 +1,6 @@ import cv2 -from depthai_nodes.node import ApplyColormap +from depthai_nodes.node import ApplyDepthColormap import depthai as dai from utils.dynamic_controler import DynamicCalibrationControler @@ -35,8 +35,7 @@ right_out.link(dyn_calib.right) # Output queues - depth_parser = pipeline.create(ApplyColormap).build(stereo.disparity) - # depth_parser.setMaxValue(int(stereo.initialConfig.getMaxDisparity())) # NOTE: Uncomment when DAI fixes a bug + depth_parser = pipeline.create(ApplyDepthColormap).build(stereo.disparity) depth_parser.setColormap(cv2.COLORMAP_JET) calibration = device.readCalibration() diff --git a/depth-measurement/dynamic-calibration/requirements.txt b/depth-measurement/dynamic-calibration/requirements.txt index c7a442e6f..434698e9c 100644 --- a/depth-measurement/dynamic-calibration/requirements.txt +++ b/depth-measurement/dynamic-calibration/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.2.1 -depthai-nodes==0.3.6 +depthai==3.4.0 +depthai-nodes==0.4.0 numpy>=1.22 opencv-python==4.10.0.84 opencv-contrib-python==4.10.0.84 diff --git a/depth-measurement/stereo-on-host/README.md b/depth-measurement/stereo-on-host/README.md index b0a243434..e5183581a 100644 --- a/depth-measurement/stereo-on-host/README.md +++ b/depth-measurement/stereo-on-host/README.md @@ -29,7 +29,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/depth-measurement/stereo-on-host/main.py b/depth-measurement/stereo-on-host/main.py index e6c7a2e9a..38309bba5 100644 --- a/depth-measurement/stereo-on-host/main.py +++ b/depth-measurement/stereo-on-host/main.py @@ -3,7 +3,7 @@ from utils.arguments import initialize_argparser from utils.host_stereo_sgbm import StereoSGBM from utils.host_ssim import SSIM -from depthai_nodes.node import ApplyColormap +from depthai_nodes.node import ApplyDepthColormap import cv2 RESOLUTION_SIZE = (640, 400) @@ -51,8 +51,7 @@ stereo.setExtendedDisparity(False) stereo.setSubpixel(True) - depth_parser = pipeline.create(ApplyColormap).build(stereo.disparity) - depth_parser.setMaxValue(int(stereo.initialConfig.getMaxDisparity())) + depth_parser = pipeline.create(ApplyDepthColormap).build(stereo.disparity) depth_parser.setColormap(cv2.COLORMAP_JET) ssim = pipeline.create(SSIM).build( diff --git a/depth-measurement/stereo-on-host/requirements.txt b/depth-measurement/stereo-on-host/requirements.txt index 05c5898ec..35c941724 100644 --- a/depth-measurement/stereo-on-host/requirements.txt +++ b/depth-measurement/stereo-on-host/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 scikit-image numpy>=1.22 diff --git a/depth-measurement/stereo-runtime-configuration/README.md b/depth-measurement/stereo-runtime-configuration/README.md index 75e868efe..811f2793c 100644 --- a/depth-measurement/stereo-runtime-configuration/README.md +++ b/depth-measurement/stereo-runtime-configuration/README.md @@ -34,7 +34,7 @@ To change the stereo depth settings, use the following keys: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/depth-measurement/triangulation/README.md b/depth-measurement/triangulation/README.md index e782a34d1..34559cd9c 100644 --- a/depth-measurement/triangulation/README.md +++ b/depth-measurement/triangulation/README.md @@ -23,7 +23,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/depth-measurement/triangulation/requirements.txt b/depth-measurement/triangulation/requirements.txt index d3c270afc..09e49daa1 100644 --- a/depth-measurement/triangulation/requirements.txt +++ b/depth-measurement/triangulation/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 diff --git a/depth-measurement/triangulation/utils/host_triangulation.py b/depth-measurement/triangulation/utils/host_triangulation.py index 9378a49bf..952e01285 100644 --- a/depth-measurement/triangulation/utils/host_triangulation.py +++ b/depth-measurement/triangulation/utils/host_triangulation.py @@ -2,9 +2,6 @@ import numpy as np import depthai as dai from typing import Tuple -from depthai_nodes import ( - ImgDetectionsExtended, -) from depthai_nodes.utils import AnnotationHelper from .stereo_inference import StereoInference @@ -73,15 +70,15 @@ def process( nn_face_left: dai.Buffer, nn_face_right: dai.Buffer, ) -> None: - assert isinstance(nn_face_left, ImgDetectionsExtended) - assert isinstance(nn_face_right, ImgDetectionsExtended) + assert isinstance(nn_face_left, dai.ImgDetections) + assert isinstance(nn_face_right, dai.ImgDetections) left_frame = face_left.getCvFrame() right_frame = face_right.getCvFrame() bbox_annot_left = AnnotationHelper() for detection in 
nn_face_left.detections: - rect = detection.rotated_rect + rect = detection.getBoundingBox() x = rect.center.x y = rect.center.y w = rect.size.width @@ -103,7 +100,7 @@ def process( bbox_annot_right = AnnotationHelper() for detection in nn_face_right.detections: - rect = detection.rotated_rect + rect = detection.getBoundingBox() x = rect.center.x y = rect.center.y w = rect.size.width @@ -137,8 +134,8 @@ def process( if nn_face_left.detections and nn_face_right.detections: spatials = [] keypoints = zip( - nn_face_left.detections[0].keypoints, - nn_face_right.detections[0].keypoints, + nn_face_left.detections[0].getKeypoints2f(), + nn_face_right.detections[0].getKeypoints2f(), ) for i, (keypoint_left, keypoint_right) in enumerate(keypoints): diff --git a/depth-measurement/wls-filter/README.md b/depth-measurement/wls-filter/README.md index e980a66eb..3f3788eee 100644 --- a/depth-measurement/wls-filter/README.md +++ b/depth-measurement/wls-filter/README.md @@ -36,7 +36,7 @@ Use the following keyboard controls in the visualizer to adjust WLS filtering pa ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/depth-measurement/wls-filter/main.py b/depth-measurement/wls-filter/main.py index 5610d9e23..a0d081091 100755 --- a/depth-measurement/wls-filter/main.py +++ b/depth-measurement/wls-filter/main.py @@ -1,7 +1,7 @@ import cv2 import depthai as dai from utils.host_wls_filter import WLSFilter -from depthai_nodes.node import ApplyColormap +from depthai_nodes.node import ApplyDepthColormap from utils.arguments import initialize_argparser _, args = initialize_argparser() @@ -39,14 +39,12 @@ baseline=baseline, ) - disp_colored = pipeline.create(ApplyColormap).build(stereo.disparity) - disp_colored.setMaxValue(int(stereo.initialConfig.getMaxDisparity())) + disp_colored = pipeline.create(ApplyDepthColormap).build(stereo.disparity) disp_colored.setColormap(cv2.COLORMAP_JET) - filtered_disp_colored = pipeline.create(ApplyColormap).build( + filtered_disp_colored = pipeline.create(ApplyDepthColormap).build( wls_filter.filtered_disp ) - filtered_disp_colored.setMaxValue(255) filtered_disp_colored.setColormap(cv2.COLORMAP_JET) visualizer.addTopic("Rectified Right", stereo.rectifiedRight) diff --git a/depth-measurement/wls-filter/requirements.txt b/depth-measurement/wls-filter/requirements.txt index 7552cb7ce..bcb75c179 100644 --- a/depth-measurement/wls-filter/requirements.txt +++ b/depth-measurement/wls-filter/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 opencv-contrib-python==4.10.0.84 numpy>=1.22 diff --git a/integrations/foxglove/README.md b/integrations/foxglove/README.md index 4680a0205..b2e10e217 100644 --- a/integrations/foxglove/README.md +++ b/integrations/foxglove/README.md @@ -32,7 +32,7 @@ To see the streams, open [Foxglove Studio](https://app.foxglove.dev/), choose `O ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - 
[DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/integrations/hub-snaps-events/README.md b/integrations/hub-snaps-events/README.md index dcf3f721c..31cf26f60 100644 --- a/integrations/hub-snaps-events/README.md +++ b/integrations/hub-snaps-events/README.md @@ -37,7 +37,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/integrations/hub-snaps-events/main.py b/integrations/hub-snaps-events/main.py index ba63bc019..68712ee06 100644 --- a/integrations/hub-snaps-events/main.py +++ b/integrations/hub-snaps-events/main.py @@ -69,11 +69,9 @@ except ValueError: print(f"Class `{curr_class}` not predicted by the model, skipping.") - det_process_filter = pipeline.create(ImgDetectionsFilter).build( - nn_with_parser.out, - labels_to_keep=labels_to_keep, - confidence_threshold=args.confidence_threshold, - ) + det_process_filter = pipeline.create(ImgDetectionsFilter).build(nn_with_parser.out) + det_process_filter.keepLabels(labels_to_keep) + det_process_filter.minConfidence(args.confidence_threshold) snaps_producer = pipeline.create(SnapsProducer).build( frame=nn_with_parser.passthrough, diff --git a/integrations/hub-snaps-events/oakapp.toml b/integrations/hub-snaps-events/oakapp.toml index 8ac09306a..06416fdb7 100644 --- a/integrations/hub-snaps-events/oakapp.toml +++ b/integrations/hub-snaps-events/oakapp.toml @@ -14,4 +14,13 @@ build_steps = [] depthai_models = { yaml_path = "./depthai_models" } -entrypoint = ["bash", "-c", "python3 -u /app/main.py"] +entrypoint = ["bash", "-c", "python3 -u /app/main.py --api_key "] + +[base_image] +api_url = "https://registry-1.docker.io" +service = 
"registry.docker.io" +oauth_url = "https://auth.docker.io/token" +auth_type = "repository" +auth_name = "luxonis/oakapp-base" +image_name = "luxonis/oakapp-base" +image_tag = "1.2.6" \ No newline at end of file diff --git a/integrations/hub-snaps-events/requirements.txt b/integrations/hub-snaps-events/requirements.txt index bd5223a56..4340b39fd 100644 --- a/integrations/hub-snaps-events/requirements.txt +++ b/integrations/hub-snaps-events/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.2.1 -depthai-nodes==0.3.7 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update python-dotenv diff --git a/integrations/rerun/README.md b/integrations/rerun/README.md index 8a9e77496..68519b96c 100644 --- a/integrations/rerun/README.md +++ b/integrations/rerun/README.md @@ -32,7 +32,7 @@ By default, the example will open local Rerun Viewer on the device. You can also ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/integrations/roboflow-dataset/README.md b/integrations/roboflow-dataset/README.md index 143ac21c9..12b219020 100644 --- a/integrations/roboflow-dataset/README.md +++ b/integrations/roboflow-dataset/README.md @@ -39,7 +39,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/integrations/roboflow-dataset/requirements.txt b/integrations/roboflow-dataset/requirements.txt index b6d95df4c..46737ea80 100644 --- a/integrations/roboflow-dataset/requirements.txt +++ b/integrations/roboflow-dataset/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 numpy>=1.22 opencv-python-headless~=4.10.0 roboflow==1.1.36 diff --git a/integrations/roboflow-workflow/backend/src/core/annotation_node.py b/integrations/roboflow-workflow/backend/src/core/annotation_node.py index fc7dd3d89..90f78ca98 100644 --- a/integrations/roboflow-workflow/backend/src/core/annotation_node.py +++ b/integrations/roboflow-workflow/backend/src/core/annotation_node.py @@ -1,7 +1,6 @@ import logging import depthai as dai from enum import Enum -from depthai_nodes import ImgDetectionsExtended, ImgDetectionExtended class OutputType(Enum): @@ -20,7 +19,7 @@ def __init__( self.frames = {} # key -> ImgFrame self.output_frames = {"passthrough": self.createOutput()} - self.detections = {} # key -> ImgDetectionsExtended + self.detections = {} # key -> dai.ImgDetections self.output_detections = {} self._logger = logging.getLogger(self.__class__.__name__) @@ -80,13 +79,13 @@ def on_prediction(self, result, frame): self.frames[key] = vis_frame elif output_type == OutputType.DETECTION: - dets = ImgDetectionsExtended() + dets = dai.ImgDetections() try: for det in value: # Roboflow prediction output: xyxy, mask, conf, class_id, tracker, extra xyxy, _, conf, class_id, _, extra = det - new_det = ImgDetectionExtended() + new_det = dai.ImgDetection() h, w = extra["image_dimensions"] class_label = extra["class_name"] @@ -98,22 +97,19 @@ def on_prediction(self, result, frame): y0 /= h y1 /= h - new_det.rotated_rect = ( - float((x0 + x1) / 2), - float((y0 + y1) / 2), - float(x1 - x0), - float(y1 - y0), - 0, - ) + new_det.xmin = float(x0) + new_det.ymin = float(y0) + new_det.xmax = float(x1) + new_det.ymax = float(y1) new_det.confidence 
= float(conf) new_det.label = int(class_id) - new_det.label_name = str(class_label) + new_det.labelName = str(class_label) dets.detections.append(new_det) except Exception: self._logger.info( - f"Failed to parse output `{key}` as ImgDetectionExtended. " + f"Failed to parse output `{key}` as ImgDetection. " "Verify that this output contains a valid Roboflow Detection. " "If it does not, consider renaming the output in your Workflow so that " "'predictions' is not a substring of the output name." diff --git a/integrations/roboflow-workflow/backend/src/requirements.txt b/integrations/roboflow-workflow/backend/src/requirements.txt index be833a15b..a732ed178 100644 --- a/integrations/roboflow-workflow/backend/src/requirements.txt +++ b/integrations/roboflow-workflow/backend/src/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.2.1 -depthai-nodes==0.3.6 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python~=4.10.0 inference \ No newline at end of file diff --git a/neural-networks/3D-detection/objectron/README.md b/neural-networks/3D-detection/objectron/README.md index 443952b3c..b8a5532a4 100644 --- a/neural-networks/3D-detection/objectron/README.md +++ b/neural-networks/3D-detection/objectron/README.md @@ -30,7 +30,7 @@ Camera and shoes can not be detected with general YOLOv6 detector. So, you need ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/3D-detection/objectron/main.py b/neural-networks/3D-detection/objectron/main.py index 5a016a6ba..71da855d2 100644 --- a/neural-networks/3D-detection/objectron/main.py +++ b/neural-networks/3D-detection/objectron/main.py @@ -1,8 +1,12 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, ImgDetectionsFilter, GatherData -from depthai_nodes.node.utils import generate_script_content +from depthai_nodes.node import ( + ParsingNeuralNetwork, + ImgDetectionsFilter, + GatherData, + FrameCropper, +) from utils.arguments import initialize_argparser from utils.annotation_node import AnnotationNode @@ -56,43 +60,33 @@ input_node, det_model_description, args.fps_limit ) - first_stage_filter = pipeline.create(ImgDetectionsFilter).build( - det_nn.out, - labels_to_keep=VALID_LABELS, - ) + first_stage_filter = pipeline.create(ImgDetectionsFilter).build(det_nn.out) + first_stage_filter.keepLabels(VALID_LABELS) # detection processing - script = pipeline.create(dai.node.Script) - first_stage_filter.out.link(script.inputs["det_in"]) - det_nn.passthrough.link(script.inputs["preview"]) - script_content = generate_script_content( - resize_width=pos_model_w, - resize_height=pos_model_h, - padding=PADDING, - resize_mode="STRETCH", + crop_node = ( + pipeline.create(FrameCropper) + .fromImgDetections( + inputImgDetections=first_stage_filter.out, + padding=PADDING, + ) + .build( + inputImage=det_nn.passthrough, + outputSize=(pos_model_w, pos_model_h), + resizeMode=dai.ImageManipConfig.ResizeMode.STRETCH, + ) ) - script.setScript(script_content) - - crop_node = pipeline.create(dai.node.ImageManip) - crop_node.initialConfig.setOutputSize(pos_model_w, pos_model_h) - crop_node.inputConfig.setWaitForMessage(True) - - script.outputs["manip_cfg"].link(crop_node.inputConfig) - script.outputs["manip_img"].link(crop_node.inputImage) pos_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( crop_node.out, 
pos_nn_archive ) - detections_filter = pipeline.create(ImgDetectionsFilter).build( - det_nn.out, - labels_to_keep=VALID_LABELS, - ) - # detections and position estimations sync - gather_data = pipeline.create(GatherData).build(camera_fps=args.fps_limit) - detections_filter.out.link(gather_data.input_reference) - pos_nn.getOutput(0).link(gather_data.input_data) + gather_data = pipeline.create(GatherData).build( + cameraFps=args.fps_limit, + inputData=pos_nn.getOutput(0), + inputReference=first_stage_filter.out, + ) # annotation connection_pairs = ( diff --git a/neural-networks/3D-detection/objectron/requirements.txt b/neural-networks/3D-detection/objectron/requirements.txt index 338292859..e42562133 100644 --- a/neural-networks/3D-detection/objectron/requirements.txt +++ b/neural-networks/3D-detection/objectron/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.5 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update diff --git a/neural-networks/3D-detection/objectron/utils/annotation_node.py b/neural-networks/3D-detection/objectron/utils/annotation_node.py index f43202ab9..51b97c094 100644 --- a/neural-networks/3D-detection/objectron/utils/annotation_node.py +++ b/neural-networks/3D-detection/objectron/utils/annotation_node.py @@ -1,7 +1,5 @@ import depthai as dai from depthai_nodes import ( - ImgDetectionsExtended, - Keypoints, GatheredData, PRIMARY_COLOR, SECONDARY_COLOR, @@ -36,7 +34,7 @@ def build( def process(self, gathered_data: dai.Buffer) -> None: assert isinstance(gathered_data, GatheredData) - detections_message: ImgDetectionsExtended = gathered_data.reference_data + detections_message: dai.ImgDetections = gathered_data.reference_data detections_list: List[dai.ImgDetection] = detections_message.detections annotation_helper = AnnotationHelper() @@ -44,15 +42,25 @@ def process(self, gathered_data: dai.Buffer) -> None: padding = self.padding for ix, detection in 
enumerate(detections_list): - keypoints_msg: Keypoints = gathered_data.gathered[ix] + keypoints_msg: dai.KeypointsList = gathered_data.items[ix] slope_x = (detection.xmax + padding) - (detection.xmin - padding) slope_y = (detection.ymax + padding) - (detection.ymin - padding) xs = [] ys = [] - for kp in keypoints_msg.keypoints: - x = min(max(detection.xmin - padding + slope_x * kp.x, 0.0), 1.0) - y = min(max(detection.ymin - padding + slope_y * kp.y, 0.0), 1.0) + for kp in keypoints_msg.getKeypoints(): + x = min( + max( + detection.xmin - padding + slope_x * kp.imageCoordinates.x, 0.0 + ), + 1.0, + ) + y = min( + max( + detection.ymin - padding + slope_y * kp.imageCoordinates.y, 0.0 + ), + 1.0, + ) xs.append(x) ys.append(y) @@ -74,7 +82,7 @@ def process(self, gathered_data: dai.Buffer) -> None: ) annotation_helper.draw_text( - text=f"{(detection.confidence * 100):.2f}%", + text=f"{int(detection.confidence * 100)}%", position=(detection.xmin, detection.ymin - 0.05), color=SECONDARY_COLOR, size=16.0, diff --git a/neural-networks/counting/crowdcounting/README.md b/neural-networks/counting/crowdcounting/README.md index eee7f6fe7..dddca5da6 100644 --- a/neural-networks/counting/crowdcounting/README.md +++ b/neural-networks/counting/crowdcounting/README.md @@ -32,7 +32,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/counting/crowdcounting/requirements.txt b/neural-networks/counting/crowdcounting/requirements.txt index df8d7aa85..1d4d576f6 100644 --- a/neural-networks/counting/crowdcounting/requirements.txt +++ b/neural-networks/counting/crowdcounting/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/counting/cumulative-object-counting/README.md b/neural-networks/counting/cumulative-object-counting/README.md index 3212ab39a..51289e932 100644 --- a/neural-networks/counting/cumulative-object-counting/README.md +++ b/neural-networks/counting/cumulative-object-counting/README.md @@ -41,7 +41,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/counting/cumulative-object-counting/requirements.txt b/neural-networks/counting/cumulative-object-counting/requirements.txt index 7bfdaaf09..1457b9317 100644 --- a/neural-networks/counting/cumulative-object-counting/requirements.txt +++ b/neural-networks/counting/cumulative-object-counting/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/counting/depth-people-counting/README.md b/neural-networks/counting/depth-people-counting/README.md index 7f0e6e9cd..34e3ea97b 100644 --- a/neural-networks/counting/depth-people-counting/README.md +++ b/neural-networks/counting/depth-people-counting/README.md @@ -65,7 +65,7 @@ To use the recording with the example: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/counting/depth-people-counting/main.py b/neural-networks/counting/depth-people-counting/main.py index baaac67f8..0dc198161 100644 --- a/neural-networks/counting/depth-people-counting/main.py +++ b/neural-networks/counting/depth-people-counting/main.py @@ -1,6 +1,6 @@ import depthai as dai import os -from depthai_nodes.node import ApplyColormap +from depthai_nodes.node import ApplyDepthColormap from utils.arguments import initialize_argparser from utils.frame_editor import FrameEditor @@ -74,7 +74,9 @@ dai.TrackerIdAssignmentPolicy.SMALLEST_ID ) - color_transform_disparity = pipeline.create(ApplyColormap).build(stereo.disparity) + color_transform_disparity = pipeline.create(ApplyDepthColormap).build( + stereo.disparity + ) color_transform_disparity.out.link(objectTracker.inputTrackerFrame) color_transform_disparity.out.link(objectTracker.inputDetectionFrame) detection_generator.out.link(objectTracker.inputDetections) @@ -86,7 +88,7 @@ # visualization visualizer.addTopic("Disparity", color_transform_disparity.out, "disparity") - visualizer.addTopic("Count", annotation_node.out) + visualizer.addTopic("Count", annotation_node.out, "disparity") print("Pipeline created.") diff --git a/neural-networks/counting/depth-people-counting/requirements.txt b/neural-networks/counting/depth-people-counting/requirements.txt index df8d7aa85..1d4d576f6 100644 --- a/neural-networks/counting/depth-people-counting/requirements.txt +++ b/neural-networks/counting/depth-people-counting/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/counting/people-counter/README.md b/neural-networks/counting/people-counter/README.md index 1e8318c32..af023b958 100644 --- a/neural-networks/counting/people-counter/README.md +++ b/neural-networks/counting/people-counter/README.md @@ -29,7 +29,7 @@ Here is a list of all 
available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/counting/people-counter/main.py b/neural-networks/counting/people-counter/main.py index 501cc52aa..16a9987df 100644 --- a/neural-networks/counting/people-counter/main.py +++ b/neural-networks/counting/people-counter/main.py @@ -51,9 +51,9 @@ # person detection filter classes = det_model_nn_archive.getConfig().model.heads[0].metadata.classes labels_to_keep = [classes.index("person")] if "person" in classes else [] - det_filter = pipeline.create(ImgDetectionsFilter).build( - det_nn.out, labels_to_keep=labels_to_keep, confidence_threshold=0.5 - ) + det_filter = pipeline.create(ImgDetectionsFilter).build(det_nn.out) + det_filter.keepLabels(labels_to_keep) + det_filter.minConfidence(0.5) # annotation annotation_node = pipeline.create(AnnotationNode).build(det_filter.out) diff --git a/neural-networks/counting/people-counter/requirements.txt b/neural-networks/counting/people-counter/requirements.txt index 8e4027fc1..e42562133 100644 --- a/neural-networks/counting/people-counter/requirements.txt +++ b/neural-networks/counting/people-counter/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update diff --git a/neural-networks/counting/people-counter/utils/annotation_node.py b/neural-networks/counting/people-counter/utils/annotation_node.py index d592b1bc8..2acc9604e 100644 --- a/neural-networks/counting/people-counter/utils/annotation_node.py +++ b/neural-networks/counting/people-counter/utils/annotation_node.py @@ -1,5 +1,4 @@ import depthai as dai -from 
depthai_nodes import ImgDetectionsExtended from depthai_nodes.utils import AnnotationHelper @@ -15,7 +14,7 @@ def build(self, det_msg: dai.Node.Output) -> "AnnotationNode": return self def process(self, det_msg: dai.Buffer) -> None: - assert isinstance(det_msg, (dai.ImgDetections, ImgDetectionsExtended)) + assert isinstance(det_msg, (dai.ImgDetections)) count = len(det_msg.detections) diff --git a/neural-networks/depth-estimation/crestereo-stereo-matching/README.md b/neural-networks/depth-estimation/crestereo-stereo-matching/README.md index cd076e99c..7da7632d2 100644 --- a/neural-networks/depth-estimation/crestereo-stereo-matching/README.md +++ b/neural-networks/depth-estimation/crestereo-stereo-matching/README.md @@ -29,7 +29,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/depth-estimation/foundation-stereo/main.py b/neural-networks/depth-estimation/foundation-stereo/main.py index 7da76ad96..07c6e3192 100644 --- a/neural-networks/depth-estimation/foundation-stereo/main.py +++ b/neural-networks/depth-estimation/foundation-stereo/main.py @@ -1,5 +1,5 @@ import depthai as dai -from depthai_nodes.node import ApplyColormap +from depthai_nodes.node import ApplyDepthColormap from utils.arguments import initialize_argparser from utils.utility import get_resolution_profile @@ -50,7 +50,7 @@ inference_shape=resolution_profile.nn_shape, ) - colored_disp = pipeline.create(ApplyColormap).build(stereo.disparity) + colored_disp = pipeline.create(ApplyDepthColormap).build(stereo.disparity) visualizer.addTopic("FS Result", fs_inferer.output) visualizer.addTopic("Disparity", colored_disp.out) diff --git a/neural-networks/depth-estimation/foundation-stereo/requirements.txt b/neural-networks/depth-estimation/foundation-stereo/requirements.txt index 5483b3136..9e1a358af 100644 --- a/neural-networks/depth-estimation/foundation-stereo/requirements.txt +++ b/neural-networks/depth-estimation/foundation-stereo/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 onnxruntime>=1.19.0 onnxruntime-gpu>=1.19.0 numpy>=1.22 diff --git a/neural-networks/depth-estimation/neural-depth/README.md b/neural-networks/depth-estimation/neural-depth/README.md index b051c6952..7a39f9b4b 100644 --- a/neural-networks/depth-estimation/neural-depth/README.md +++ b/neural-networks/depth-estimation/neural-depth/README.md @@ -31,7 +31,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/depth-estimation/neural-depth/host_eval/README.md b/neural-networks/depth-estimation/neural-depth/host_eval/README.md index 8db9c1eb6..a9226ea76 100644 --- a/neural-networks/depth-estimation/neural-depth/host_eval/README.md +++ b/neural-networks/depth-estimation/neural-depth/host_eval/README.md @@ -42,7 +42,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/depth-estimation/neural-depth/host_eval/requirements.txt b/neural-networks/depth-estimation/neural-depth/host_eval/requirements.txt index e585b4f7a..ff0639b20 100644 --- a/neural-networks/depth-estimation/neural-depth/host_eval/requirements.txt +++ b/neural-networks/depth-estimation/neural-depth/host_eval/requirements.txt @@ -1,5 +1,5 @@ beautifulsoup4==4.12.3 -depthai==3.2.1 +depthai==3.4.0 numpy opencv-python~=4.10.0 requests diff --git a/neural-networks/depth-estimation/neural-depth/requirements.txt b/neural-networks/depth-estimation/neural-depth/requirements.txt index 1ef4f866c..98ae4b842 100644 --- a/neural-networks/depth-estimation/neural-depth/requirements.txt +++ b/neural-networks/depth-estimation/neural-depth/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.2.1 -depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@1b1dd7953feeaff1ca1a8c2234c532704b167d5f \ No newline at end of file +depthai==3.4.0 +depthai-nodes==0.4.0 \ No newline at end of file diff --git a/neural-networks/face-detection/age-gender/README.md b/neural-networks/face-detection/age-gender/README.md index ddce6f240..4a5cb25be 100644 --- a/neural-networks/face-detection/age-gender/README.md +++ b/neural-networks/face-detection/age-gender/README.md @@ -28,7 +28,7 
@@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/face-detection/age-gender/main.py b/neural-networks/face-detection/age-gender/main.py index 60d2c519c..afa78eda9 100644 --- a/neural-networks/face-detection/age-gender/main.py +++ b/neural-networks/face-detection/age-gender/main.py @@ -1,8 +1,7 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, GatherData, ImgDetectionsBridge -from depthai_nodes.node.utils import generate_script_content +from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper from utils.arguments import initialize_argparser from utils.annotation_node import AnnotationNode @@ -73,36 +72,30 @@ ) det_nn.getParser(0).conf_threshold = 0.9 # for more stable detections - # detection processing - det_bridge = pipeline.create(ImgDetectionsBridge).build( - det_nn.out - ) # TODO: remove once we have it working with ImgDetectionsExtended - script_node = pipeline.create(dai.node.Script) - det_bridge.out.link(script_node.inputs["det_in"]) - input_node_out.link(script_node.inputs["preview"]) - script_content = generate_script_content( - resize_width=rec_model_nn_archive.getInputWidth(), - resize_height=rec_model_nn_archive.getInputHeight(), - ) - script_node.setScript(script_content) - - crop_node = pipeline.create(dai.node.ImageManip) - crop_node.initialConfig.setOutputSize( - rec_model_nn_archive.getInputWidth(), rec_model_nn_archive.getInputHeight() + crop_node = ( + pipeline.create(FrameCropper) + .fromImgDetections( + inputImgDetections=det_nn.out, + ) + .build( + inputImage=input_node_out, + outputSize=( + rec_model_nn_archive.getInputWidth(), + 
rec_model_nn_archive.getInputHeight(), + ), + ) ) - crop_node.inputConfig.setWaitForMessage(True) - - script_node.outputs["manip_cfg"].link(crop_node.inputConfig) - script_node.outputs["manip_img"].link(crop_node.inputImage) rec_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( crop_node.out, rec_model_nn_archive ) # detections and recognitions sync - gather_data_node = pipeline.create(GatherData).build(args.fps_limit) - rec_nn.outputs.link(gather_data_node.input_data) - det_nn.out.link(gather_data_node.input_reference) + gather_data_node = pipeline.create(GatherData).build( + cameraFps=args.fps_limit, + inputData=rec_nn.outputs, + inputReference=det_nn.out, + ) # annotation annotation_node = pipeline.create(AnnotationNode).build(gather_data_node.out) diff --git a/neural-networks/face-detection/age-gender/requirements.txt b/neural-networks/face-detection/age-gender/requirements.txt index 8e4027fc1..1106ae10a 100644 --- a/neural-networks/face-detection/age-gender/requirements.txt +++ b/neural-networks/face-detection/age-gender/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git a/neural-networks/face-detection/age-gender/utils/annotation_node.py b/neural-networks/face-detection/age-gender/utils/annotation_node.py index 30df380ca..a462b4e3f 100644 --- a/neural-networks/face-detection/age-gender/utils/annotation_node.py +++ b/neural-networks/face-detection/age-gender/utils/annotation_node.py @@ -2,7 +2,6 @@ import depthai as dai from depthai_nodes import ( - ImgDetectionsExtended, Predictions, Classifications, SECONDARY_COLOR, @@ -22,34 +21,28 @@ def build( return self def process(self, gather_data_msg: dai.Buffer) -> None: - img_detections_extended_msg: ImgDetectionsExtended = ( - gather_data_msg.reference_data - ) - assert 
isinstance(img_detections_extended_msg, ImgDetectionsExtended) + img_detections_msg: dai.ImgDetections = gather_data_msg.reference_data + assert isinstance(img_detections_msg, dai.ImgDetections) - age_gender_msg_group_list: List[dai.MessageGroup] = gather_data_msg.gathered + age_gender_msg_group_list: List[dai.MessageGroup] = gather_data_msg.items assert isinstance(age_gender_msg_group_list, list) assert all( isinstance(msg, dai.MessageGroup) for msg in age_gender_msg_group_list ) - assert len(img_detections_extended_msg.detections) == len( - age_gender_msg_group_list - ) + assert len(img_detections_msg.detections) == len(age_gender_msg_group_list) annotations = AnnotationHelper() - for img_detection_extended_msg, age_gender_msg_group in zip( - img_detections_extended_msg.detections, age_gender_msg_group_list + for img_detection_msg, age_gender_msg_group in zip( + img_detections_msg.detections, age_gender_msg_group_list ): age_msg: Predictions = age_gender_msg_group["0"] assert isinstance(age_msg, Predictions) gender_msg: Classifications = age_gender_msg_group["1"] assert isinstance(gender_msg, Classifications) - xmin, ymin, xmax, ymax = ( - img_detection_extended_msg.rotated_rect.getOuterRect() - ) + xmin, ymin, xmax, ymax = img_detection_msg.getBoundingBox().getOuterRect() annotations.draw_rectangle( (xmin, ymin), @@ -64,8 +57,8 @@ def process(self, gather_data_msg: dai.Buffer) -> None: ) annotations_msg = annotations.build( - timestamp=img_detections_extended_msg.getTimestamp(), - sequence_num=img_detections_extended_msg.getSequenceNum(), + timestamp=img_detections_msg.getTimestamp(), + sequence_num=img_detections_msg.getSequenceNum(), ) self.out.send(annotations_msg) diff --git a/neural-networks/face-detection/blur-faces/README.md b/neural-networks/face-detection/blur-faces/README.md index 0102b211f..fc094adaa 100644 --- a/neural-networks/face-detection/blur-faces/README.md +++ b/neural-networks/face-detection/blur-faces/README.md @@ -27,7 +27,7 @@ Here is a 
list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/face-detection/blur-faces/requirements.txt b/neural-networks/face-detection/blur-faces/requirements.txt index df8d7aa85..1d4d576f6 100644 --- a/neural-networks/face-detection/blur-faces/requirements.txt +++ b/neural-networks/face-detection/blur-faces/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/face-detection/blur-faces/utils/blur_detections.py b/neural-networks/face-detection/blur-faces/utils/blur_detections.py index 1839df301..ebeeea394 100644 --- a/neural-networks/face-detection/blur-faces/utils/blur_detections.py +++ b/neural-networks/face-detection/blur-faces/utils/blur_detections.py @@ -21,7 +21,7 @@ def run(self) -> None: h, w = frame_copy.shape[:2] for detection in detections: - rect: dai.RotatedRect = detection.rotated_rect + rect: dai.RotatedRect = detection.getBoundingBox() rect = rect.denormalize(w, h) detection = rect.getOuterRect() bbox = [int(d) for d in detection] diff --git a/neural-networks/face-detection/emotion-recognition/README.md b/neural-networks/face-detection/emotion-recognition/README.md index 1ef62e048..ba9b00531 100644 --- a/neural-networks/face-detection/emotion-recognition/README.md +++ b/neural-networks/face-detection/emotion-recognition/README.md @@ -28,7 +28,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following 
packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/face-detection/emotion-recognition/main.py b/neural-networks/face-detection/emotion-recognition/main.py index e01ec01ed..73767f833 100755 --- a/neural-networks/face-detection/emotion-recognition/main.py +++ b/neural-networks/face-detection/emotion-recognition/main.py @@ -1,8 +1,7 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, GatherData, ImgDetectionsBridge -from depthai_nodes.node.utils import generate_script_content +from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper from utils.arguments import initialize_argparser from utils.annotation_node import AnnotationNode @@ -72,33 +71,30 @@ resize_node.out, det_model_nn_archive ) - # detection processing - det_bridge = pipeline.create(ImgDetectionsBridge).build( - det_nn.out - ) # TODO: remove once we have it working with ImgDetectionsExtended - script_node = pipeline.create(dai.node.Script) - det_bridge.out.link(script_node.inputs["det_in"]) - input_node.link(script_node.inputs["preview"]) - script_content = generate_script_content( - resize_width=rec_model_nn_archive.getInputWidth(), - resize_height=rec_model_nn_archive.getInputHeight(), + crop_node = ( + pipeline.create(FrameCropper) + .fromImgDetections( + inputImgDetections=det_nn.out, + ) + .build( + inputImage=input_node, + outputSize=( + rec_model_nn_archive.getInputWidth(), + rec_model_nn_archive.getInputHeight(), + ), + ) ) - script_node.setScript(script_content) - - crop_node = pipeline.create(dai.node.ImageManip) - crop_node.inputConfig.setWaitForMessage(True) - - script_node.outputs["manip_cfg"].link(crop_node.inputConfig) - script_node.outputs["manip_img"].link(crop_node.inputImage) rec_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( crop_node.out, rec_model_nn_archive ) # detections and 
recognitions sync - gather_data_node = pipeline.create(GatherData).build(args.fps_limit) - rec_nn.out.link(gather_data_node.input_data) - det_nn.out.link(gather_data_node.input_reference) + gather_data_node = pipeline.create(GatherData).build( + cameraFps=args.fps_limit, + inputData=rec_nn.out, + inputReference=det_nn.out, + ) # annotation annotation_node = pipeline.create(AnnotationNode).build(gather_data_node.out) diff --git a/neural-networks/face-detection/emotion-recognition/requirements.txt b/neural-networks/face-detection/emotion-recognition/requirements.txt index 7bfdaaf09..8f9430e09 100644 --- a/neural-networks/face-detection/emotion-recognition/requirements.txt +++ b/neural-networks/face-detection/emotion-recognition/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/face-detection/emotion-recognition/utils/annotation_node.py b/neural-networks/face-detection/emotion-recognition/utils/annotation_node.py index efe5b9464..44753122a 100644 --- a/neural-networks/face-detection/emotion-recognition/utils/annotation_node.py +++ b/neural-networks/face-detection/emotion-recognition/utils/annotation_node.py @@ -2,7 +2,7 @@ import depthai as dai -from depthai_nodes import ImgDetectionsExtended, Classifications, SECONDARY_COLOR +from depthai_nodes import Classifications, SECONDARY_COLOR from depthai_nodes.utils import AnnotationHelper @@ -18,10 +18,10 @@ def build( return self def process(self, gather_data_msg: dai.Buffer) -> None: - dets_msg: ImgDetectionsExtended = gather_data_msg.reference_data - assert isinstance(dets_msg, ImgDetectionsExtended) + dets_msg: dai.ImgDetections = gather_data_msg.reference_data + assert isinstance(dets_msg, dai.ImgDetections) - rec_msg_list: List[Classifications] = gather_data_msg.gathered + rec_msg_list: List[Classifications] = 
gather_data_msg.items assert isinstance(rec_msg_list, list) assert all(isinstance(rec_msg, Classifications) for rec_msg in rec_msg_list) assert len(dets_msg.detections) == len(rec_msg_list) @@ -29,7 +29,7 @@ def process(self, gather_data_msg: dai.Buffer) -> None: annotations = AnnotationHelper() for det_msg, rec_msg in zip(dets_msg.detections, rec_msg_list): - xmin, ymin, xmax, ymax = det_msg.rotated_rect.getOuterRect() + xmin, ymin, xmax, ymax = det_msg.getBoundingBox().getOuterRect() annotations.draw_rectangle( (xmin, ymin), diff --git a/neural-networks/face-detection/face-mask-detection/README.md b/neural-networks/face-detection/face-mask-detection/README.md index b28abaff2..d6c2b15b0 100644 --- a/neural-networks/face-detection/face-mask-detection/README.md +++ b/neural-networks/face-detection/face-mask-detection/README.md @@ -33,7 +33,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/face-detection/face-mask-detection/main.py b/neural-networks/face-detection/face-mask-detection/main.py index cc0d0b90f..dff669ce4 100644 --- a/neural-networks/face-detection/face-mask-detection/main.py +++ b/neural-networks/face-detection/face-mask-detection/main.py @@ -4,7 +4,6 @@ from depthai_nodes.node import ( ParsingNeuralNetwork, ImgDetectionsFilter, - ImgDetectionsBridge, ) from utils.arguments import initialize_argparser @@ -57,14 +56,11 @@ # filter and rename detection labels det_process_filter = pipeline.create(ImgDetectionsFilter).build(det_nn.out) - det_process_filter.setLabels(list(LABEL_ENCODING.keys()), keep=True) - det_process_bridge = pipeline.create(ImgDetectionsBridge).build( - det_process_filter.out, label_encoding=LABEL_ENCODING - ) + det_process_filter.keepLabels(list(LABEL_ENCODING.keys())) # visualization visualizer.addTopic("Video", det_nn.passthrough, "images") - visualizer.addTopic("Detections", det_process_bridge.out, "images") + visualizer.addTopic("Detections", det_process_filter.out, "images") print("Pipeline created.") diff --git a/neural-networks/face-detection/face-mask-detection/requirements.txt b/neural-networks/face-detection/face-mask-detection/requirements.txt index 8e4027fc1..e42562133 100644 --- a/neural-networks/face-detection/face-mask-detection/requirements.txt +++ b/neural-networks/face-detection/face-mask-detection/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update diff --git a/neural-networks/face-detection/fatigue-detection/README.md b/neural-networks/face-detection/fatigue-detection/README.md index 0d1900b84..661fbad5d 100644 --- a/neural-networks/face-detection/fatigue-detection/README.md +++ b/neural-networks/face-detection/fatigue-detection/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### 
Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/face-detection/fatigue-detection/main.py b/neural-networks/face-detection/fatigue-detection/main.py index eb3e88ab1..1425b0973 100644 --- a/neural-networks/face-detection/fatigue-detection/main.py +++ b/neural-networks/face-detection/fatigue-detection/main.py @@ -1,8 +1,7 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, ImgDetectionsBridge, GatherData -from depthai_nodes.node.utils import generate_script_content +from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper from utils.arguments import initialize_argparser from utils.annotation_node import AnnotationNode @@ -73,33 +72,27 @@ resize_node.out, det_model_nn_archive ) - # detection processing - det_bridge = pipeline.create(ImgDetectionsBridge).build( - det_nn.out - ) # TODO: remove once we have it working with ImgDetectionsExtended - script_node = pipeline.create(dai.node.Script) - det_bridge.out.link(script_node.inputs["det_in"]) - input_node_out.link(script_node.inputs["preview"]) - script_content = generate_script_content( - resize_width=rec_model_w, - resize_height=rec_model_h, + crop_node = ( + pipeline.create(FrameCropper) + .fromImgDetections( + inputImgDetections=det_nn.out, + ) + .build( + inputImage=input_node_out, + outputSize=(rec_model_w, rec_model_h), + ) ) - script_node.setScript(script_content) - - crop_node = pipeline.create(dai.node.ImageManip) - crop_node.inputConfig.setWaitForMessage(True) - - script_node.outputs["manip_cfg"].link(crop_node.inputConfig) - script_node.outputs["manip_img"].link(crop_node.inputImage) landmark_nn: ParsingNeuralNetwork = 
pipeline.create(ParsingNeuralNetwork).build( crop_node.out, rec_model_description ) # detections and gaze estimations sync - gather_data_node = pipeline.create(GatherData).build(args.fps_limit) - landmark_nn.out.link(gather_data_node.input_data) - det_nn.out.link(gather_data_node.input_reference) + gather_data_node = pipeline.create(GatherData).build( + cameraFps=args.fps_limit, + inputData=landmark_nn.out, + inputReference=det_nn.out, + ) # annotation annotation_node = pipeline.create(AnnotationNode).build(gather_data_node.out) diff --git a/neural-networks/face-detection/fatigue-detection/requirements.txt b/neural-networks/face-detection/fatigue-detection/requirements.txt index df8d7aa85..0939d9a84 100644 --- a/neural-networks/face-detection/fatigue-detection/requirements.txt +++ b/neural-networks/face-detection/fatigue-detection/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/face-detection/fatigue-detection/utils/annotation_node.py b/neural-networks/face-detection/fatigue-detection/utils/annotation_node.py index 5549087c3..a881161f2 100644 --- a/neural-networks/face-detection/fatigue-detection/utils/annotation_node.py +++ b/neural-networks/face-detection/fatigue-detection/utils/annotation_node.py @@ -2,7 +2,6 @@ from collections import deque import depthai as dai from depthai_nodes.utils import AnnotationHelper -from depthai_nodes import ImgDetectionsExtended, Keypoints from utils.face_landmarks import determine_fatigue @@ -18,13 +17,15 @@ def build(self, gather_data_msg) -> "AnnotationNode": return self def process(self, gather_data_msg) -> None: - detections_msg: ImgDetectionsExtended = gather_data_msg.reference_data - assert isinstance(detections_msg, ImgDetectionsExtended) - src_w, src_h = 
detections_msg.transformation.getSize() + detections_msg: dai.ImgDetections = gather_data_msg.reference_data + assert isinstance(detections_msg, dai.ImgDetections) + src_w, src_h = detections_msg.getTransformation().getSize() - landmarks_msg_list: List[Keypoints] = gather_data_msg.gathered + landmarks_msg_list: List[dai.KeypointsList] = gather_data_msg.items assert isinstance(landmarks_msg_list, list) - assert all(isinstance(rec_msg, Keypoints) for rec_msg in landmarks_msg_list) + assert all( + isinstance(rec_msg, dai.KeypointsList) for rec_msg in landmarks_msg_list + ) assert len(landmarks_msg_list) == len(detections_msg.detections) annotations = AnnotationHelper() diff --git a/neural-networks/face-detection/fatigue-detection/utils/face_landmarks.py b/neural-networks/face-detection/fatigue-detection/utils/face_landmarks.py index 1a2c421c2..b2389558e 100644 --- a/neural-networks/face-detection/fatigue-detection/utils/face_landmarks.py +++ b/neural-networks/face-detection/fatigue-detection/utils/face_landmarks.py @@ -2,15 +2,15 @@ import cv2 import math import numpy as np -from depthai_nodes import Keypoints +import depthai as dai def determine_fatigue( - shape: Tuple[int, int], face_keypoints: Keypoints, pitch_angle: int = 20 + shape: Tuple[int, int], face_keypoints: dai.KeypointsList, pitch_angle: int = 20 ): h, w = shape # frame.shape[:2] face_points_2d = np.array( - [[int(kp.x * w), int(kp.y * h)] for kp in face_keypoints.keypoints] + [[int(kp.x * w), int(kp.y * h)] for kp in face_keypoints.getKeypoints()] ) left_eye_idx = [33, 160, 158, 133, 144, 153] diff --git a/neural-networks/face-detection/gaze-estimation/README.md b/neural-networks/face-detection/gaze-estimation/README.md index 345a76b70..3ed6976b0 100644 --- a/neural-networks/face-detection/gaze-estimation/README.md +++ b/neural-networks/face-detection/gaze-estimation/README.md @@ -31,7 +31,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** 
environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/face-detection/gaze-estimation/main.py b/neural-networks/face-detection/gaze-estimation/main.py index 6eb3719ca..e18113c4d 100644 --- a/neural-networks/face-detection/gaze-estimation/main.py +++ b/neural-networks/face-detection/gaze-estimation/main.py @@ -1,11 +1,10 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, GatherData +from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper from utils.arguments import initialize_argparser from utils.process_keypoints import LandmarksProcessing -from utils.node_creators import create_crop_node from utils.annotation_node import AnnotationNode from utils.host_concatenate_head_pose import ConcatenateHeadPose @@ -101,14 +100,24 @@ ) det_nn.out.link(detection_process_node.detections_input) - left_eye_crop_node = create_crop_node( - pipeline, input_node_out, detection_process_node.left_config_output + crop_output_size = ( + head_pose_model_nn_archive.getInputWidth(), + head_pose_model_nn_archive.getInputHeight(), ) - right_eye_crop_node = create_crop_node( - pipeline, input_node_out, detection_process_node.right_config_output + left_eye_crop_node = ( + pipeline.create(FrameCropper) + .fromManipConfigs(detection_process_node.left_config_output) + .build(input_node_out, crop_output_size) ) - face_crop_node = create_crop_node( - pipeline, input_node_out, detection_process_node.face_config_output + right_eye_crop_node = ( + pipeline.create(FrameCropper) + .fromManipConfigs(detection_process_node.right_config_output) + .build(input_node_out, crop_output_size) + ) + face_crop_node = ( + pipeline.create(FrameCropper) + .fromManipConfigs(detection_process_node.face_config_output) + 
.build(input_node_out, crop_output_size) ) # head pose estimation @@ -137,9 +146,11 @@ gaze_estimation_node.inputs["head_pose_angles_yaw_pitch_roll"].setMaxSize(5) # detections and gaze estimations sync - gather_data_node = pipeline.create(GatherData).build(args.fps_limit) - gaze_estimation_node.out.link(gather_data_node.input_data) - det_nn.out.link(gather_data_node.input_reference) + gather_data_node = pipeline.create(GatherData).build( + cameraFps=args.fps_limit, + inputData=gaze_estimation_node.out, + inputReference=det_nn.out, + ) # annotation annotation_node = pipeline.create(AnnotationNode).build(gather_data_node.out) diff --git a/neural-networks/face-detection/gaze-estimation/requirements.txt b/neural-networks/face-detection/gaze-estimation/requirements.txt index 8e4027fc1..1106ae10a 100644 --- a/neural-networks/face-detection/gaze-estimation/requirements.txt +++ b/neural-networks/face-detection/gaze-estimation/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git a/neural-networks/face-detection/gaze-estimation/utils/annotation_node.py b/neural-networks/face-detection/gaze-estimation/utils/annotation_node.py index 760d81454..300165558 100644 --- a/neural-networks/face-detection/gaze-estimation/utils/annotation_node.py +++ b/neural-networks/face-detection/gaze-estimation/utils/annotation_node.py @@ -1,7 +1,6 @@ from typing import List import depthai as dai -from depthai_nodes import ImgDetectionsExtended from depthai_nodes.utils import AnnotationHelper @@ -14,11 +13,11 @@ def build(self, gather_data_msg) -> "AnnotationNode": return self def process(self, gather_data_msg) -> None: - detections_msg: ImgDetectionsExtended = gather_data_msg.reference_data - assert isinstance(detections_msg, ImgDetectionsExtended) - src_w, src_h = 
detections_msg.transformation.getSize() + detections_msg: dai.ImgDetections = gather_data_msg.reference_data + assert isinstance(detections_msg, dai.ImgDetections) + src_w, src_h = detections_msg.getTransformation().getSize() - gaze_msg_list: List[dai.NNData] = gather_data_msg.gathered + gaze_msg_list: List[dai.NNData] = gather_data_msg.items assert isinstance(gaze_msg_list, list) assert all(isinstance(rec_msg, dai.NNData) for rec_msg in gaze_msg_list) assert len(gaze_msg_list) == len(detections_msg.detections) @@ -26,8 +25,8 @@ def process(self, gather_data_msg) -> None: annotations = AnnotationHelper() for detection, gaze in zip(detections_msg.detections, gaze_msg_list): - face_bbox = detection.rotated_rect.getPoints() - keypoints = detection.keypoints + face_bbox = detection.getBoundingBox().getPoints() + keypoints = detection.getKeypoints2f() # Draw bbox annotations.draw_rectangle( diff --git a/neural-networks/face-detection/gaze-estimation/utils/node_creators.py b/neural-networks/face-detection/gaze-estimation/utils/node_creators.py deleted file mode 100644 index 3b361794b..000000000 --- a/neural-networks/face-detection/gaze-estimation/utils/node_creators.py +++ /dev/null @@ -1,32 +0,0 @@ -import depthai as dai -from pathlib import Path - - -def create_crop_node( - pipeline: dai.Pipeline, - input_frame: dai.Node.Output, - configs_message: dai.Node.Output, -) -> dai.node.ImageManip: - script_path = Path(__file__).parent / "config_sender_script.py" - with script_path.open("r") as script_file: - script_content = script_file.read() - - config_sender_script = pipeline.create(dai.node.Script) - config_sender_script.setScript(script_content) - config_sender_script.inputs["frame_input"].setBlocking(True) - config_sender_script.inputs["config_input"].setBlocking(True) - - img_manip_node = pipeline.create(dai.node.ImageManip) - img_manip_node.initialConfig.setReusePreviousImage(False) - img_manip_node.inputConfig.setReusePreviousMessage(False) - 
img_manip_node.inputImage.setReusePreviousMessage(False) - img_manip_node.inputConfig.setBlocking(True) - img_manip_node.inputImage.setBlocking(True) - - input_frame.link(config_sender_script.inputs["frame_input"]) - configs_message.link(config_sender_script.inputs["config_input"]) - - config_sender_script.outputs["output_config"].link(img_manip_node.inputConfig) - config_sender_script.outputs["output_frame"].link(img_manip_node.inputImage) - - return img_manip_node diff --git a/neural-networks/face-detection/gaze-estimation/utils/process_keypoints.py b/neural-networks/face-detection/gaze-estimation/utils/process_keypoints.py index 6c743ee2b..d24a66e8b 100644 --- a/neural-networks/face-detection/gaze-estimation/utils/process_keypoints.py +++ b/neural-networks/face-detection/gaze-estimation/utils/process_keypoints.py @@ -1,5 +1,4 @@ import depthai as dai -from depthai_nodes import ImgDetectionExtended, ImgDetectionsExtended class LandmarksProcessing(dai.node.ThreadedHostNode): @@ -27,28 +26,28 @@ def run(self) -> None: right_configs_message = dai.MessageGroup() face_configs_message = dai.MessageGroup() for i, detection in enumerate(detections): - detection: ImgDetectionExtended = detection - keypoints = detection.keypoints - face_size = detection.rotated_rect.size + detection: dai.ImgDetection = detection + keypoints = detection.getKeypoints2f() + face_size = detection.getBoundingBox().size face_w, face_h = face_size.width * self.w, face_size.height * self.h right_eye = self.crop_rectangle( keypoints[0], face_w * 0.25, face_h * 0.25 ) - right_configs_message[str(i + 100)] = self.create_crop_cfg( + right_configs_message[f"cfg_{i}"] = self.create_crop_cfg( right_eye, img_detections ) left_eye = self.crop_rectangle( keypoints[1], face_w * 0.25, face_h * 0.25 ) - left_configs_message[str(i + 100)] = self.create_crop_cfg( + left_configs_message[f"cfg_{i}"] = self.create_crop_cfg( left_eye, img_detections ) - face_rect = detection.rotated_rect + face_rect = 
detection.getBoundingBox() face_rect = face_rect.denormalize(self.w, self.h) - face_configs_message[str(i + 100)] = self.create_crop_cfg( + face_configs_message[f"cfg_{i}"] = self.create_crop_cfg( face_rect, img_detections ) @@ -77,7 +76,7 @@ def crop_rectangle(self, center_keypoint: dai.Point2f, crop_w: int, crop_h: int) return croped_rectangle.denormalize(self.w, self.h) def create_crop_cfg( - self, rectangle: dai.RotatedRect, img_detections: ImgDetectionsExtended + self, rectangle: dai.RotatedRect, img_detections: dai.ImgDetections ): cfg = dai.ImageManipConfig() cfg.addCropRotatedRect(rectangle, normalizedCoords=False) diff --git a/neural-networks/face-detection/head-posture-detection/README.md b/neural-networks/face-detection/head-posture-detection/README.md index 08389af1e..b2ea6fc0a 100644 --- a/neural-networks/face-detection/head-posture-detection/README.md +++ b/neural-networks/face-detection/head-posture-detection/README.md @@ -39,7 +39,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/face-detection/head-posture-detection/main.py b/neural-networks/face-detection/head-posture-detection/main.py index 727f9c8cf..ffd16efca 100644 --- a/neural-networks/face-detection/head-posture-detection/main.py +++ b/neural-networks/face-detection/head-posture-detection/main.py @@ -1,8 +1,7 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, GatherData, ImgDetectionsBridge -from depthai_nodes.node.utils import generate_script_content +from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper from utils.annotation_node import AnnotationNode from utils.arguments import initialize_argparser @@ -74,34 +73,27 @@ ) det_nn.input.setBlocking(True) - # detection processing - det_bridge = pipeline.create(ImgDetectionsBridge).build( - det_nn.out - ) # TODO: remove once we have it working with ImgDetectionsExtended - script_node = pipeline.create(dai.node.Script) - det_bridge.out.link(script_node.inputs["det_in"]) - input_node_out.link(script_node.inputs["preview"]) - script_content = generate_script_content( - resize_width=pose_model_w, - resize_height=pose_model_h, + crop_node = ( + pipeline.create(FrameCropper) + .fromImgDetections( + inputImgDetections=det_nn.out, + ) + .build( + inputImage=input_node_out, + outputSize=(pose_model_w, pose_model_h), + ) ) - script_node.setScript(script_content) - - crop_node = pipeline.create(dai.node.ImageManip) - crop_node.initialConfig.setOutputSize(pose_model_w, pose_model_h) - crop_node.inputConfig.setWaitForMessage(True) - - script_node.outputs["manip_cfg"].link(crop_node.inputConfig) - script_node.outputs["manip_img"].link(crop_node.inputImage) pose_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( crop_node.out, pose_model_nn_archive ) # detections and recognitions sync - gather_data_node = pipeline.create(GatherData).build(args.fps_limit) - pose_nn.outputs.link(gather_data_node.input_data) - 
det_nn.out.link(gather_data_node.input_reference) + gather_data_node = pipeline.create(GatherData).build( + cameraFps=args.fps_limit, + inputData=pose_nn.outputs, + inputReference=det_nn.out, + ) # annotation annotation_node = pipeline.create(AnnotationNode).build(gather_data_node.out) diff --git a/neural-networks/face-detection/head-posture-detection/requirements.txt b/neural-networks/face-detection/head-posture-detection/requirements.txt index 8e4027fc1..1106ae10a 100644 --- a/neural-networks/face-detection/head-posture-detection/requirements.txt +++ b/neural-networks/face-detection/head-posture-detection/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git a/neural-networks/face-detection/head-posture-detection/utils/annotation_node.py b/neural-networks/face-detection/head-posture-detection/utils/annotation_node.py index 20b6ff898..b49367ffa 100644 --- a/neural-networks/face-detection/head-posture-detection/utils/annotation_node.py +++ b/neural-networks/face-detection/head-posture-detection/utils/annotation_node.py @@ -2,7 +2,7 @@ import numpy as np import depthai as dai -from depthai_nodes import ImgDetectionsExtended, Predictions +from depthai_nodes import Predictions from depthai_nodes.utils import AnnotationHelper @@ -19,21 +19,19 @@ def build( return self def process(self, gather_data_msg: dai.Buffer) -> None: - img_detections_extended_msg: ImgDetectionsExtended = ( - gather_data_msg.reference_data - ) - assert isinstance(img_detections_extended_msg, ImgDetectionsExtended) + img_detections_msg: dai.ImgDetections = gather_data_msg.reference_data + assert isinstance(img_detections_msg, dai.ImgDetections) - pose_msg_group_list: List[dai.MessageGroup] = gather_data_msg.gathered + pose_msg_group_list: List[dai.MessageGroup] = gather_data_msg.items assert 
isinstance(pose_msg_group_list, list) assert all(isinstance(msg, dai.MessageGroup) for msg in pose_msg_group_list) - assert len(img_detections_extended_msg.detections) == len(pose_msg_group_list) + assert len(img_detections_msg.detections) == len(pose_msg_group_list) annotations = AnnotationHelper() - for img_detection_extended_msg, pose_msg_group in zip( - img_detections_extended_msg.detections, pose_msg_group_list + for img_detection_msg, pose_msg_group in zip( + img_detections_msg.detections, pose_msg_group_list ): yaw_msg: Predictions = pose_msg_group["0"] assert isinstance(yaw_msg, Predictions) @@ -49,15 +47,15 @@ def process(self, gather_data_msg: dai.Buffer) -> None: pose_information = f"Pitch: {pitch:.0f} \nYaw: {yaw:.0f} \nRoll: {roll:.0f}" - outer_points = img_detection_extended_msg.rotated_rect.getOuterRect() + outer_points = img_detection_msg.getBoundingBox().getOuterRect() x_min, y_min, x_max, _ = [np.round(x, 2) for x in outer_points] annotations.draw_text(pose_information, (x_max, y_min + 0.1), size=16) annotations.draw_text(pose_text, (x_min, y_min), size=28) annotations_msg = annotations.build( - timestamp=img_detections_extended_msg.getTimestamp(), - sequence_num=img_detections_extended_msg.getSequenceNum(), + timestamp=img_detections_msg.getTimestamp(), + sequence_num=img_detections_msg.getSequenceNum(), ) self.out.send(annotations_msg) diff --git a/neural-networks/feature-detection/xfeat/README.md b/neural-networks/feature-detection/xfeat/README.md index 809d7e464..06faa9a81 100644 --- a/neural-networks/feature-detection/xfeat/README.md +++ b/neural-networks/feature-detection/xfeat/README.md @@ -41,7 +41,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI 
Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/feature-detection/xfeat/requirements.txt b/neural-networks/feature-detection/xfeat/requirements.txt index df8d7aa85..1d4d576f6 100644 --- a/neural-networks/feature-detection/xfeat/requirements.txt +++ b/neural-networks/feature-detection/xfeat/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/generic-example/README.md b/neural-networks/generic-example/README.md index ad0ec4a87..2e3e761a8 100644 --- a/neural-networks/generic-example/README.md +++ b/neural-networks/generic-example/README.md @@ -30,7 +30,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/generic-example/requirements.txt b/neural-networks/generic-example/requirements.txt index e5aedc196..f0931a49d 100644 --- a/neural-networks/generic-example/requirements.txt +++ b/neural-networks/generic-example/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 python-dotenv diff --git a/neural-networks/object-detection/barcode-detection-conveyor-belt/README.md b/neural-networks/object-detection/barcode-detection-conveyor-belt/README.md index 2c2001b7e..d4b490595 100644 --- a/neural-networks/object-detection/barcode-detection-conveyor-belt/README.md +++ b/neural-networks/object-detection/barcode-detection-conveyor-belt/README.md @@ -59,7 +59,7 @@ sudo apt-get update && apt-get install -y libzbar0 libzbar-dev brew install zbar ``` -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/object-detection/barcode-detection-conveyor-belt/requirements.txt b/neural-networks/object-detection/barcode-detection-conveyor-belt/requirements.txt index 445462a22..40f198383 100644 --- a/neural-networks/object-detection/barcode-detection-conveyor-belt/requirements.txt +++ b/neural-networks/object-detection/barcode-detection-conveyor-belt/requirements.txt @@ -1,5 +1,5 @@ -depthai>=3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update numpy>=1.22 opencv-python-headless~=4.10.0 pyzbar==0.1.9 diff --git a/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/annotation_node.py b/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/annotation_node.py index b1a38851f..e2b987d2b 100644 --- a/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/annotation_node.py +++ b/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/annotation_node.py @@ -1,7 +1,7 @@ from typing import List import depthai as dai -from depthai_nodes import ImgDetectionsExtended, SECONDARY_COLOR +from depthai_nodes import SECONDARY_COLOR from depthai_nodes.utils import AnnotationHelper @@ -26,7 +26,7 @@ def run(self) -> None: while self.isRunning(): gather_data_msg: dai.Buffer = self.input.get() - img_detections_extended_msg: ImgDetectionsExtended = ( + img_detections_extended_msg: dai.ImgDetections = ( gather_data_msg.reference_data ) @@ -37,17 +37,10 @@ def run(self) -> None: for img_detection_extended_msg, msg_group in zip( img_detections_extended_msg.detections, msg_group_list ): - xmin, ymin, xmax, ymax = ( - img_detection_extended_msg.rotated_rect.getOuterRect() - ) - - try: - xmin = float(xmin) - ymin = float(ymin) - xmax = float(xmax) - ymax = float(ymax) - except Exception: - pass + xmin = img_detection_extended_msg.xmin + ymin = img_detection_extended_msg.ymin + xmax = img_detection_extended_msg.xmax + 
ymax = img_detection_extended_msg.ymax xmin = max(0.0, min(1.0, xmin)) ymin = max(0.0, min(1.0, ymin)) diff --git a/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/host_crop_config_creator.py b/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/host_crop_config_creator.py index bcee42f3b..1b79222fb 100644 --- a/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/host_crop_config_creator.py +++ b/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/host_crop_config_creator.py @@ -3,8 +3,6 @@ import depthai as dai -from depthai_nodes import ImgDetectionExtended, ImgDetectionsExtended - class CropConfigsCreator(dai.node.HostNode): """A node to create and send a dai.ImageManipConfigV2 crop configuration for each @@ -17,11 +15,11 @@ class CropConfigsCreator(dai.node.HostNode): Attributes ---------- detections_input : dai.Input - The input link for the ImageDetectionsExtended | dai.ImgDetections message. + The input link for the dai.ImgDetections message. config_output : dai.Output The output link for the ImageManipConfigV2 messages. detections_output : dai.Output - The output link for the ImgDetectionsExtended message. + The output link for the dai.ImgDetections message. source_size : Tuple[int, int] The size of the source image (width, height). target_size : Optional[Tuple[int, int]] = None @@ -146,7 +144,7 @@ def build( Parameters ---------- detections_input : dai.Node.Output - The input link for the ImgDetectionsExtended message + The input link for the dai.ImgDetections message source_size : Tuple[int, int] The size of the source image (width, height). target_size : Optional[Tuple[int, int]] @@ -170,24 +168,18 @@ def build( def process(self, detections_input: dai.Buffer) -> None: """Process the input detections and create crop configurations. This function is - ran every time a new ImgDetectionsExtended or dai.ImgDetections message is - received. 
+ ran every time a new dai.ImgDetections message is received. Sends len(detections) number of crop configurations to the config_output link. - In addition sends an ImgDetectionsExtended object containing the corresponding + In addition sends a dai.ImgDetections object containing the corresponding detections to the detections_output link. """ - assert isinstance(detections_input, (ImgDetectionsExtended, dai.ImgDetections)) + assert isinstance(detections_input, dai.ImgDetections) sequence_num = detections_input.getSequenceNum() timestamp = detections_input.getTimestamp() - if isinstance(detections_input, dai.ImgDetections): - detections_msg = self._convert_to_extended(detections_input) - else: - detections_msg = detections_input - - detections = detections_msg.detections + detections = detections_input.detections # Skip the current frame / load new frame cfg = dai.ImageManipConfig() @@ -206,11 +198,17 @@ def process(self, detections_input: dai.Buffer) -> None: for i in range(len(detections)): cfg = dai.ImageManipConfig() - detection: ImgDetectionExtended = detections[i] - rect = detection.rotated_rect - rect = rect.denormalize(self.w, self.h) + detection: dai.ImgDetection = detections[i] + + x_center = (detection.xmin + detection.xmax) / 2 + y_center = (detection.ymin + detection.ymax) / 2 + width = (detection.xmax - detection.xmin) * 1.15 + height = (detection.ymax - detection.ymin) * 1.15 + rect = dai.RotatedRect( + dai.Point2f(x_center, y_center), dai.Size2f(width, height), 0.0 + ) - cfg.addCropRotatedRect(rect, normalizedCoords=False) + cfg.addCropRotatedRect(rect, normalizedCoords=True) if self.target_w is not None and self.target_h is not None: cfg.setOutputSize(self.target_w, self.target_h, self.resize_mode) @@ -229,37 +227,7 @@ def process(self, detections_input: dai.Buffer) -> None: attempts += 1 time.sleep(0.001) # Small delay to prevent busy waiting - self.detections_output.send(detections_msg) - - def _convert_to_extended( - self, detections: 
dai.ImgDetections - ) -> ImgDetectionsExtended: - rotated_rectangle_detections = [] - for det in detections.detections: - img_detection = ImgDetectionExtended() - img_detection.label = det.label - img_detection.confidence = det.confidence - - x_center = (det.xmin + det.xmax) / 2 - y_center = (det.ymin + det.ymax) / 2 - width = det.xmax - det.xmin - height = det.ymax - det.ymin - width = width * 1.15 - height = height * 1.15 - - img_detection.rotated_rect = (x_center, y_center, width, height, 0.0) - - rotated_rectangle_detections.append(img_detection) - - img_detections_extended = ImgDetectionsExtended() - img_detections_extended.setSequenceNum(detections.getSequenceNum()) - img_detections_extended.setTimestamp(detections.getTimestamp()) - img_detections_extended.detections = rotated_rectangle_detections - transformation = detections.getTransformation() - if transformation is not None: - img_detections_extended.setTransformation(transformation) - - return img_detections_extended + self.detections_output.send(detections_input) def _validate_positive_integer(self, value: int): """Validates that the set size is a positive integer. 
diff --git a/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/simple_barcode_overlay.py b/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/simple_barcode_overlay.py index 3a99ee463..6ee6d6b85 100644 --- a/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/simple_barcode_overlay.py +++ b/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/simple_barcode_overlay.py @@ -228,7 +228,12 @@ def _draw_detection_boxes(self, frame, detections): h, w = frame.shape[:2] for detection in detections.detections: - xmin, ymin, xmax, ymax = detection.rotated_rect.getOuterRect() + xmin, ymin, xmax, ymax = ( + detection.xmin, + detection.ymin, + detection.xmax, + detection.ymax, + ) x1 = int(xmin * w) y1 = int(ymin * h) diff --git a/neural-networks/object-detection/human-machine-safety/README.md b/neural-networks/object-detection/human-machine-safety/README.md index 48e0c27c6..e7c12fa0e 100644 --- a/neural-networks/object-detection/human-machine-safety/README.md +++ b/neural-networks/object-detection/human-machine-safety/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/object-detection/human-machine-safety/main.py b/neural-networks/object-detection/human-machine-safety/main.py index ca818e59d..fa83f3f4a 100644 --- a/neural-networks/object-detection/human-machine-safety/main.py +++ b/neural-networks/object-detection/human-machine-safety/main.py @@ -4,7 +4,6 @@ MPPalmDetectionParser, DepthMerger, ImgDetectionsFilter, - ImgDetectionsBridge, ) from utils.arguments import initialize_argparser @@ -113,23 +112,19 @@ parser: MPPalmDetectionParser = palm_det_nn.getParser(0) parser.setConfidenceThreshold(0.7) - adapter = pipeline.create(ImgDetectionsBridge).build( - palm_det_nn.out, ignore_angle=True - ) - detection_depth_merger = pipeline.create(DepthMerger).build( - output_2d=obj_det_nn.out, - output_depth=stereo.depth, - calib_data=device.readCalibration2(), - depth_alignment_socket=dai.CameraBoardSocket.CAM_A, - shrinking_factor=0.1, + output2d=obj_det_nn.out, + outputDepth=stereo.depth, + calibData=device.readCalibration2(), + depthAlignmentSocket=dai.CameraBoardSocket.CAM_A, + shrinkingFactor=0.1, ) palm_depth_merger = pipeline.create(DepthMerger).build( - output_2d=adapter.out, - output_depth=stereo.depth, - calib_data=device.readCalibration2(), - depth_alignment_socket=dai.CameraBoardSocket.CAM_A, - shrinking_factor=0.1, + output2d=palm_det_nn.out, + outputDepth=stereo.depth, + calibData=device.readCalibration2(), + depthAlignmentSocket=dai.CameraBoardSocket.CAM_A, + shrinkingFactor=0.1, ) # merge both detections into one message @@ -143,8 +138,9 @@ filter_labels = [merged_labels.index(i) for i in DANGEROUS_OBJECTS] filter_labels.append(merged_labels.index("palm")) detection_filter = pipeline.create(ImgDetectionsFilter).build( - merge_detections.output, labels_to_keep=filter_labels + merge_detections.output ) + detection_filter.keepLabels(filter_labels) # annotation measure_object_distance = pipeline.create(MeasureObjectDistance).build( diff --git 
a/neural-networks/object-detection/human-machine-safety/requirements.txt b/neural-networks/object-detection/human-machine-safety/requirements.txt index 56b6f790b..1106ae10a 100644 --- a/neural-networks/object-detection/human-machine-safety/requirements.txt +++ b/neural-networks/object-detection/human-machine-safety/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git a/neural-networks/object-detection/human-machine-safety/utils/annotation_node.py b/neural-networks/object-detection/human-machine-safety/utils/annotation_node.py index 4a55d9d22..fc6bf59cd 100644 --- a/neural-networks/object-detection/human-machine-safety/utils/annotation_node.py +++ b/neural-networks/object-detection/human-machine-safety/utils/annotation_node.py @@ -1,10 +1,9 @@ import depthai as dai -from depthai_nodes import ImgDetectionsExtended, ImgDetectionExtended import cv2 class AnnotationNode(dai.node.HostNode): - """Transforms ImgDetectionsExtended received from parsers to dai.ImgDetections""" + """Transforms received detections from parsers to dai.ImgDetections""" def __init__(self) -> None: super().__init__() @@ -28,21 +27,29 @@ def process( depth_msg: dai.ImgFrame, ): assert isinstance(detections_msg, dai.SpatialImgDetections) - img_detections = ImgDetectionsExtended() + img_detections = dai.ImgDetections() + det_list = [] for detection in detections_msg.detections: detection: dai.SpatialImgDetection = detection - img_detection = ImgDetectionExtended() + img_detection = dai.ImgDetection() img_detection.label = detection.label - rotated_rect = ( - (detection.xmax + detection.xmin) / 2, - (detection.ymax + detection.ymin) / 2, - detection.xmax - detection.xmin, - detection.ymax - detection.ymin, - 0, + img_detection.labelName = detection.labelName + img_detection.setBoundingBox( + dai.RotatedRect( + dai.Point2f( + 
(detection.xmax + detection.xmin) / 2, + (detection.ymax + detection.ymin) / 2, + ), + dai.Size2f( + detection.xmax - detection.xmin, + detection.ymax - detection.ymin, + ), + 0, + ) ) - img_detection.rotated_rect = rotated_rect img_detection.confidence = detection.confidence - img_detections.detections.append(img_detection) + det_list.append(img_detection) + img_detections.detections = det_list depth_map = depth_msg.getFrame() colorred_depth_map = cv2.applyColorMap( diff --git a/neural-networks/object-detection/human-machine-safety/utils/detection_merger.py b/neural-networks/object-detection/human-machine-safety/utils/detection_merger.py index 371019ad6..7b7246711 100644 --- a/neural-networks/object-detection/human-machine-safety/utils/detection_merger.py +++ b/neural-networks/object-detection/human-machine-safety/utils/detection_merger.py @@ -1,5 +1,4 @@ import depthai as dai -from depthai_nodes import ImgDetectionsExtended class DetectionMerger(dai.node.HostNode): @@ -22,7 +21,7 @@ def build(self, det_nn_1: dai.Node.Output, det_nn_2: dai.Node.Output): def process(self, det_nn_1: dai.Buffer, det_nn_2: dai.Buffer) -> dai.ImgDetections: assert isinstance( det_nn_1, - (dai.ImgDetections, ImgDetectionsExtended, dai.SpatialImgDetections), + (dai.ImgDetections, dai.SpatialImgDetections), ) assert type(det_nn_1) is type(det_nn_2) new_dets = type(det_nn_1)() diff --git a/neural-networks/object-detection/social-distancing/README.md b/neural-networks/object-detection/social-distancing/README.md index 0774666a7..115bb885b 100644 --- a/neural-networks/object-detection/social-distancing/README.md +++ b/neural-networks/object-detection/social-distancing/README.md @@ -29,7 +29,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - 
[DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/object-detection/social-distancing/main.py b/neural-networks/object-detection/social-distancing/main.py index 0e5a32b94..a90a26449 100644 --- a/neural-networks/object-detection/social-distancing/main.py +++ b/neural-networks/object-detection/social-distancing/main.py @@ -75,16 +75,16 @@ nn_parser: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( input=rgb, - nn_source=det_model_nn_archive, + nnSource=det_model_nn_archive, ) # produce spatial detections depth_merger = pipeline.create(DepthMerger).build( - output_2d=nn_parser.out, - output_depth=stereo.depth, - calib_data=device.readCalibration2(), - depth_alignment_socket=dai.CameraBoardSocket.CAM_A, - shrinking_factor=0.1, + output2d=nn_parser.out, + outputDepth=stereo.depth, + calibData=device.readCalibration2(), + depthAlignmentSocket=dai.CameraBoardSocket.CAM_A, + shrinkingFactor=0.1, ) # annotation diff --git a/neural-networks/object-detection/social-distancing/requirements.txt b/neural-networks/object-detection/social-distancing/requirements.txt index df8d7aa85..0939d9a84 100644 --- a/neural-networks/object-detection/social-distancing/requirements.txt +++ b/neural-networks/object-detection/social-distancing/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/object-detection/spatial-detections/README.md b/neural-networks/object-detection/spatial-detections/README.md index ce228ec7e..64db5d4c5 100644 --- a/neural-networks/object-detection/spatial-detections/README.md +++ b/neural-networks/object-detection/spatial-detections/README.md @@ -31,7 +31,7 @@ Here is a list of all available parameters: ### Installation -You need to first 
prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/object-detection/spatial-detections/main.py b/neural-networks/object-detection/spatial-detections/main.py index 965b2b64f..712ff1dd8 100644 --- a/neural-networks/object-detection/spatial-detections/main.py +++ b/neural-networks/object-detection/spatial-detections/main.py @@ -1,5 +1,5 @@ import depthai as dai -from depthai_nodes.node import ApplyColormap +from depthai_nodes.node import ApplyDepthColormap from utils.arguments import initialize_argparser from utils.annotation_node import AnnotationNode @@ -73,7 +73,7 @@ input_detections=nn.out, depth=stereo.depth, labels=classes ) - apply_colormap = pipeline.create(ApplyColormap).build(stereo.depth) + apply_colormap = pipeline.create(ApplyDepthColormap).build(stereo.depth) # video encoding cam_nv12 = cam.requestOutput( diff --git a/neural-networks/object-detection/spatial-detections/requirements.txt b/neural-networks/object-detection/spatial-detections/requirements.txt index 56b6f790b..3b7d19fe5 100644 --- a/neural-networks/object-detection/spatial-detections/requirements.txt +++ b/neural-networks/object-detection/spatial-detections/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 diff --git a/neural-networks/object-detection/text-blur/README.md b/neural-networks/object-detection/text-blur/README.md index 4015282c2..c8da17f10 100644 --- a/neural-networks/object-detection/text-blur/README.md +++ b/neural-networks/object-detection/text-blur/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a 
**Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/object-detection/text-blur/requirements.txt b/neural-networks/object-detection/text-blur/requirements.txt index df8d7aa85..0939d9a84 100644 --- a/neural-networks/object-detection/text-blur/requirements.txt +++ b/neural-networks/object-detection/text-blur/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/object-detection/text-blur/utils/blur_detections.py b/neural-networks/object-detection/text-blur/utils/blur_detections.py index 2a888f52d..b6a69b6ac 100644 --- a/neural-networks/object-detection/text-blur/utils/blur_detections.py +++ b/neural-networks/object-detection/text-blur/utils/blur_detections.py @@ -18,10 +18,11 @@ def run(self) -> None: frame = self.input_frame.get() frame_copy = frame.getCvFrame() detections = self.input_detections.get().detections + # detections: dai.ImgDetections h, w = frame_copy.shape[:2] for detection in detections: - rect: dai.RotatedRect = detection.rotated_rect + rect: dai.RotatedRect = detection.getBoundingBox() rect = rect.denormalize(w, h) detection = rect.getOuterRect() bbox = [int(d) for d in detection] diff --git a/neural-networks/object-detection/thermal-detection/README.md b/neural-networks/object-detection/thermal-detection/README.md index b1e71e9ed..1b1e8e386 100644 --- a/neural-networks/object-detection/thermal-detection/README.md +++ b/neural-networks/object-detection/thermal-detection/README.md @@ -31,7 +31,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a 
**Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/object-detection/thermal-detection/main.py b/neural-networks/object-detection/thermal-detection/main.py index 092fd4e21..5abb7d19d 100644 --- a/neural-networks/object-detection/thermal-detection/main.py +++ b/neural-networks/object-detection/thermal-detection/main.py @@ -27,10 +27,14 @@ print("Creating pipeline...") # detection model - det_model_description = dai.NNModelDescription.fromYamlFile( - f"thermal_person_detection.{platform}.yaml" - ) - if det_model_description.model != args.model: + model_yaml = Path(f"depthai_models/thermal_person_detection.{platform}.yaml") + if model_yaml.exists(): + det_model_description = dai.NNModelDescription.fromYamlFile(str(model_yaml)) + if det_model_description.model != args.model: + det_model_description = dai.NNModelDescription( + args.model, platform=platform + ) + else: det_model_description = dai.NNModelDescription(args.model, platform=platform) det_model_nn_archive = dai.NNArchive(dai.getModelFromZoo(det_model_description)) det_model_w, det_model_h = det_model_nn_archive.getInputSize() diff --git a/neural-networks/object-detection/thermal-detection/requirements.txt b/neural-networks/object-detection/thermal-detection/requirements.txt index 8e4027fc1..98ae4b842 100644 --- a/neural-networks/object-detection/thermal-detection/requirements.txt +++ b/neural-networks/object-detection/thermal-detection/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.4.0 +depthai-nodes==0.4.0 \ No newline at end of file diff --git a/neural-networks/object-detection/yolo-host-decoding/README.md b/neural-networks/object-detection/yolo-host-decoding/README.md index b11cde72d..011d21399 100644 --- a/neural-networks/object-detection/yolo-host-decoding/README.md +++ 
b/neural-networks/object-detection/yolo-host-decoding/README.md @@ -35,7 +35,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/object-detection/yolo-host-decoding/requirements.txt b/neural-networks/object-detection/yolo-host-decoding/requirements.txt index 877bcf059..e0918ed1d 100644 --- a/neural-networks/object-detection/yolo-host-decoding/requirements.txt +++ b/neural-networks/object-detection/yolo-host-decoding/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 +depthai==3.4.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/object-detection/yolo-p/README.md b/neural-networks/object-detection/yolo-p/README.md index 2cfe16cdd..b406ab27c 100644 --- a/neural-networks/object-detection/yolo-p/README.md +++ b/neural-networks/object-detection/yolo-p/README.md @@ -31,7 +31,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/object-detection/yolo-p/requirements.txt b/neural-networks/object-detection/yolo-p/requirements.txt index df8d7aa85..0939d9a84 100644 --- a/neural-networks/object-detection/yolo-p/requirements.txt +++ b/neural-networks/object-detection/yolo-p/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/object-detection/yolo-p/utils/annotation_node.py b/neural-networks/object-detection/yolo-p/utils/annotation_node.py index 41ade20f8..7189d58d0 100644 --- a/neural-networks/object-detection/yolo-p/utils/annotation_node.py +++ b/neural-networks/object-detection/yolo-p/utils/annotation_node.py @@ -1,5 +1,5 @@ import depthai as dai -from depthai_nodes import ImgDetectionsExtended, SegmentationMask +from depthai_nodes import SegmentationMask import cv2 import numpy as np @@ -31,7 +31,7 @@ def process( lane_segmentations_message: dai.Buffer, ) -> None: assert isinstance(frame, dai.ImgFrame) - assert isinstance(detections_message, ImgDetectionsExtended) + assert isinstance(detections_message, dai.ImgDetections) assert isinstance(road_segmentations_message, SegmentationMask) assert isinstance(lane_segmentations_message, SegmentationMask) diff --git a/neural-networks/object-detection/yolo-world/README.md b/neural-networks/object-detection/yolo-world/README.md index e2a506cff..cd030feec 100644 --- a/neural-networks/object-detection/yolo-world/README.md +++ b/neural-networks/object-detection/yolo-world/README.md @@ -33,7 +33,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - 
[DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/object-detection/yolo-world/main.py b/neural-networks/object-detection/yolo-world/main.py index 7eb41a5a9..647923fcc 100644 --- a/neural-networks/object-detection/yolo-world/main.py +++ b/neural-networks/object-detection/yolo-world/main.py @@ -8,7 +8,7 @@ from utils.helper_functions import extract_text_embeddings from utils.arguments import initialize_argparser -from utils.annotation_node import AnnotationNode +from utils.detections_label_mapper import DetectionsLabelMapper MAX_NUM_CLASSES = 80 @@ -82,20 +82,18 @@ # filter and rename detection labels det_process_filter = pipeline.create(ImgDetectionsFilter).build(nn_with_parser.out) - det_process_filter.setLabels( - labels=[i for i in range(len(args.class_names))], keep=True - ) - annotation_node = pipeline.create(AnnotationNode).build( + det_process_filter.keepLabels([i for i in range(len(args.class_names))]) + + label_mapper = pipeline.create(DetectionsLabelMapper).build( det_process_filter.out, label_encoding={k: v for k, v in enumerate(args.class_names)}, ) # visualization - visualizer.addTopic("Detections", annotation_node.out) + visualizer.addTopic("Detections", label_mapper.out) visualizer.addTopic("Video", nn_with_parser.passthroughs["images"]) print("Pipeline created.") - pipeline.start() visualizer.registerPipeline(pipeline) diff --git a/neural-networks/object-detection/yolo-world/requirements.txt b/neural-networks/object-detection/yolo-world/requirements.txt index 62271199f..ce2d52904 100644 --- a/neural-networks/object-detection/yolo-world/requirements.txt +++ b/neural-networks/object-detection/yolo-world/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless~=4.10.0 numpy>=1.22 onnxruntime diff --git 
a/neural-networks/object-detection/yolo-world/utils/annotation_node.py b/neural-networks/object-detection/yolo-world/utils/detections_label_mapper.py similarity index 73% rename from neural-networks/object-detection/yolo-world/utils/annotation_node.py rename to neural-networks/object-detection/yolo-world/utils/detections_label_mapper.py index 95a14dd6a..2b36c0222 100644 --- a/neural-networks/object-detection/yolo-world/utils/annotation_node.py +++ b/neural-networks/object-detection/yolo-world/utils/detections_label_mapper.py @@ -1,13 +1,11 @@ import depthai as dai -from depthai_nodes import ImgDetectionsExtended from typing import Dict -class AnnotationNode(dai.node.HostNode): +class DetectionsLabelMapper(dai.node.HostNode): def __init__(self, label_encoding: Dict[int, str] = {}) -> None: super().__init__() self._label_encoding = label_encoding - self.out_detections = self.createOutput() def setLabelEncoding(self, label_encoding: Dict[int, str]) -> None: """Sets the label encoding. @@ -22,7 +20,7 @@ def setLabelEncoding(self, label_encoding: Dict[int, str]) -> None: def build( self, detections: dai.Node.Output, label_encoding: Dict[int, str] = None - ) -> "AnnotationNode": + ) -> "DetectionsLabelMapper": if label_encoding is not None: self.setLabelEncoding(label_encoding) self.link_args(detections) @@ -32,7 +30,7 @@ def process( self, detections_message: dai.Buffer, ) -> None: - assert isinstance(detections_message, ImgDetectionsExtended) + assert isinstance(detections_message, dai.ImgDetections) for detection in detections_message.detections: - detection.label_name = self._label_encoding.get(detection.label, "unknown") - return detections_message + detection.labelName = self._label_encoding.get(detection.label, "unknown") + self.out.send(detections_message) diff --git a/neural-networks/object-tracking/collision-avoidance/README.md b/neural-networks/object-tracking/collision-avoidance/README.md index d7c3c7e8b..de20d9ee7 100644 --- 
a/neural-networks/object-tracking/collision-avoidance/README.md +++ b/neural-networks/object-tracking/collision-avoidance/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/object-tracking/collision-avoidance/main.py b/neural-networks/object-tracking/collision-avoidance/main.py index b3b0b99f2..95da73804 100644 --- a/neural-networks/object-tracking/collision-avoidance/main.py +++ b/neural-networks/object-tracking/collision-avoidance/main.py @@ -66,9 +66,8 @@ nn_archive, numShaves=6 ) # TODO: change to numShaves=4 if running on OAK-D Lite - img_detections_filter = pipeline.create(ImgDetectionsFilter).build( - nn.out, labels_to_keep=[person_label] - ) + img_detections_filter = pipeline.create(ImgDetectionsFilter).build(nn.out) + img_detections_filter.keepLabels([person_label]) # keep only person detections # tracking tracker = pipeline.create(dai.node.ObjectTracker) @@ -92,8 +91,8 @@ # visualization visualizer.addTopic("Video", nn.passthrough, "images") visualizer.addTopic("Tracklets", collision_avoidance.out, "images") - visualizer.addTopic("Direction", collision_avoidance.out_direction, "images") - visualizer.addTopic("Bird Frame", birds_eye_view.output, "images") + visualizer.addTopic("Direction", collision_avoidance.out_direction) + visualizer.addTopic("Bird Frame", birds_eye_view.output) print("Pipeline created.") pipeline.start() diff --git a/neural-networks/object-tracking/collision-avoidance/requirements.txt b/neural-networks/object-tracking/collision-avoidance/requirements.txt index 8e4027fc1..1106ae10a 100644 --- a/neural-networks/object-tracking/collision-avoidance/requirements.txt +++ 
b/neural-networks/object-tracking/collision-avoidance/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git a/neural-networks/object-tracking/deepsort-tracking/README.md b/neural-networks/object-tracking/deepsort-tracking/README.md index fe3bf7a61..9e6b3ca28 100644 --- a/neural-networks/object-tracking/deepsort-tracking/README.md +++ b/neural-networks/object-tracking/deepsort-tracking/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/object-tracking/deepsort-tracking/main.py b/neural-networks/object-tracking/deepsort-tracking/main.py index b353c2ef2..00ca6ac98 100644 --- a/neural-networks/object-tracking/deepsort-tracking/main.py +++ b/neural-networks/object-tracking/deepsort-tracking/main.py @@ -1,8 +1,7 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, GatherData -from depthai_nodes.node.utils import generate_script_content +from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper from utils.arguments import initialize_argparser from utils.deepsort_tracking import DeepsortTracking @@ -79,30 +78,27 @@ ) # detection processing - script = pipeline.create(dai.node.Script) - det_nn.out.link(script.inputs["det_in"]) - det_nn.passthrough.link(script.inputs["preview"]) - script_content = generate_script_content( - resize_width=embeddings_model_w, - resize_height=embeddings_model_h, - padding=0, + crop_node = ( + 
pipeline.create(FrameCropper) + .fromImgDetections( + inputImgDetections=det_nn.out, + ) + .build( + inputImage=det_nn.passthrough, + outputSize=(embeddings_model_w, embeddings_model_h), + ) ) - script.setScript(script_content) - - crop_node = pipeline.create(dai.node.ImageManip) - crop_node.initialConfig.setOutputSize(embeddings_model_w, embeddings_model_h) - - script.outputs["manip_cfg"].link(crop_node.inputConfig) - script.outputs["manip_img"].link(crop_node.inputImage) embeddings_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( crop_node.out, embeddings_model_nn_archive ) # detections and embeddings sync - gather_data = pipeline.create(GatherData).build(camera_fps=args.fps_limit) - det_nn.out.link(gather_data.input_reference) - embeddings_nn.out.link(gather_data.input_data) + gather_data = pipeline.create(GatherData).build( + cameraFps=args.fps_limit, + inputData=embeddings_nn.out, + inputReference=det_nn.out, + ) # tracking deepsort_tracking = pipeline.create(DeepsortTracking).build( diff --git a/neural-networks/object-tracking/deepsort-tracking/requirements.txt b/neural-networks/object-tracking/deepsort-tracking/requirements.txt index e8f5a20a1..77bf06e35 100644 --- a/neural-networks/object-tracking/deepsort-tracking/requirements.txt +++ b/neural-networks/object-tracking/deepsort-tracking/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update numpy>=1.22 opencv-python-headless~=4.10.0 -scipy \ No newline at end of file +scipy diff --git a/neural-networks/object-tracking/deepsort-tracking/utils/deepsort_tracking.py b/neural-networks/object-tracking/deepsort-tracking/utils/deepsort_tracking.py index b0b9f5824..10ba2c09d 100644 --- a/neural-networks/object-tracking/deepsort-tracking/utils/deepsort_tracking.py +++ b/neural-networks/object-tracking/deepsort-tracking/utils/deepsort_tracking.py @@ -2,7 +2,7 @@ from 
deep_sort_realtime.deepsort_tracker import DeepSort from typing import List -from depthai_nodes import GatheredData +from depthai_nodes.message import GatheredData from .visualized_tracklets import VisualizedTracklets @@ -41,7 +41,7 @@ def process(self, img_frame: dai.ImgFrame, gathered_data: dai.Buffer) -> None: detections: dai.ImgDetections = gathered_data.reference_data detections = detections.detections - recognitions: dai.NNData = gathered_data.gathered + recognitions: list[dai.NNData] = gathered_data.items tracklets = VisualizedTracklets() tracklets.setLabels(self._labels) diff --git a/neural-networks/object-tracking/kalman/README.md b/neural-networks/object-tracking/kalman/README.md index 1bc816326..652f560b8 100644 --- a/neural-networks/object-tracking/kalman/README.md +++ b/neural-networks/object-tracking/kalman/README.md @@ -35,7 +35,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/object-tracking/kalman/requirements.txt b/neural-networks/object-tracking/kalman/requirements.txt index 8e4027fc1..98ae4b842 100644 --- a/neural-networks/object-tracking/kalman/requirements.txt +++ b/neural-networks/object-tracking/kalman/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.4.0 +depthai-nodes==0.4.0 \ No newline at end of file diff --git a/neural-networks/object-tracking/people-tracker/README.md b/neural-networks/object-tracking/people-tracker/README.md index d66c3ff29..cdd5000b4 100644 --- a/neural-networks/object-tracking/people-tracker/README.md +++ b/neural-networks/object-tracking/people-tracker/README.md @@ -34,7 +34,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/object-tracking/people-tracker/main.py b/neural-networks/object-tracking/people-tracker/main.py index 9c399ec20..ff15aa988 100644 --- a/neural-networks/object-tracking/people-tracker/main.py +++ b/neural-networks/object-tracking/people-tracker/main.py @@ -1,7 +1,7 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, ImgDetectionsBridge +from depthai_nodes.node import ParsingNeuralNetwork from utils.arguments import initialize_argparser from utils.people_counter import PeopleCounter @@ -48,8 +48,6 @@ ) # tracking - bridge = pipeline.create(ImgDetectionsBridge).build(nn.out, ignore_angle=True) - tracker = pipeline.create(dai.node.ObjectTracker) tracker.setDetectionLabelsToTrack([0]) if platform == "RVC2": @@ -60,7 +58,7 @@ tracker.setTrackerThreshold(0.4) nn.passthrough.link(tracker.inputTrackerFrame) nn.passthrough.link(tracker.inputDetectionFrame) - bridge.out.link(tracker.inputDetections) + nn.out.link(tracker.inputDetections) # annotation tracklet_visualizer = pipeline.create(TrackletVisualizer).build( diff --git a/neural-networks/object-tracking/people-tracker/requirements.txt b/neural-networks/object-tracking/people-tracker/requirements.txt index 8e4027fc1..98ae4b842 100644 --- a/neural-networks/object-tracking/people-tracker/requirements.txt +++ b/neural-networks/object-tracking/people-tracker/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.4.0 +depthai-nodes==0.4.0 \ No newline at end of file diff --git a/neural-networks/ocr/general-ocr/README.md b/neural-networks/ocr/general-ocr/README.md index 86e4dbd4d..4fc68c3bd 100644 --- a/neural-networks/ocr/general-ocr/README.md +++ b/neural-networks/ocr/general-ocr/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a 
**Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/ocr/general-ocr/main.py b/neural-networks/ocr/general-ocr/main.py index 67ad009a7..5631141c6 100644 --- a/neural-networks/ocr/general-ocr/main.py +++ b/neural-networks/ocr/general-ocr/main.py @@ -1,7 +1,7 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, GatherData +from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper from utils.annotation_node import OCRAnnotationNode from utils.arguments import initialize_argparser @@ -72,22 +72,17 @@ ) det_nn.setNumPoolFrames(30) - # detection processing - detection_process_node = pipeline.create(CropConfigsCreator) - detection_process_node.build( + # detection processing and crops config creation + crop_configs_creator = pipeline.create(CropConfigsCreator) + crop_configs_creator.build( det_nn.out, (REQ_WIDTH, REQ_HEIGHT), (rec_model_w, rec_model_h) ) - crop_node = pipeline.create(dai.node.ImageManip) - crop_node.initialConfig.setReusePreviousImage(False) - crop_node.inputConfig.setReusePreviousMessage(False) - crop_node.inputImage.setReusePreviousMessage(True) - crop_node.inputConfig.setMaxSize(30) - crop_node.inputImage.setMaxSize(30) - crop_node.setNumFramesPool(30) - - detection_process_node.config_output.link(crop_node.inputConfig) - input_node_out.link(crop_node.inputImage) + crop_node = ( + pipeline.create(FrameCropper) + .fromManipConfigs(crop_configs_creator.config_output) + .build(input_node_out, (rec_model_w, rec_model_h)) + ) ocr_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( crop_node.out, rec_model_nn_archive @@ -96,9 +91,11 @@ ocr_nn.input.setMaxSize(30) # detections and recognitions sync - gather_data_node = pipeline.create(GatherData).build(args.fps_limit) - 
detection_process_node.detections_output.link(gather_data_node.input_reference) - ocr_nn.out.link(gather_data_node.input_data) + gather_data_node = pipeline.create(GatherData).build( + cameraFps=args.fps_limit, + inputData=ocr_nn.out, + inputReference=crop_configs_creator.detections_output, + ) # annotation annotation_node = pipeline.create(OCRAnnotationNode) diff --git a/neural-networks/ocr/general-ocr/requirements.txt b/neural-networks/ocr/general-ocr/requirements.txt index cf3ffa17a..65ae35051 100644 --- a/neural-networks/ocr/general-ocr/requirements.txt +++ b/neural-networks/ocr/general-ocr/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless==4.10.0.84 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/ocr/general-ocr/utils/annotation_node.py b/neural-networks/ocr/general-ocr/utils/annotation_node.py index 3003a46cb..502ead1c9 100644 --- a/neural-networks/ocr/general-ocr/utils/annotation_node.py +++ b/neural-networks/ocr/general-ocr/utils/annotation_node.py @@ -18,7 +18,7 @@ def run(self): passthrough_frame = self.passthrough.get() detections_list = text_descriptions.reference_data.detections - recognitions_list = text_descriptions.gathered + recognitions_list = text_descriptions.items w, h = passthrough_frame.getWidth(), passthrough_frame.getHeight() @@ -27,7 +27,7 @@ def run(self): for i, recognition in enumerate(recognitions_list): detection = detections_list[i] - points = detection.rotated_rect.getPoints() + points = detection.getBoundingBox().getPoints() text_line = "" for text, score in zip(recognition.classes, recognition.scores): diff --git a/neural-networks/ocr/general-ocr/utils/host_process_detections.py b/neural-networks/ocr/general-ocr/utils/host_process_detections.py index 0e2d1f371..730cdd542 100644 --- a/neural-networks/ocr/general-ocr/utils/host_process_detections.py +++ 
b/neural-networks/ocr/general-ocr/utils/host_process_detections.py @@ -2,8 +2,6 @@ import depthai as dai -from depthai_nodes import ImgDetectionExtended, ImgDetectionsExtended - class CropConfigsCreator(dai.node.HostNode): """A node to create and send a dai.ImageManipConfig crop configuration for each @@ -16,11 +14,11 @@ class CropConfigsCreator(dai.node.HostNode): Attributes ---------- detections_input : dai.Input - The input link for the ImageDetectionsExtended | dai.ImgDetections message. + The input link for the dai.ImgDetections message. config_output : dai.Output The output link for the ImageManipConfig messages. detections_output : dai.Output - The output link for the ImgDetectionsExtended message. + The output link for the dai.ImgDetections message. source_size : Tuple[int, int] The size of the source image (width, height). target_size : Optional[Tuple[int, int]] = None @@ -32,11 +30,7 @@ class CropConfigsCreator(dai.node.HostNode): def __init__(self) -> None: """Initializes the node.""" super().__init__() - self.config_output = self.createOutput( - possibleDatatypes=[ - dai.Node.DatatypeHierarchy(dai.DatatypeEnum.ImageManipConfig, True) - ] - ) + self.config_output = self.createOutput() self.detections_output = self.createOutput( possibleDatatypes=[ dai.Node.DatatypeHierarchy(dai.DatatypeEnum.Buffer, True) @@ -96,7 +90,7 @@ def build( Parameters ---------- detections_input : dai.Node.Output - The input link for the ImgDetectionsExtended message + The input link for the dai.ImgDetections message source_size : Tuple[int, int] The size of the source image (width, height). target_size : Optional[Tuple[int, int]] @@ -120,38 +114,26 @@ def build( def process(self, detections_input: dai.Buffer) -> None: """Process the input detections and create crop configurations. This function is - ran every time a new ImgDetectionsExtended or dai.ImgDetections message is + ran every time a new dai.ImgDetections message is received. 
Sends len(detections) number of crop configurations to the config_output link. - In addition sends an ImgDetectionsExtended object containing the corresponding + In addition sends a dai.ImgDetections object containing the corresponding detections to the detections_output link. """ - assert isinstance(detections_input, (ImgDetectionsExtended, dai.ImgDetections)) + assert isinstance(detections_input, dai.ImgDetections) sequence_num = detections_input.getSequenceNum() timestamp = detections_input.getTimestamp() - if isinstance(detections_input, dai.ImgDetections): - detections_msg = self._convert_to_extended(detections_input) - else: - detections_msg = detections_input - - detections = detections_msg.detections - - # Skip the current frame / load new frame - cfg = dai.ImageManipConfig() - cfg.setSkipCurrentImage(True) - cfg.setTimestamp(timestamp) - cfg.setSequenceNum(sequence_num) - send_status = False - while not send_status: - send_status = self.config_output.trySend(cfg) + detections = detections_input.detections + + configs_group = dai.MessageGroup() valid_detections = [] for detection in detections: if detection.confidence > 0.8: - rect = detection.rotated_rect + rect = detection.getBoundingBox() rect = self._expand_rect(rect) xmin, ymin, xmax, ymax = rect.getOuterRect() @@ -171,59 +153,29 @@ def process(self, detections_input: dai.Buffer) -> None: if self.target_w is not None and self.target_h is not None: cfg.setOutputSize(self.target_w, self.target_h, self.resize_mode) - cfg.setReusePreviousImage(True) cfg.setTimestamp(timestamp) cfg.setSequenceNum(sequence_num) + configs_group[f"cfg_{len(valid_detections) - 1}"] = cfg - send_status = False - while not send_status: - send_status = self.config_output.trySend(cfg) + configs_group.setTimestamp(timestamp) + configs_group.setSequenceNum(sequence_num) + self.config_output.send(configs_group) - valid_msg = ImgDetectionsExtended() + valid_msg = dai.ImgDetections() valid_msg.setSequenceNum(sequence_num) 
valid_msg.setTimestamp(timestamp) valid_msg.detections = valid_detections - valid_msg.setTransformation(detections_msg.getTransformation()) + valid_msg.setTransformation(detections_input.getTransformation()) self.detections_output.send(valid_msg) + def _validate_positive_integer(self, value: int) -> None: + if not isinstance(value, int) or value <= 0: + raise ValueError(f"Expected a positive integer, got {value!r}") + def _expand_rect(self, rect: dai.RotatedRect) -> dai.RotatedRect: s = rect.size rect.size = dai.Size2f(s.width * 1.03, s.height * 1.10) return rect - - def _convert_to_extended( - self, detections: dai.ImgDetections - ) -> ImgDetectionsExtended: - rotated_rectangle_detections = [] - for det in detections.detections: - img_detection = ImgDetectionExtended() - img_detection.label = det.label - img_detection.confidence = det.confidence - - x_center = (det.xmin + det.xmax) / 2 - y_center = (det.ymin + det.ymax) / 2 - width = det.xmax - det.xmin - height = det.ymax - det.ymin - - img_detection.rotated_rect = (x_center, y_center, width, height, 0.0) - - rotated_rectangle_detections.append(img_detection) - - img_detections_extended = ImgDetectionsExtended() - img_detections_extended.setSequenceNum(detections.getSequenceNum()) - img_detections_extended.setTimestamp(detections.getTimestamp()) - img_detections_extended.detections = rotated_rectangle_detections - transformation = detections.getTransformation() - if transformation is not None: - img_detections_extended.setTransformation(transformation) - - return img_detections_extended - - def _validate_positive_integer(self, value: int): - if not isinstance(value, int): - raise TypeError("Value must be an integer.") - if value < 1: - raise ValueError("Value must be greater than 1.") diff --git a/neural-networks/ocr/license-plate-recognition/README.md b/neural-networks/ocr/license-plate-recognition/README.md index b3208f4c3..b4033b969 100644 --- a/neural-networks/ocr/license-plate-recognition/README.md +++ 
b/neural-networks/ocr/license-plate-recognition/README.md @@ -38,7 +38,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/ocr/license-plate-recognition/requirements.txt b/neural-networks/ocr/license-plate-recognition/requirements.txt index df8d7aa85..1d4d576f6 100644 --- a/neural-networks/ocr/license-plate-recognition/requirements.txt +++ b/neural-networks/ocr/license-plate-recognition/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/pose-estimation/animal-pose/README.md b/neural-networks/pose-estimation/animal-pose/README.md index fc728069d..0bd40421e 100644 --- a/neural-networks/pose-estimation/animal-pose/README.md +++ b/neural-networks/pose-estimation/animal-pose/README.md @@ -31,7 +31,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/pose-estimation/animal-pose/main.py b/neural-networks/pose-estimation/animal-pose/main.py index 43b679b89..25ff3a912 100644 --- a/neural-networks/pose-estimation/animal-pose/main.py +++ b/neural-networks/pose-estimation/animal-pose/main.py @@ -3,11 +3,10 @@ import depthai as dai from depthai_nodes.node import ( ParsingNeuralNetwork, - ImgDetectionsBridge, ImgDetectionsFilter, GatherData, + FrameCropper, ) -from depthai_nodes.node.utils import generate_script_content from utils.arguments import initialize_argparser from utils.annotation_node import AnnotationNode @@ -62,41 +61,33 @@ input_node, det_nn_archive, fps=args.fps_limit ) - detections_filter = pipeline.create(ImgDetectionsFilter).build( - detection_nn.out, labels_to_keep=VALID_LABELS - ) + detections_filter = pipeline.create(ImgDetectionsFilter).build(detection_nn.out) + detections_filter.keepLabels(VALID_LABELS) # detection processing - script = pipeline.create(dai.node.Script) - detections_filter.out.link(script.inputs["det_in"]) - detection_nn.passthrough.link(script.inputs["preview"]) - script_content = generate_script_content( - resize_width=pose_model_w, - resize_height=pose_model_h, - padding=PADDING, - resize_mode="STRETCH", + pose_manip = ( + pipeline.create(FrameCropper) + .fromImgDetections( + inputImgDetections=detections_filter.out, + padding=PADDING, + ) + .build( + inputImage=detection_nn.passthrough, + outputSize=(pose_model_w, pose_model_h), + resizeMode=dai.ImageManipConfig.ResizeMode.STRETCH, + ) ) - script.setScript(script_content) - - pose_manip = pipeline.create(dai.node.ImageManip) - pose_manip.initialConfig.setOutputSize(pose_model_w, pose_model_h) - pose_manip.inputConfig.setWaitForMessage(True) - - script.outputs["manip_cfg"].link(pose_manip.inputConfig) - script.outputs["manip_img"].link(pose_manip.inputImage) pose_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( pose_manip.out, pose_nn_archive ) - detections_bridge = 
pipeline.create(ImgDetectionsBridge).build( - detections_filter.out - ) - # detections and pose estimations sync - gather_data = pipeline.create(GatherData).build(args.fps_limit) - detections_bridge.out.link(gather_data.input_reference) - pose_nn.out.link(gather_data.input_data) + gather_data = pipeline.create(GatherData).build( + cameraFps=args.fps_limit, + inputData=pose_nn.out, + inputReference=detections_filter.out, + ) # annotation connection_pairs = ( diff --git a/neural-networks/pose-estimation/animal-pose/requirements.txt b/neural-networks/pose-estimation/animal-pose/requirements.txt index e561ab514..1106ae10a 100644 --- a/neural-networks/pose-estimation/animal-pose/requirements.txt +++ b/neural-networks/pose-estimation/animal-pose/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.5 \ No newline at end of file +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git a/neural-networks/pose-estimation/animal-pose/utils/annotation_node.py b/neural-networks/pose-estimation/animal-pose/utils/annotation_node.py index bfd82ee63..2b8711851 100644 --- a/neural-networks/pose-estimation/animal-pose/utils/annotation_node.py +++ b/neural-networks/pose-estimation/animal-pose/utils/annotation_node.py @@ -1,8 +1,5 @@ import depthai as dai from depthai_nodes import ( - ImgDetectionsExtended, - ImgDetectionExtended, - Keypoints, GatheredData, PRIMARY_COLOR, SECONDARY_COLOR, @@ -38,27 +35,25 @@ def build( def process(self, gathered_data: dai.Buffer) -> None: assert isinstance(gathered_data, GatheredData) - detections_message: ImgDetectionsExtended = gathered_data.reference_data + detections_message: dai.ImgDetections = gathered_data.reference_data - detections_list: List[ImgDetectionExtended] = detections_message.detections + detections_list: List[dai.ImgDetection] = detections_message.detections annotation_helper = AnnotationHelper() padding = self.padding for 
ix, detection in enumerate(detections_list): - detection.label_name = ( - "Animal" # Because dai.ImgDetection does not have label_name - ) + detection.labelName = "Animal" - keypoints_message: Keypoints = gathered_data.gathered[ix] - xmin, ymin, xmax, ymax = detection.rotated_rect.getOuterRect() + keypoints_message: dai.KeypointsList = gathered_data.items[ix] + xmin, ymin, xmax, ymax = detection.getBoundingBox().getOuterRect() slope_x = (xmax + padding) - (xmin - padding) slope_y = (ymax + padding) - (ymin - padding) xs = [] ys = [] - for kp in keypoints_message.keypoints: + for kp in keypoints_message.getKeypoints(): x = min(max(xmin - padding + slope_x * kp.x, 0.0), 1.0) y = min(max(ymin - padding + slope_y * kp.y, 0.0), 1.0) xs.append(x) diff --git a/neural-networks/pose-estimation/hand-pose/README.md b/neural-networks/pose-estimation/hand-pose/README.md index 7771a6e80..42800d56a 100644 --- a/neural-networks/pose-estimation/hand-pose/README.md +++ b/neural-networks/pose-estimation/hand-pose/README.md @@ -31,7 +31,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/pose-estimation/hand-pose/main.py b/neural-networks/pose-estimation/hand-pose/main.py index 82d5489c0..a0a192696 100644 --- a/neural-networks/pose-estimation/hand-pose/main.py +++ b/neural-networks/pose-estimation/hand-pose/main.py @@ -1,7 +1,7 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, GatherData +from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper from utils.arguments import initialize_argparser from utils.annotation_node import AnnotationNode @@ -79,36 +79,27 @@ target_size=(pose_nn_archive.getInputWidth(), pose_nn_archive.getInputHeight()), ) - script = pipeline.create(dai.node.Script) - script.setScriptPath(str(Path(__file__).parent / "utils/script.py")) - script.inputs["frame_input"].setMaxSize(30) - script.inputs["config_input"].setMaxSize(30) - script.inputs["num_configs_input"].setMaxSize(30) - - detection_nn.passthrough.link(script.inputs["frame_input"]) - detections_processor.config_output.link(script.inputs["config_input"]) - detections_processor.num_configs_output.link(script.inputs["num_configs_input"]) - - pose_manip = pipeline.create(dai.node.ImageManip) - pose_manip.initialConfig.setOutputSize( - pose_nn_archive.getInputWidth(), pose_nn_archive.getInputHeight() + # hand crop + pose estimation + crop_output_size = ( + pose_nn_archive.getInputWidth(), + pose_nn_archive.getInputHeight(), + ) + hand_crop_node = ( + pipeline.create(FrameCropper) + .fromManipConfigs(detections_processor.config_output) + .build(detection_nn.passthrough, crop_output_size) ) - pose_manip.inputConfig.setMaxSize(30) - pose_manip.inputImage.setMaxSize(30) - pose_manip.setNumFramesPool(30) - pose_manip.inputConfig.setWaitForMessage(True) - - script.outputs["output_config"].link(pose_manip.inputConfig) - script.outputs["output_frame"].link(pose_manip.inputImage) pose_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( - pose_manip.out, 
pose_nn_archive + hand_crop_node.out, pose_nn_archive ) # detections and pose estimations sync - gather_data = pipeline.create(GatherData).build(camera_fps=args.fps_limit) - detection_nn.out.link(gather_data.input_reference) - pose_nn.outputs.link(gather_data.input_data) + gather_data = pipeline.create(GatherData).build( + cameraFps=args.fps_limit, + inputData=pose_nn.outputs, + inputReference=detection_nn.out, + ) # annotation connection_pairs = ( diff --git a/neural-networks/pose-estimation/hand-pose/requirements.txt b/neural-networks/pose-estimation/hand-pose/requirements.txt index 56b6f790b..e42562133 100644 --- a/neural-networks/pose-estimation/hand-pose/requirements.txt +++ b/neural-networks/pose-estimation/hand-pose/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update diff --git a/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py b/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py index 18efc36f1..09ff3cb14 100644 --- a/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py +++ b/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py @@ -1,8 +1,5 @@ import depthai as dai from depthai_nodes import ( - ImgDetectionsExtended, - ImgDetectionExtended, - Keypoints, Predictions, GatheredData, SECONDARY_COLOR, @@ -43,18 +40,19 @@ def build( def process(self, gathered_data: dai.Buffer, video_message: dai.ImgFrame) -> None: assert isinstance(gathered_data, GatheredData) - detections_message: ImgDetectionsExtended = gathered_data.reference_data - detections_list: List[ImgDetectionExtended] = detections_message.detections + detections_message: dai.ImgDetections = gathered_data.reference_data + detections_list: List[dai.ImgDetection] = detections_message.detections - new_dets = ImgDetectionsExtended() - new_dets.transformation = video_message.getTransformation() + new_dets = 
dai.ImgDetections() + new_dets.setTransformation(video_message.getTransformation()) annotation_helper = AnnotationHelper() + det_list = [] for ix, detection in enumerate(detections_list): - keypoints_msg: Keypoints = gathered_data.gathered[ix]["0"] - confidence_msg: Predictions = gathered_data.gathered[ix]["1"] - handness_msg: Predictions = gathered_data.gathered[ix]["2"] + keypoints_msg: dai.KeypointsList = gathered_data.items[ix]["0"] + confidence_msg: Predictions = gathered_data.items[ix]["1"] + handness_msg: Predictions = gathered_data.items[ix]["2"] hand_confidence = confidence_msg.prediction handness = handness_msg.prediction @@ -62,36 +60,40 @@ def process(self, gathered_data: dai.Buffer, video_message: dai.ImgFrame) -> Non if hand_confidence < self.confidence_threshold: continue - width = detection.rotated_rect.size.width - height = detection.rotated_rect.size.height + width = detection.getBoundingBox().size.width + height = detection.getBoundingBox().size.height - xmin = detection.rotated_rect.center.x - width / 2 - xmax = detection.rotated_rect.center.x + width / 2 - ymin = detection.rotated_rect.center.y - height / 2 - ymax = detection.rotated_rect.center.y + height / 2 + xmin = detection.getBoundingBox().center.x - width / 2 + xmax = detection.getBoundingBox().center.x + width / 2 + ymin = detection.getBoundingBox().center.y - height / 2 + ymax = detection.getBoundingBox().center.y + height / 2 padding = self.padding_factor slope_x = (xmax + padding) - (xmin - padding) slope_y = (ymax + padding) - (ymin - padding) - new_det = ImgDetectionExtended() - new_det.rotated_rect = ( - detection.rotated_rect.center.x, - detection.rotated_rect.center.y, - detection.rotated_rect.size.width + 2 * padding, - detection.rotated_rect.size.height + 2 * padding, - detection.rotated_rect.angle, + new_det = dai.ImgDetection() + rotated_rect = detection.getBoundingBox() + new_det.setBoundingBox( + dai.RotatedRect( + rotated_rect.center, + dai.Size2f( + 
rotated_rect.size.width + 2 * padding, + rotated_rect.size.height + 2 * padding, + ), + rotated_rect.angle, + ) ) new_det.label = 0 - new_det.label_name = "Hand" + new_det.labelName = "Hand" new_det.confidence = detection.confidence - new_dets.detections.append(new_det) + det_list.append(new_det) xs = [] ys = [] - for kp in keypoints_msg.keypoints: + for kp in keypoints_msg.getKeypoints(): x = min(max(xmin - padding + slope_x * kp.x, 0.0), 1.0) y = min(max(ymin - padding + slope_y * kp.y, 0.0), 1.0) xs.append(x) @@ -111,8 +113,8 @@ def process(self, gathered_data: dai.Buffer, video_message: dai.ImgFrame) -> Non text = "Left" if handness < 0.5 else "Right" text += f" {gesture}" - text_x = detection.rotated_rect.center.x - 0.05 - text_y = detection.rotated_rect.center.y - height / 2 - 0.10 + text_x = detection.getBoundingBox().center.x - 0.05 + text_y = detection.getBoundingBox().center.y - height / 2 - 0.10 annotation_helper.draw_text( text=text, @@ -125,6 +127,7 @@ def process(self, gathered_data: dai.Buffer, video_message: dai.ImgFrame) -> Non points=keypoints, color=SECONDARY_COLOR, thickness=2 ) + new_dets.detections = det_list new_dets.setTimestamp(detections_message.getTimestamp()) new_dets.setSequenceNum(detections_message.getSequenceNum()) self.out_detections.send(new_dets) diff --git a/neural-networks/pose-estimation/hand-pose/utils/process.py b/neural-networks/pose-estimation/hand-pose/utils/process.py index 9db495bf2..da06df3ba 100644 --- a/neural-networks/pose-estimation/hand-pose/utils/process.py +++ b/neural-networks/pose-estimation/hand-pose/utils/process.py @@ -1,5 +1,4 @@ import depthai as dai -from depthai_nodes import ImgDetectionsExtended, ImgDetectionExtended from typing import Tuple @@ -14,9 +13,7 @@ class ProcessDetections(dai.node.HostNode): detections_input : dai.Input The input message for the detections. config_output : dai.Output - The output message for the ImageManipConfig objects. 
- num_configs_output : dai.Output - The output message for the number of configs. + The output message for the ImageManipConfig objects packed in a MessageGroup. padding: float The padding factor to enlarge the bounding box a little bit. @@ -26,7 +23,6 @@ def __init__(self): super().__init__() self.detections_input = self.createInput() self.config_output = self.createOutput() - self.num_configs_output = self.createOutput() self.padding = 0.1 self._target_h = None self._target_w = None @@ -44,26 +40,20 @@ def build( return self def process(self, img_detections: dai.Buffer) -> None: - assert isinstance(img_detections, ImgDetectionsExtended) + assert isinstance(img_detections, dai.ImgDetections) detections = img_detections.detections - num_detections = len(detections) - num_cfgs_message = dai.Buffer(num_detections) - - num_cfgs_message.setTimestamp(img_detections.getTimestamp()) - num_cfgs_message.setSequenceNum(img_detections.getSequenceNum()) - self.num_configs_output.send(num_cfgs_message) - + configs_group = dai.MessageGroup() for i, detection in enumerate(detections): cfg = dai.ImageManipConfig() - detection: ImgDetectionExtended = detection - rect = detection.rotated_rect + detection: dai.ImgDetection = detection + rect = detection.getBoundingBox() new_rect = dai.RotatedRect() new_rect.center.x = rect.center.x new_rect.center.y = rect.center.y - new_rect.size.width = rect.size.width + 0.1 * 2 - new_rect.size.height = rect.size.height + 0.1 * 2 + new_rect.size.width = rect.size.width + self.padding * 2 + new_rect.size.height = rect.size.height + self.padding * 2 new_rect.angle = 0 cfg.addCropRotatedRect(new_rect, normalizedCoords=True) @@ -75,4 +65,8 @@ def process(self, img_detections: dai.Buffer) -> None: cfg.setReusePreviousImage(False) cfg.setTimestamp(img_detections.getTimestamp()) cfg.setSequenceNum(img_detections.getSequenceNum()) - self.config_output.send(cfg) + configs_group[f"cfg_{i}"] = cfg + + configs_group.setTimestamp(img_detections.getTimestamp()) 
+ configs_group.setSequenceNum(img_detections.getSequenceNum()) + self.config_output.send(configs_group) diff --git a/neural-networks/pose-estimation/human-pose/README.md b/neural-networks/pose-estimation/human-pose/README.md index 1591f0024..5ea6876df 100644 --- a/neural-networks/pose-estimation/human-pose/README.md +++ b/neural-networks/pose-estimation/human-pose/README.md @@ -35,7 +35,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/pose-estimation/human-pose/main.py b/neural-networks/pose-estimation/human-pose/main.py index fb3458fc1..0098d5054 100644 --- a/neural-networks/pose-estimation/human-pose/main.py +++ b/neural-networks/pose-estimation/human-pose/main.py @@ -3,11 +3,11 @@ import depthai as dai from depthai_nodes.node import ( ParsingNeuralNetwork, - HRNetParser, GatherData, ImgDetectionsFilter, + FrameCropper, ) -from depthai_nodes.node.utils import generate_script_content +from depthai_nodes.node.parsers import HRNetParser from utils.arguments import initialize_argparser from utils.annotation_node import AnnotationNode @@ -68,28 +68,25 @@ valid_labels = [ det_model_nn_archive.getConfig().model.heads[0].metadata.classes.index("person") ] - detections_filter = pipeline.create(ImgDetectionsFilter).build( - det_nn.out, labels_to_keep=valid_labels + detections_filter = pipeline.create(ImgDetectionsFilter).build(det_nn.out) + detections_filter.keepLabels( + valid_labels ) # we only want to work with person detections - script_node = pipeline.create(dai.node.Script) - det_nn.out.link(script_node.inputs["det_in"]) - det_nn.passthrough.link(script_node.inputs["preview"]) - script_content = generate_script_content( - 
resize_width=rec_model_nn_archive.getInputWidth(), - resize_height=rec_model_nn_archive.getInputHeight(), - padding=PADDING, + crop_node = ( + pipeline.create(FrameCropper) + .fromImgDetections( + inputImgDetections=det_nn.out, + padding=PADDING, + ) + .build( + inputImage=det_nn.passthrough, + outputSize=( + rec_model_nn_archive.getInputWidth(), + rec_model_nn_archive.getInputHeight(), + ), + ) ) - script_node.setScript(script_content) - - crop_node = pipeline.create(dai.node.ImageManip) - crop_node.initialConfig.setOutputSize( - rec_model_nn_archive.getInputWidth(), rec_model_nn_archive.getInputHeight() - ) - crop_node.inputConfig.setWaitForMessage(True) - - script_node.outputs["manip_cfg"].link(crop_node.inputConfig) - script_node.outputs["manip_img"].link(crop_node.inputImage) rec_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( crop_node.out, rec_model_nn_archive @@ -102,9 +99,11 @@ ) # to get all keypoints so we can draw skeleton. We will filter them later. # detections and recognitions sync - gather_data_node = pipeline.create(GatherData).build(args.fps_limit) - rec_nn.out.link(gather_data_node.input_data) - detections_filter.out.link(gather_data_node.input_reference) + gather_data_node = pipeline.create(GatherData).build( + cameraFps=args.fps_limit, + inputData=rec_nn.out, + inputReference=detections_filter.out, + ) # annotation skeleton_edges = ( diff --git a/neural-networks/pose-estimation/human-pose/requirements.txt b/neural-networks/pose-estimation/human-pose/requirements.txt index e561ab514..e42562133 100644 --- a/neural-networks/pose-estimation/human-pose/requirements.txt +++ b/neural-networks/pose-estimation/human-pose/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.5 \ No newline at end of file +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update diff --git a/neural-networks/pose-estimation/human-pose/utils/annotation_node.py 
b/neural-networks/pose-estimation/human-pose/utils/annotation_node.py index 42421ad99..2af03104a 100644 --- a/neural-networks/pose-estimation/human-pose/utils/annotation_node.py +++ b/neural-networks/pose-estimation/human-pose/utils/annotation_node.py @@ -1,6 +1,5 @@ from typing import List, Optional import depthai as dai -from depthai_nodes import Keypoints from depthai_nodes.utils import AnnotationHelper @@ -34,9 +33,9 @@ def process(self, gather_data_msg: dai.Buffer) -> None: img_detections_msg: dai.ImgDetections = gather_data_msg.reference_data assert isinstance(img_detections_msg, dai.ImgDetections) - keypoints_msg_list: List[Keypoints] = gather_data_msg.gathered + keypoints_msg_list: List[dai.KeypointsList] = gather_data_msg.items assert isinstance(keypoints_msg_list, list) - assert all(isinstance(msg, Keypoints) for msg in keypoints_msg_list) + assert all(isinstance(msg, dai.KeypointsList) for msg in keypoints_msg_list) annotations = AnnotationHelper() @@ -55,7 +54,7 @@ def process(self, gather_data_msg: dai.Buffer) -> None: xs = [] ys = [] confidences = [] - for keypoint_msg in keypoints_msg.keypoints: + for keypoint_msg in keypoints_msg.getKeypoints(): x = min( max(xmin - self.padding + slope_x * keypoint_msg.x, 0.0), 1.0, diff --git a/neural-networks/reidentification/human-reidentification/README.md b/neural-networks/reidentification/human-reidentification/README.md index 744d93b6d..aae0d7b96 100644 --- a/neural-networks/reidentification/human-reidentification/README.md +++ b/neural-networks/reidentification/human-reidentification/README.md @@ -42,7 +42,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/reidentification/human-reidentification/main.py b/neural-networks/reidentification/human-reidentification/main.py index cd9bc3332..ca2d9a4f9 100644 --- a/neural-networks/reidentification/human-reidentification/main.py +++ b/neural-networks/reidentification/human-reidentification/main.py @@ -1,8 +1,7 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, GatherData, ImgDetectionsBridge -from depthai_nodes.node.utils import generate_script_content +from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper from utils.arguments import initialize_argparser from utils.identification import IdentificationNode @@ -91,36 +90,30 @@ resize_node.out, det_model_nn_archive ) - # detection processing - det_bridge = pipeline.create(ImgDetectionsBridge).build( - det_nn.out - ) # TODO: remove once we have it working with ImgDetectionsExtended - script_node = pipeline.create(dai.node.Script) - det_bridge.out.link(script_node.inputs["det_in"]) - input_node_out.link(script_node.inputs["preview"]) - script_content = generate_script_content( - resize_width=rec_nn_archive.getInputWidth(), - resize_height=rec_nn_archive.getInputHeight(), - ) - script_node.setScript(script_content) - - crop_node = pipeline.create(dai.node.ImageManip) - crop_node.initialConfig.setOutputSize( - rec_nn_archive.getInputWidth(), rec_nn_archive.getInputHeight() + crop_node = ( + pipeline.create(FrameCropper) + .fromImgDetections( + inputImgDetections=det_nn.out, + ) + .build( + inputImage=input_node_out, + outputSize=( + rec_nn_archive.getInputWidth(), + rec_nn_archive.getInputHeight(), + ), + ) ) - crop_node.inputConfig.setWaitForMessage(True) - - script_node.outputs["manip_cfg"].link(crop_node.inputConfig) - script_node.outputs["manip_img"].link(crop_node.inputImage) rec_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( crop_node.out, rec_nn_archive ) # detections and recognitions sync - 
gather_data_node = pipeline.create(GatherData).build(args.fps_limit) - rec_nn.out.link(gather_data_node.input_data) - det_nn.out.link(gather_data_node.input_reference) + gather_data_node = pipeline.create(GatherData).build( + cameraFps=args.fps_limit, + inputData=rec_nn.out, + inputReference=det_nn.out, + ) # idenfication id_node = pipeline.create(IdentificationNode).build(gather_data_node.out, csim=CSIM) diff --git a/neural-networks/reidentification/human-reidentification/requirements.txt b/neural-networks/reidentification/human-reidentification/requirements.txt index 4013f7f77..1780d3592 100644 --- a/neural-networks/reidentification/human-reidentification/requirements.txt +++ b/neural-networks/reidentification/human-reidentification/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update numpy>=1.22 diff --git a/neural-networks/reidentification/human-reidentification/utils/identification.py b/neural-networks/reidentification/human-reidentification/utils/identification.py index 9c943f44f..5306ddaff 100644 --- a/neural-networks/reidentification/human-reidentification/utils/identification.py +++ b/neural-networks/reidentification/human-reidentification/utils/identification.py @@ -1,8 +1,6 @@ import numpy as np import depthai as dai -from depthai_nodes import ImgDetectionsExtended - class IdentificationNode(dai.node.HostNode): """A host node that re-identifies objects based on their embeddings similarity to a database of embeddings. 
@@ -52,15 +50,15 @@ def build( return self def process(self, gather_data_msg) -> None: - dets_msg: ImgDetectionsExtended = gather_data_msg.reference_data - assert isinstance(dets_msg, ImgDetectionsExtended) + dets_msg: dai.ImgDetections = gather_data_msg.reference_data + assert isinstance(dets_msg, dai.ImgDetections) - rec_msg_list = gather_data_msg.gathered + rec_msg_list = gather_data_msg.items assert isinstance(rec_msg_list, list) assert all(isinstance(msg, dai.NNData) for msg in rec_msg_list) for detection, rec in zip(dets_msg.detections, rec_msg_list): - detection.label_name = self._get_label_name(rec, self._label_basename) + detection.labelName = self._get_label_name(rec, self._label_basename) self.out.send(dets_msg) diff --git a/neural-networks/segmentation/blur-background/README.md b/neural-networks/segmentation/blur-background/README.md index 9c04f5df7..eb72b8c3d 100644 --- a/neural-networks/segmentation/blur-background/README.md +++ b/neural-networks/segmentation/blur-background/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/segmentation/blur-background/requirements.txt b/neural-networks/segmentation/blur-background/requirements.txt index df8d7aa85..1d4d576f6 100644 --- a/neural-networks/segmentation/blur-background/requirements.txt +++ b/neural-networks/segmentation/blur-background/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/segmentation/depth-crop/README.md b/neural-networks/segmentation/depth-crop/README.md index 0d8923087..55e183567 100644 --- a/neural-networks/segmentation/depth-crop/README.md +++ b/neural-networks/segmentation/depth-crop/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/segmentation/depth-crop/main.py b/neural-networks/segmentation/depth-crop/main.py index 7bde9b40d..97251240b 100755 --- a/neural-networks/segmentation/depth-crop/main.py +++ b/neural-networks/segmentation/depth-crop/main.py @@ -62,7 +62,7 @@ color_output.link(manip.inputImage) nn = pipeline.create(ParsingNeuralNetwork).build( - nn_source=nn_archive, input=manip.out + nnSource=nn_archive, input=manip.out ) # annotation diff --git a/neural-networks/segmentation/depth-crop/requirements.txt b/neural-networks/segmentation/depth-crop/requirements.txt index 8e4027fc1..98ae4b842 100644 --- a/neural-networks/segmentation/depth-crop/requirements.txt +++ b/neural-networks/segmentation/depth-crop/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.4.0 +depthai-nodes==0.4.0 \ No newline at end of file diff --git a/neural-networks/speech-recognition/whisper-tiny-en/README.md b/neural-networks/speech-recognition/whisper-tiny-en/README.md index a6f01ae9d..70d4f9d27 100644 --- a/neural-networks/speech-recognition/whisper-tiny-en/README.md +++ b/neural-networks/speech-recognition/whisper-tiny-en/README.md @@ -52,7 +52,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/speech-recognition/whisper-tiny-en/requirements.txt b/neural-networks/speech-recognition/whisper-tiny-en/requirements.txt index a36509b78..677a98fe7 100644 --- a/neural-networks/speech-recognition/whisper-tiny-en/requirements.txt +++ b/neural-networks/speech-recognition/whisper-tiny-en/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 numpy>=1.22 scipy tqdm diff --git a/streaming/mjpeg-streaming/README.md b/streaming/mjpeg-streaming/README.md index 70132e804..f06a5cbc5 100644 --- a/streaming/mjpeg-streaming/README.md +++ b/streaming/mjpeg-streaming/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/streaming/mjpeg-streaming/requirements.txt b/streaming/mjpeg-streaming/requirements.txt index d3c270afc..09e49daa1 100644 --- a/streaming/mjpeg-streaming/requirements.txt +++ b/streaming/mjpeg-streaming/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 diff --git a/streaming/on-device-encoding/README.md b/streaming/on-device-encoding/README.md index e1c85a7cb..bb25338c6 100644 --- a/streaming/on-device-encoding/README.md +++ b/streaming/on-device-encoding/README.md @@ -33,7 +33,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/streaming/on-device-encoding/requirements.txt b/streaming/on-device-encoding/requirements.txt index 60dc17665..b81f2b18e 100644 --- a/streaming/on-device-encoding/requirements.txt +++ b/streaming/on-device-encoding/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.0.0 +depthai==3.4.0 av==12.3.0 numpy>=1.22 \ No newline at end of file diff --git a/streaming/poe-mqtt/README.md b/streaming/poe-mqtt/README.md index e41fc969e..f68929440 100644 --- a/streaming/poe-mqtt/README.md +++ b/streaming/poe-mqtt/README.md @@ -42,7 +42,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/streaming/poe-mqtt/requirements.txt b/streaming/poe-mqtt/requirements.txt index 8e4027fc1..98ae4b842 100644 --- a/streaming/poe-mqtt/requirements.txt +++ b/streaming/poe-mqtt/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.4.0 +depthai-nodes==0.4.0 \ No newline at end of file diff --git a/streaming/poe-tcp-streaming/README.md b/streaming/poe-tcp-streaming/README.md index f4b1b504f..23cc4baf3 100644 --- a/streaming/poe-tcp-streaming/README.md +++ b/streaming/poe-tcp-streaming/README.md @@ -56,7 +56,7 @@ positional arguments: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/streaming/poe-tcp-streaming/requirements.txt b/streaming/poe-tcp-streaming/requirements.txt index 2da0fec82..059031e53 100644 --- a/streaming/poe-tcp-streaming/requirements.txt +++ b/streaming/poe-tcp-streaming/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.0.0 +depthai==3.4.0 opencv-python~=4.10.0 numpy>=1.22 diff --git a/streaming/rtsp-streaming/README.md b/streaming/rtsp-streaming/README.md index baea63247..ea585b432 100644 --- a/streaming/rtsp-streaming/README.md +++ b/streaming/rtsp-streaming/README.md @@ -25,7 +25,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/streaming/rtsp-streaming/requirements.txt b/streaming/rtsp-streaming/requirements.txt index 4e6b1d24f..124919f4e 100644 --- a/streaming/rtsp-streaming/requirements.txt +++ b/streaming/rtsp-streaming/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.0.0 +depthai==3.4.0 numpy>=1.22 
PyGObject==3.46.0 \ No newline at end of file diff --git a/streaming/webrtc-streaming/README.md b/streaming/webrtc-streaming/README.md index 9bf74cdc3..67bb16d44 100644 --- a/streaming/webrtc-streaming/README.md +++ b/streaming/webrtc-streaming/README.md @@ -16,7 +16,7 @@ You can run the example fully on device ([`STANDALONE` mode](#standalone-mode-rv ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/streaming/webrtc-streaming/requirements.txt b/streaming/webrtc-streaming/requirements.txt index f8b01f680..540d3573e 100644 --- a/streaming/webrtc-streaming/requirements.txt +++ b/streaming/webrtc-streaming/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update aiortc==1.9.0 aiohttp>=3.10.0,<4.0 aiohttp-cors==0.7.0 diff --git a/streaming/webrtc-streaming/utils/transform.py b/streaming/webrtc-streaming/utils/transform.py index d6267c6ce..c1af2720e 100644 --- a/streaming/webrtc-streaming/utils/transform.py +++ b/streaming/webrtc-streaming/utils/transform.py @@ -4,7 +4,6 @@ from aiortc import VideoStreamTrack from av import VideoFrame from depthai_nodes.node import ParsingNeuralNetwork -from depthai_nodes import ImgDetectionExtended class VideoTransform(VideoStreamTrack): @@ -44,9 +43,7 @@ async def parse_frame(self): frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) for detection in dets: - if isinstance(detection, ImgDetectionExtended): - bbox = frameNorm(frame, detection.rotated_rect.getOuterRect()) - elif isinstance(detection, dai.ImgDetection): + if isinstance(detection, dai.ImgDetection): bbox = frameNorm( frame, ( diff --git a/tests/constants.py 
b/tests/constants.py index ccefab174..a1e0e9b24 100644 --- a/tests/constants.py +++ b/tests/constants.py @@ -189,6 +189,5 @@ "Network compiled for 8 shaves, maximum available", "UserWarning: Specified provider 'TensorrtExecutionProvider' is not in available", "UserWarning: Specified provider 'CUDAExecutionProvider' is not in available", - "You are using ImgDetectionsBridge to transform from ImgDetectionsExtended to ImgDetections.", "Sync node has been trying to sync for", ] diff --git a/tutorials/camera-demo/README.md b/tutorials/camera-demo/README.md index 0f46f9d93..a26f30721 100644 --- a/tutorials/camera-demo/README.md +++ b/tutorials/camera-demo/README.md @@ -26,7 +26,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/tutorials/camera-demo/requirements.txt b/tutorials/camera-demo/requirements.txt index 877bcf059..e0918ed1d 100644 --- a/tutorials/camera-demo/requirements.txt +++ b/tutorials/camera-demo/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 +depthai==3.4.0 numpy>=1.22 \ No newline at end of file diff --git a/tutorials/camera-stereo-depth/README.md b/tutorials/camera-stereo-depth/README.md index d29bf3b73..377029808 100644 --- a/tutorials/camera-stereo-depth/README.md +++ b/tutorials/camera-stereo-depth/README.md @@ -25,7 +25,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/tutorials/custom-models/README.md b/tutorials/custom-models/README.md index 75ed60804..90b2ee8d4 100644 --- a/tutorials/custom-models/README.md +++ b/tutorials/custom-models/README.md @@ -36,7 +36,7 @@ For more information see [README.md](generate_model/README.md) file in the `gene ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/tutorials/custom-models/generate_model/requirements.txt b/tutorials/custom-models/generate_model/requirements.txt index 0804d8b48..d3595f13e 100644 --- a/tutorials/custom-models/generate_model/requirements.txt +++ b/tutorials/custom-models/generate_model/requirements.txt @@ -1,5 +1,5 @@ / -depthai==3.0.0 +depthai==3.4.0 modelconv==0.3.3 numpy==1.23.0 onnx==1.17.0 diff --git a/tutorials/custom-models/requirements.txt b/tutorials/custom-models/requirements.txt index 1fa4798a1..d9388de9e 100644 --- a/tutorials/custom-models/requirements.txt +++ b/tutorials/custom-models/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 numpy>=1.22 opencv-python-headless~=4.10.0 diff --git a/tutorials/display-detections/README.md b/tutorials/display-detections/README.md index 965157d2b..d3ce6c4f8 100644 --- a/tutorials/display-detections/README.md +++ b/tutorials/display-detections/README.md @@ -41,7 +41,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/tutorials/display-detections/requirements.txt b/tutorials/display-detections/requirements.txt index 1fa4798a1..d9388de9e 100644 --- a/tutorials/display-detections/requirements.txt +++ b/tutorials/display-detections/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 numpy>=1.22 opencv-python-headless~=4.10.0 diff --git a/tutorials/full-fov-nn/README.md b/tutorials/full-fov-nn/README.md index 9d620e6da..6bed76b0e 100644 --- a/tutorials/full-fov-nn/README.md +++ b/tutorials/full-fov-nn/README.md @@ -60,7 +60,7 @@ These scripts run only in the corresponding mode, which cannot be toggled during ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/tutorials/full-fov-nn/requirements.txt b/tutorials/full-fov-nn/requirements.txt index 56b6f790b..3b7d19fe5 100644 --- a/tutorials/full-fov-nn/requirements.txt +++ b/tutorials/full-fov-nn/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 diff --git a/tutorials/multiple-devices/multi-cam-calibration/README.md b/tutorials/multiple-devices/multi-cam-calibration/README.md index 70bbcb5b1..b5fb64831 100644 --- a/tutorials/multiple-devices/multi-cam-calibration/README.md +++ b/tutorials/multiple-devices/multi-cam-calibration/README.md @@ -86,7 +86,7 @@ Here is a list of all available parameters: Running in peripheral mode requires a host computer and there will be communication between device and host which could affect the overall speed of the app. You can find more information about the supported devices and the set up instructions in our [Documentation](https://rvc4.docs.luxonis.com/hardware). 
-Moreover, you need to prepare a **Python 3.10** environment with the following packages installed: +Moreover, you need to prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/tutorials/multiple-devices/multi-cam-calibration/requirements.txt b/tutorials/multiple-devices/multi-cam-calibration/requirements.txt index 556ae9181..73ab045e9 100644 --- a/tutorials/multiple-devices/multi-cam-calibration/requirements.txt +++ b/tutorials/multiple-devices/multi-cam-calibration/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 opencv-python==4.10.0.84 numpy>=1.22 diff --git a/tutorials/multiple-devices/multiple-device-stitch-nn/README.md b/tutorials/multiple-devices/multiple-device-stitch-nn/README.md index 1c967ab39..e0a2d0165 100644 --- a/tutorials/multiple-devices/multiple-device-stitch-nn/README.md +++ b/tutorials/multiple-devices/multiple-device-stitch-nn/README.md @@ -39,7 +39,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/tutorials/multiple-devices/multiple-device-stitch-nn/requirements.txt b/tutorials/multiple-devices/multiple-device-stitch-nn/requirements.txt index f02d8d291..9af2d834b 100644 --- a/tutorials/multiple-devices/multiple-device-stitch-nn/requirements.txt +++ b/tutorials/multiple-devices/multiple-device-stitch-nn/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 imutils stitching==0.6.1 \ No newline at end of file diff --git a/tutorials/multiple-devices/multiple-devices-preview/README.md b/tutorials/multiple-devices/multiple-devices-preview/README.md index e1878eccf..fd1deabce 100644 --- a/tutorials/multiple-devices/multiple-devices-preview/README.md +++ b/tutorials/multiple-devices/multiple-devices-preview/README.md @@ -53,7 +53,7 @@ Here is a list of all available parameters: Running in peripheral mode requires a host computer and there will be communication between device and host which could affect the overall speed of the app. You can find more information about the supported devices and the set up instructions in our [Documentation](https://rvc4.docs.luxonis.com/hardware). 
-Moreover, you need to prepare a **Python 3.10** environment with the following packages installed: +Moreover, you need to prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/) diff --git a/tutorials/multiple-devices/multiple-devices-preview/requirements.txt b/tutorials/multiple-devices/multiple-devices-preview/requirements.txt index 8e4027fc1..98ae4b842 100644 --- a/tutorials/multiple-devices/multiple-devices-preview/requirements.txt +++ b/tutorials/multiple-devices/multiple-devices-preview/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.4.0 +depthai-nodes==0.4.0 \ No newline at end of file diff --git a/tutorials/multiple-devices/spatial-detection-fusion/README.md b/tutorials/multiple-devices/spatial-detection-fusion/README.md index 4df79ae49..12091d7a3 100644 --- a/tutorials/multiple-devices/spatial-detection-fusion/README.md +++ b/tutorials/multiple-devices/spatial-detection-fusion/README.md @@ -52,7 +52,7 @@ Here is a list of all available parameters: Running in peripheral mode requires a host computer and there will be communication between device and host which could affect the overall speed of the app. You can find more information about the supported devices and the set up instructions in our [Documentation](https://rvc4.docs.luxonis.com/hardware). 
-Moreover, you need to prepare a **Python 3.10** environment with the following packages installed: +Moreover, you need to prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/tutorials/multiple-devices/spatial-detection-fusion/requirements.txt b/tutorials/multiple-devices/spatial-detection-fusion/requirements.txt index c621ccab6..ec46ed409 100644 --- a/tutorials/multiple-devices/spatial-detection-fusion/requirements.txt +++ b/tutorials/multiple-devices/spatial-detection-fusion/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.4.0 +depthai-nodes==0.4.0 opencv-python-headless==4.10.0.84 numpy>=1.22 scipy diff --git a/tutorials/play-encoded-stream/README.md b/tutorials/play-encoded-stream/README.md index 223eb86bb..1543dad7d 100644 --- a/tutorials/play-encoded-stream/README.md +++ b/tutorials/play-encoded-stream/README.md @@ -37,7 +37,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/tutorials/play-encoded-stream/requirements.txt b/tutorials/play-encoded-stream/requirements.txt index b815f908f..676c928b4 100644 --- a/tutorials/play-encoded-stream/requirements.txt +++ b/tutorials/play-encoded-stream/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.0.0 +depthai==3.4.0 opencv-python-headless~=4.10.0 av==12.3.0 \ No newline at end of file diff --git a/tutorials/qr-with-tiling/README.md b/tutorials/qr-with-tiling/README.md index dbb450168..a26a8e664 100644 --- a/tutorials/qr-with-tiling/README.md +++ b/tutorials/qr-with-tiling/README.md @@ -35,7 +35,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following 
packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/tutorials/qr-with-tiling/requirements.txt b/tutorials/qr-with-tiling/requirements.txt index 8e7aaa353..a713324d3 100644 --- a/tutorials/qr-with-tiling/requirements.txt +++ b/tutorials/qr-with-tiling/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.1.0 -depthai-nodes==0.3.5 +depthai==3.4.0 +depthai-nodes==0.4.0 numpy>=1.22 pyzbar==0.1.9