From e1cdfb5f93a37352a462fffaa22f4091cb615991 Mon Sep 17 00:00:00 2001 From: bblazeva Date: Tue, 17 Feb 2026 15:28:22 +0100 Subject: [PATCH 01/14] custom FE + yolo-world --- .../backend/src/nn/label_mapper_node.py | 20 ++++++------------- .../backend/src/nn/nn_detection_node.py | 4 ++-- .../yolo-world/requirements.txt | 4 ++-- .../yolo-world/utils/annotation_node.py | 5 ++--- 4 files changed, 12 insertions(+), 21 deletions(-) diff --git a/custom-frontend/open-vocabulary-object-detection/backend/src/nn/label_mapper_node.py b/custom-frontend/open-vocabulary-object-detection/backend/src/nn/label_mapper_node.py index 62fa463c3..2da7f2c4b 100644 --- a/custom-frontend/open-vocabulary-object-detection/backend/src/nn/label_mapper_node.py +++ b/custom-frontend/open-vocabulary-object-detection/backend/src/nn/label_mapper_node.py @@ -3,7 +3,6 @@ import depthai as dai -from depthai_nodes import ImgDetectionsExtended logger = logging.getLogger(__name__) @@ -50,17 +49,10 @@ def build( def process( self, detections_message: dai.Buffer, frame_message: dai.ImgFrame ) -> None: - if isinstance(detections_message, ImgDetectionsExtended): - # Align detections to frame coordinate space - detections_message.setTransformation(frame_message.getTransformation()) - for detection in detections_message.detections: - detection.label_name = self._label_encoding.get( - detection.label, "unknown" - ) - elif isinstance(detections_message, dai.ImgDetections): - detections_message.setTransformation(frame_message.getTransformation()) - for detection in detections_message.detections: - detection.labelName = self._label_encoding.get( - detection.label, "unknown" - ) + assert isinstance(detections_message, dai.ImgDetections) + detections_message.setTransformation(frame_message.getTransformation()) + for detection in detections_message.detections: + detection.labelName = self._label_encoding.get( + detection.label, "unknown" + ) self.out.send(detections_message) diff --git 
a/custom-frontend/open-vocabulary-object-detection/backend/src/nn/nn_detection_node.py b/custom-frontend/open-vocabulary-object-detection/backend/src/nn/nn_detection_node.py index 23e46bb69..58ba9a042 100644 --- a/custom-frontend/open-vocabulary-object-detection/backend/src/nn/nn_detection_node.py +++ b/custom-frontend/open-vocabulary-object-detection/backend/src/nn/nn_detection_node.py @@ -27,7 +27,7 @@ class NNDetectionNode(dai.node.ThreadedHostNode): -> LabelMapperNode (add label names for visualization) Exposes: - - detections_extended: ImgDetectionsExtended with label names (for visualizer) + - detections_extended: dai.ImgDetections with label names (for visualizer) - detections: dai.ImgDetections with label names (for snapping) - controller: PromptController for dynamic prompt updates (classes, confidence threshold) """ @@ -80,7 +80,7 @@ def build( # Detection filter self._det_filter.build(self._nn.out) - # Add label for visualization (ImgDetectionsExtended) + # Add label for visualization self._det_label_mapper.build( input_detections=self._det_filter.out, input_frame=input_frame ) diff --git a/neural-networks/object-detection/yolo-world/requirements.txt b/neural-networks/object-detection/yolo-world/requirements.txt index 62271199f..7be5707f8 100644 --- a/neural-networks/object-detection/yolo-world/requirements.txt +++ b/neural-networks/object-detection/yolo-world/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 onnxruntime diff --git a/neural-networks/object-detection/yolo-world/utils/annotation_node.py b/neural-networks/object-detection/yolo-world/utils/annotation_node.py index 95a14dd6a..2a73df2c4 100644 --- a/neural-networks/object-detection/yolo-world/utils/annotation_node.py +++ b/neural-networks/object-detection/yolo-world/utils/annotation_node.py @@ -1,5 +1,4 @@ import depthai as dai -from depthai_nodes import ImgDetectionsExtended from typing 
import Dict @@ -32,7 +31,7 @@ def process( self, detections_message: dai.Buffer, ) -> None: - assert isinstance(detections_message, ImgDetectionsExtended) + assert isinstance(detections_message, dai.ImgDetections) for detection in detections_message.detections: - detection.label_name = self._label_encoding.get(detection.label, "unknown") + detection.labelName = self._label_encoding.get(detection.label, "unknown") return detections_message From 29dbfd7876850b0dae600e9df4c0d717ef1feea5 Mon Sep 17 00:00:00 2001 From: bblazeva Date: Fri, 20 Feb 2026 13:30:44 +0100 Subject: [PATCH 02/14] ImgDetectionsExtended -> dai.ImgDetetctions + GatherData + FrameCropper --- .../lossless-zooming/requirements.txt | 4 +- .../lossless-zooming/utils/crop_face.py | 11 ++-- .../backend/src/nn/label_mapper_node.py | 2 +- .../box-measurement/requirements.txt | 4 +- .../utils/box_processing_node.py | 22 +++---- .../triangulation/requirements.txt | 4 +- .../triangulation/utils/host_triangulation.py | 15 ++--- .../3D-detection/objectron/main.py | 32 ++++------ .../3D-detection/objectron/requirements.txt | 4 +- .../objectron/utils/annotation_node.py | 3 +- .../counting/people-counter/requirements.txt | 4 +- .../people-counter/utils/annotation_node.py | 3 +- .../face-detection/age-gender/main.py | 36 ++++------- .../age-gender/requirements.txt | 4 +- .../age-gender/utils/annotation_node.py | 17 +++--- .../blur-faces/requirements.txt | 4 +- .../blur-faces/utils/blur_detections.py | 6 +- .../emotion-recognition/main.py | 33 ++++------ .../emotion-recognition/requirements.txt | 4 +- .../utils/annotation_node.py | 8 +-- .../face-mask-detection/main.py | 6 +- .../face-mask-detection/requirements.txt | 4 +- .../face-detection/fatigue-detection/main.py | 33 ++++------ .../fatigue-detection/requirements.txt | 4 +- .../utils/annotation_node.py | 8 +-- .../face-detection/gaze-estimation/main.py | 8 ++- .../gaze-estimation/requirements.txt | 4 +- .../gaze-estimation/utils/annotation_node.py | 11 ++-- 
.../utils/process_keypoints.py | 11 ++-- .../head-posture-detection/main.py | 34 ++++------- .../head-posture-detection/requirements.txt | 4 +- .../utils/annotation_node.py | 18 +++--- .../human-machine-safety/main.py | 7 +-- .../human-machine-safety/requirements.txt | 4 +- .../utils/annotation_node.py | 24 ++++---- .../utils/detection_merger.py | 3 +- .../text-blur/requirements.txt | 4 +- .../text-blur/utils/blur_detections.py | 7 ++- .../object-detection/yolo-p/requirements.txt | 4 +- .../yolo-p/utils/annotation_node.py | 4 +- .../object-tracking/deepsort-tracking/main.py | 30 ++++----- .../deepsort-tracking/requirements.txt | 6 +- .../object-tracking/people-tracker/main.py | 6 +- neural-networks/ocr/general-ocr/main.py | 8 ++- .../ocr/general-ocr/requirements.txt | 4 +- .../ocr/general-ocr/utils/annotation_node.py | 2 +- .../utils/host_process_detections.py | 61 +++---------------- .../pose-estimation/animal-pose/main.py | 36 ++++------- .../animal-pose/requirements.txt | 4 +- .../animal-pose/utils/annotation_node.py | 12 ++-- .../pose-estimation/hand-pose/main.py | 8 ++- .../hand-pose/requirements.txt | 4 +- .../hand-pose/utils/annotation_node.py | 43 ++++++------- .../hand-pose/utils/process.py | 7 +-- .../pose-estimation/human-pose/main.py | 31 ++++------ .../human-pose/requirements.txt | 4 +- .../human-reidentification/main.py | 36 ++++------- .../human-reidentification/requirements.txt | 4 +- .../utils/identification.py | 8 +-- streaming/webrtc-streaming/utils/transform.py | 5 +- tests/constants.py | 1 - 61 files changed, 287 insertions(+), 455 deletions(-) diff --git a/camera-controls/lossless-zooming/requirements.txt b/camera-controls/lossless-zooming/requirements.txt index 8e4027fc1..218b6f03b 100644 --- a/camera-controls/lossless-zooming/requirements.txt +++ b/camera-controls/lossless-zooming/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.3.0 +depthai-nodes==0.4.0 \ No newline at end of 
file diff --git a/camera-controls/lossless-zooming/utils/crop_face.py b/camera-controls/lossless-zooming/utils/crop_face.py index 7d64e6e94..20b395fa9 100644 --- a/camera-controls/lossless-zooming/utils/crop_face.py +++ b/camera-controls/lossless-zooming/utils/crop_face.py @@ -1,6 +1,5 @@ import depthai as dai from typing import Tuple -from depthai_nodes import ImgDetectionsExtended AVG_MAX_NUM = 10 @@ -15,7 +14,7 @@ class CropFace(dai.node.HostNode): Attributes ---------- detections_input : dai.Input - The input link for the ImageDetectionsExtended message. + The input link for the dai.ImgDetections message. config_output : dai.Output The output link for the ImageManipConfig messages. source_size : Tuple[int, int] @@ -49,7 +48,7 @@ def build( Parameters ---------- detections_input : dai.Node.Output - The input link for the ImgDetectionsExtended message + The input link for the dai.ImgDetections message source_size : Tuple[int, int] The size of the source image (width, height). target_size : Optional[Tuple[int, int]] @@ -67,11 +66,11 @@ def build( def process(self, detection_message: dai.Buffer): """Process the input detections and create a crop config. This function is - ran every time a new ImgDetectionsExtended message is received. + ran every time a new dai.ImgDetections message is received. Sends one crop configuration to the config_output link. 
""" - assert isinstance(detection_message, ImgDetectionsExtended) + assert isinstance(detection_message, dai.ImgDetections) timestamp = detection_message.getTimestamp() sequence_num = detection_message.getSequenceNum() @@ -85,7 +84,7 @@ def process(self, detection_message: dai.Buffer): if len(dets) > 0: cfg.setSkipCurrentImage(False) coords = dets[0] - rect = coords.rotated_rect + rect = coords.getBoundingBox() x = rect.center.x y = rect.center.y diff --git a/custom-frontend/open-vocabulary-object-detection/backend/src/nn/label_mapper_node.py b/custom-frontend/open-vocabulary-object-detection/backend/src/nn/label_mapper_node.py index 2da7f2c4b..b9392b683 100644 --- a/custom-frontend/open-vocabulary-object-detection/backend/src/nn/label_mapper_node.py +++ b/custom-frontend/open-vocabulary-object-detection/backend/src/nn/label_mapper_node.py @@ -12,7 +12,7 @@ class DetectionsLabelMapper(dai.node.HostNode): Adds label names to detections and aligns detections to a reference frame. Inputs: - - input_detections: dai.ImgDetections or ImgDetectionsExtended + - input_detections: dai.ImgDetections - input_frame: dai.ImgFrame (reference coordinate space) Output: diff --git a/depth-measurement/3d-measurement/box-measurement/requirements.txt b/depth-measurement/3d-measurement/box-measurement/requirements.txt index 3f3924ee0..f9dd6cc0a 100644 --- a/depth-measurement/3d-measurement/box-measurement/requirements.txt +++ b/depth-measurement/3d-measurement/box-measurement/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 numpy>=1.22 open3d~=0.18 opencv-python-headless==4.10.0.84 diff --git a/depth-measurement/3d-measurement/box-measurement/utils/box_processing_node.py b/depth-measurement/3d-measurement/box-measurement/utils/box_processing_node.py index 1221bbb5f..c6b26086e 100644 --- a/depth-measurement/3d-measurement/box-measurement/utils/box_processing_node.py +++ 
b/depth-measurement/3d-measurement/box-measurement/utils/box_processing_node.py @@ -1,10 +1,6 @@ import depthai as dai import numpy as np import cv2 -from depthai_nodes.message.img_detections import ( - ImgDetectionExtended, - ImgDetectionsExtended, -) from .helper_functions import reverse_resize_and_pad import time @@ -146,11 +142,11 @@ def _fit_cuboid( corners3d = np.asarray(outline.points) self._draw_cuboid_outline(corners3d) - def _draw_box_and_label(self, det: ImgDetectionExtended) -> None: + def _draw_box_and_label(self, det: dai.ImgDetection) -> None: """Draws rotated rect and label""" # All annotation coordinates are normalized to the NN input size (512×320) - rr = det._rotated_rect + rr = det.getBoundingBox() cx, cy = rr.center.x, rr.center.y w, h = rr.size.width, rr.size.height angle = rr.angle @@ -173,18 +169,18 @@ def _draw_box_and_label(self, det: ImgDetectionExtended) -> None: if self.fit: label = ( - f"Box ({det._confidence:.2f}) " + f"Box ({det.confidence:.2f}) " f"{self.dimensions[0]:.1f} x {self.dimensions[1]:.1f} x {self.dimensions[2]:.1f} cm" ) elif self.dimensions_cache is not None and ( time.time() - self.last_successful_fit < self.cache_duration ): label = ( - f"Box ({det._confidence:.2f}) " + f"Box ({det.confidence:.2f}) " f"{self.dimensions_cache[0]:.1f} x {self.dimensions_cache[1]:.1f} x {self.dimensions_cache[2]:.1f} cm" ) else: - label = f"{'Box'} {det._confidence:.2f}" + label = f"{'Box'} {det.confidence:.2f}" self.helper_det.draw_text( label, @@ -195,7 +191,7 @@ def _draw_box_and_label(self, det: ImgDetectionExtended) -> None: ) def _annotate_detection( - self, det: ImgDetectionExtended, idx: int, mask: np.ndarray, pcl, pcl_colors + self, det: dai.ImgDetection, idx: int, mask: np.ndarray, pcl, pcl_colors ): """Draw all annotations (mask, 3D box fit, bounding box + label) for a single detection.""" self._draw_mask(mask, idx) @@ -217,10 +213,10 @@ def run(self): assert isinstance(pcl_msg, dai.PointCloudData) assert isinstance(rgb_msg, 
dai.ImgFrame) - assert isinstance(det_msg, ImgDetectionsExtended) + assert isinstance(det_msg, dai.ImgDetections) inPointCloud: dai.PointCloudData = pcl_msg inRGB: dai.ImgFrame = rgb_msg - parser_output: ImgDetectionsExtended = det_msg + parser_output: dai.ImgDetections = det_msg try: points, colors = inPointCloud.getPointsRGB() @@ -230,7 +226,7 @@ def run(self): rgba_img = colors.reshape(IMG_HEIGHT, IMG_WIDTH, 4) bgr_img = cv2.cvtColor(rgba_img, cv2.COLOR_BGRA2BGR) - mask = parser_output._masks._mask + mask = parser_output.getCvSegmentationMask() detections = parser_output.detections mask_full = reverse_resize_and_pad( mask, (IMG_WIDTH, IMG_HEIGHT), INPUT_SHAPE diff --git a/depth-measurement/triangulation/requirements.txt b/depth-measurement/triangulation/requirements.txt index d3c270afc..657eada6e 100644 --- a/depth-measurement/triangulation/requirements.txt +++ b/depth-measurement/triangulation/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 diff --git a/depth-measurement/triangulation/utils/host_triangulation.py b/depth-measurement/triangulation/utils/host_triangulation.py index 9378a49bf..952e01285 100644 --- a/depth-measurement/triangulation/utils/host_triangulation.py +++ b/depth-measurement/triangulation/utils/host_triangulation.py @@ -2,9 +2,6 @@ import numpy as np import depthai as dai from typing import Tuple -from depthai_nodes import ( - ImgDetectionsExtended, -) from depthai_nodes.utils import AnnotationHelper from .stereo_inference import StereoInference @@ -73,15 +70,15 @@ def process( nn_face_left: dai.Buffer, nn_face_right: dai.Buffer, ) -> None: - assert isinstance(nn_face_left, ImgDetectionsExtended) - assert isinstance(nn_face_right, ImgDetectionsExtended) + assert isinstance(nn_face_left, dai.ImgDetections) + assert isinstance(nn_face_right, dai.ImgDetections) left_frame = face_left.getCvFrame() right_frame = face_right.getCvFrame() 
bbox_annot_left = AnnotationHelper() for detection in nn_face_left.detections: - rect = detection.rotated_rect + rect = detection.getBoundingBox() x = rect.center.x y = rect.center.y w = rect.size.width @@ -103,7 +100,7 @@ def process( bbox_annot_right = AnnotationHelper() for detection in nn_face_right.detections: - rect = detection.rotated_rect + rect = detection.getBoundingBox() x = rect.center.x y = rect.center.y w = rect.size.width @@ -137,8 +134,8 @@ def process( if nn_face_left.detections and nn_face_right.detections: spatials = [] keypoints = zip( - nn_face_left.detections[0].keypoints, - nn_face_right.detections[0].keypoints, + nn_face_left.detections[0].getKeypoints2f(), + nn_face_right.detections[0].getKeypoints2f(), ) for i, (keypoint_left, keypoint_right) in enumerate(keypoints): diff --git a/neural-networks/3D-detection/objectron/main.py b/neural-networks/3D-detection/objectron/main.py index 5a016a6ba..deaaafc02 100644 --- a/neural-networks/3D-detection/objectron/main.py +++ b/neural-networks/3D-detection/objectron/main.py @@ -1,8 +1,7 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, ImgDetectionsFilter, GatherData -from depthai_nodes.node.utils import generate_script_content +from depthai_nodes.node import ParsingNeuralNetwork, ImgDetectionsFilter, GatherData, FrameCropper from utils.arguments import initialize_argparser from utils.annotation_node import AnnotationNode @@ -62,23 +61,14 @@ ) # detection processing - script = pipeline.create(dai.node.Script) - first_stage_filter.out.link(script.inputs["det_in"]) - det_nn.passthrough.link(script.inputs["preview"]) - script_content = generate_script_content( - resize_width=pos_model_w, - resize_height=pos_model_h, + crop_node = pipeline.create(FrameCropper).fromImgDetections( + inputImgDetections=first_stage_filter.out, padding=PADDING, - resize_mode="STRETCH", + ).build( + inputImage=det_nn.passthrough, + outputSize=(pos_model_w, pos_model_h), + 
resizeMode=dai.ImageManipConfig.ResizeMode.STRETCH, ) - script.setScript(script_content) - - crop_node = pipeline.create(dai.node.ImageManip) - crop_node.initialConfig.setOutputSize(pos_model_w, pos_model_h) - crop_node.inputConfig.setWaitForMessage(True) - - script.outputs["manip_cfg"].link(crop_node.inputConfig) - script.outputs["manip_img"].link(crop_node.inputImage) pos_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( crop_node.out, pos_nn_archive @@ -90,9 +80,11 @@ ) # detections and position estimations sync - gather_data = pipeline.create(GatherData).build(camera_fps=args.fps_limit) - detections_filter.out.link(gather_data.input_reference) - pos_nn.getOutput(0).link(gather_data.input_data) + gather_data = pipeline.create(GatherData).build( + camera_fps=args.fps_limit, + input_data=pos_nn.getOutput(0), + input_reference=detections_filter.out, + ) # annotation connection_pairs = ( diff --git a/neural-networks/3D-detection/objectron/requirements.txt b/neural-networks/3D-detection/objectron/requirements.txt index 338292859..dfe76c078 100644 --- a/neural-networks/3D-detection/objectron/requirements.txt +++ b/neural-networks/3D-detection/objectron/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.5 +depthai==3.3.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update diff --git a/neural-networks/3D-detection/objectron/utils/annotation_node.py b/neural-networks/3D-detection/objectron/utils/annotation_node.py index f43202ab9..edb4e9aa8 100644 --- a/neural-networks/3D-detection/objectron/utils/annotation_node.py +++ b/neural-networks/3D-detection/objectron/utils/annotation_node.py @@ -1,6 +1,5 @@ import depthai as dai from depthai_nodes import ( - ImgDetectionsExtended, Keypoints, GatheredData, PRIMARY_COLOR, @@ -36,7 +35,7 @@ def build( def process(self, gathered_data: dai.Buffer) -> None: assert isinstance(gathered_data, GatheredData) - detections_message: ImgDetectionsExtended = 
gathered_data.reference_data + detections_message: dai.ImgDetections = gathered_data.reference_data detections_list: List[dai.ImgDetection] = detections_message.detections annotation_helper = AnnotationHelper() diff --git a/neural-networks/counting/people-counter/requirements.txt b/neural-networks/counting/people-counter/requirements.txt index 8e4027fc1..218b6f03b 100644 --- a/neural-networks/counting/people-counter/requirements.txt +++ b/neural-networks/counting/people-counter/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.3.0 +depthai-nodes==0.4.0 \ No newline at end of file diff --git a/neural-networks/counting/people-counter/utils/annotation_node.py b/neural-networks/counting/people-counter/utils/annotation_node.py index d592b1bc8..2acc9604e 100644 --- a/neural-networks/counting/people-counter/utils/annotation_node.py +++ b/neural-networks/counting/people-counter/utils/annotation_node.py @@ -1,5 +1,4 @@ import depthai as dai -from depthai_nodes import ImgDetectionsExtended from depthai_nodes.utils import AnnotationHelper @@ -15,7 +14,7 @@ def build(self, det_msg: dai.Node.Output) -> "AnnotationNode": return self def process(self, det_msg: dai.Buffer) -> None: - assert isinstance(det_msg, (dai.ImgDetections, ImgDetectionsExtended)) + assert isinstance(det_msg, (dai.ImgDetections)) count = len(det_msg.detections) diff --git a/neural-networks/face-detection/age-gender/main.py b/neural-networks/face-detection/age-gender/main.py index 60d2c519c..4b399eefc 100644 --- a/neural-networks/face-detection/age-gender/main.py +++ b/neural-networks/face-detection/age-gender/main.py @@ -1,8 +1,7 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, GatherData, ImgDetectionsBridge -from depthai_nodes.node.utils import generate_script_content +from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper from utils.arguments import 
initialize_argparser from utils.annotation_node import AnnotationNode @@ -73,36 +72,23 @@ ) det_nn.getParser(0).conf_threshold = 0.9 # for more stable detections - # detection processing - det_bridge = pipeline.create(ImgDetectionsBridge).build( - det_nn.out - ) # TODO: remove once we have it working with ImgDetectionsExtended - script_node = pipeline.create(dai.node.Script) - det_bridge.out.link(script_node.inputs["det_in"]) - input_node_out.link(script_node.inputs["preview"]) - script_content = generate_script_content( - resize_width=rec_model_nn_archive.getInputWidth(), - resize_height=rec_model_nn_archive.getInputHeight(), + crop_node = pipeline.create(FrameCropper).fromImgDetections( + inputImgDetections=det_nn.out, + ).build( + inputImage=input_node_out, + outputSize=(rec_model_nn_archive.getInputWidth(), rec_model_nn_archive.getInputHeight()), ) - script_node.setScript(script_content) - - crop_node = pipeline.create(dai.node.ImageManip) - crop_node.initialConfig.setOutputSize( - rec_model_nn_archive.getInputWidth(), rec_model_nn_archive.getInputHeight() - ) - crop_node.inputConfig.setWaitForMessage(True) - - script_node.outputs["manip_cfg"].link(crop_node.inputConfig) - script_node.outputs["manip_img"].link(crop_node.inputImage) rec_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( crop_node.out, rec_model_nn_archive ) # detections and recognitions sync - gather_data_node = pipeline.create(GatherData).build(args.fps_limit) - rec_nn.outputs.link(gather_data_node.input_data) - det_nn.out.link(gather_data_node.input_reference) + gather_data_node = pipeline.create(GatherData).build( + camera_fps=args.fps_limit, + input_data=rec_nn.outputs, + input_reference=det_nn.out, + ) # annotation annotation_node = pipeline.create(AnnotationNode).build(gather_data_node.out) diff --git a/neural-networks/face-detection/age-gender/requirements.txt b/neural-networks/face-detection/age-gender/requirements.txt index 8e4027fc1..c2a0c7bb7 100644 --- 
a/neural-networks/face-detection/age-gender/requirements.txt +++ b/neural-networks/face-detection/age-gender/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.3.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git a/neural-networks/face-detection/age-gender/utils/annotation_node.py b/neural-networks/face-detection/age-gender/utils/annotation_node.py index 30df380ca..1a78d356c 100644 --- a/neural-networks/face-detection/age-gender/utils/annotation_node.py +++ b/neural-networks/face-detection/age-gender/utils/annotation_node.py @@ -2,7 +2,6 @@ import depthai as dai from depthai_nodes import ( - ImgDetectionsExtended, Predictions, Classifications, SECONDARY_COLOR, @@ -22,10 +21,10 @@ def build( return self def process(self, gather_data_msg: dai.Buffer) -> None: - img_detections_extended_msg: ImgDetectionsExtended = ( + img_detections_msg: dai.ImgDetections = ( gather_data_msg.reference_data ) - assert isinstance(img_detections_extended_msg, ImgDetectionsExtended) + assert isinstance(img_detections_msg, dai.ImgDetections) age_gender_msg_group_list: List[dai.MessageGroup] = gather_data_msg.gathered assert isinstance(age_gender_msg_group_list, list) @@ -33,14 +32,14 @@ def process(self, gather_data_msg: dai.Buffer) -> None: isinstance(msg, dai.MessageGroup) for msg in age_gender_msg_group_list ) - assert len(img_detections_extended_msg.detections) == len( + assert len(img_detections_msg.detections) == len( age_gender_msg_group_list ) annotations = AnnotationHelper() - for img_detection_extended_msg, age_gender_msg_group in zip( - img_detections_extended_msg.detections, age_gender_msg_group_list + for img_detection_msg, age_gender_msg_group in zip( + img_detections_msg.detections, age_gender_msg_group_list ): age_msg: Predictions = age_gender_msg_group["0"] assert isinstance(age_msg, Predictions) @@ -48,7 +47,7 @@ def 
process(self, gather_data_msg: dai.Buffer) -> None: assert isinstance(gender_msg, Classifications) xmin, ymin, xmax, ymax = ( - img_detection_extended_msg.rotated_rect.getOuterRect() + img_detection_msg.getBoundingBox().getOuterRect() ) annotations.draw_rectangle( @@ -64,8 +63,8 @@ def process(self, gather_data_msg: dai.Buffer) -> None: ) annotations_msg = annotations.build( - timestamp=img_detections_extended_msg.getTimestamp(), - sequence_num=img_detections_extended_msg.getSequenceNum(), + timestamp=img_detections_msg.getTimestamp(), + sequence_num=img_detections_msg.getSequenceNum(), ) self.out.send(annotations_msg) diff --git a/neural-networks/face-detection/blur-faces/requirements.txt b/neural-networks/face-detection/blur-faces/requirements.txt index df8d7aa85..77fbe7cf7 100644 --- a/neural-networks/face-detection/blur-faces/requirements.txt +++ b/neural-networks/face-detection/blur-faces/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/face-detection/blur-faces/utils/blur_detections.py b/neural-networks/face-detection/blur-faces/utils/blur_detections.py index 1839df301..b970da439 100644 --- a/neural-networks/face-detection/blur-faces/utils/blur_detections.py +++ b/neural-networks/face-detection/blur-faces/utils/blur_detections.py @@ -21,7 +21,7 @@ def run(self) -> None: h, w = frame_copy.shape[:2] for detection in detections: - rect: dai.RotatedRect = detection.rotated_rect + rect: dai.RotatedRect = detection.getBoundingBox() rect = rect.denormalize(w, h) detection = rect.getOuterRect() bbox = [int(d) for d in detection] @@ -30,7 +30,7 @@ def run(self) -> None: bbox[2] = np.clip(bbox[2], 0, w) bbox[3] = np.clip(bbox[3], 0, h) - roi = frame_copy[bbox[1] : bbox[3], bbox[0] : bbox[2]] + roi = frame_copy[bbox[1]: bbox[3], bbox[0]: bbox[2]] roi_width = bbox[2] - bbox[0] roi_height = bbox[3] - bbox[1] @@ 
-56,7 +56,7 @@ def run(self) -> None: original_background = cv2.bitwise_and(roi, roi, mask=inverse_mask) combined = cv2.add(blurred_ellipse, original_background) - frame_copy[bbox[1] : bbox[3], bbox[0] : bbox[2]] = combined + frame_copy[bbox[1]: bbox[3], bbox[0]: bbox[2]] = combined ts = frame.getTimestamp() frame_type = frame.getType() diff --git a/neural-networks/face-detection/emotion-recognition/main.py b/neural-networks/face-detection/emotion-recognition/main.py index e01ec01ed..de68c2db4 100755 --- a/neural-networks/face-detection/emotion-recognition/main.py +++ b/neural-networks/face-detection/emotion-recognition/main.py @@ -1,8 +1,7 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, GatherData, ImgDetectionsBridge -from depthai_nodes.node.utils import generate_script_content +from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper from utils.arguments import initialize_argparser from utils.annotation_node import AnnotationNode @@ -72,33 +71,23 @@ resize_node.out, det_model_nn_archive ) - # detection processing - det_bridge = pipeline.create(ImgDetectionsBridge).build( - det_nn.out - ) # TODO: remove once we have it working with ImgDetectionsExtended - script_node = pipeline.create(dai.node.Script) - det_bridge.out.link(script_node.inputs["det_in"]) - input_node.link(script_node.inputs["preview"]) - script_content = generate_script_content( - resize_width=rec_model_nn_archive.getInputWidth(), - resize_height=rec_model_nn_archive.getInputHeight(), + crop_node = pipeline.create(FrameCropper).fromImgDetections( + inputImgDetections=det_nn.out, + ).build( + inputImage=input_node, + outputSize=(rec_model_nn_archive.getInputWidth(), rec_model_nn_archive.getInputHeight()), ) - script_node.setScript(script_content) - - crop_node = pipeline.create(dai.node.ImageManip) - crop_node.inputConfig.setWaitForMessage(True) - - script_node.outputs["manip_cfg"].link(crop_node.inputConfig) - 
script_node.outputs["manip_img"].link(crop_node.inputImage) rec_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( crop_node.out, rec_model_nn_archive ) # detections and recognitions sync - gather_data_node = pipeline.create(GatherData).build(args.fps_limit) - rec_nn.out.link(gather_data_node.input_data) - det_nn.out.link(gather_data_node.input_reference) + gather_data_node = pipeline.create(GatherData).build( + camera_fps=args.fps_limit, + input_data=rec_nn.out, + input_reference=det_nn.out, + ) # annotation annotation_node = pipeline.create(AnnotationNode).build(gather_data_node.out) diff --git a/neural-networks/face-detection/emotion-recognition/requirements.txt b/neural-networks/face-detection/emotion-recognition/requirements.txt index 7bfdaaf09..1577a1888 100644 --- a/neural-networks/face-detection/emotion-recognition/requirements.txt +++ b/neural-networks/face-detection/emotion-recognition/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/face-detection/emotion-recognition/utils/annotation_node.py b/neural-networks/face-detection/emotion-recognition/utils/annotation_node.py index efe5b9464..2f01f3b2a 100644 --- a/neural-networks/face-detection/emotion-recognition/utils/annotation_node.py +++ b/neural-networks/face-detection/emotion-recognition/utils/annotation_node.py @@ -2,7 +2,7 @@ import depthai as dai -from depthai_nodes import ImgDetectionsExtended, Classifications, SECONDARY_COLOR +from depthai_nodes import Classifications, SECONDARY_COLOR from depthai_nodes.utils import AnnotationHelper @@ -18,8 +18,8 @@ def build( return self def process(self, gather_data_msg: dai.Buffer) -> None: - dets_msg: ImgDetectionsExtended = gather_data_msg.reference_data - assert isinstance(dets_msg, ImgDetectionsExtended) + dets_msg: dai.ImgDetections = 
gather_data_msg.reference_data + assert isinstance(dets_msg, dai.ImgDetections) rec_msg_list: List[Classifications] = gather_data_msg.gathered assert isinstance(rec_msg_list, list) @@ -29,7 +29,7 @@ def process(self, gather_data_msg: dai.Buffer) -> None: annotations = AnnotationHelper() for det_msg, rec_msg in zip(dets_msg.detections, rec_msg_list): - xmin, ymin, xmax, ymax = det_msg.rotated_rect.getOuterRect() + xmin, ymin, xmax, ymax = det_msg.getBoundingBox().getOuterRect() annotations.draw_rectangle( (xmin, ymin), diff --git a/neural-networks/face-detection/face-mask-detection/main.py b/neural-networks/face-detection/face-mask-detection/main.py index cc0d0b90f..3d37de8c2 100644 --- a/neural-networks/face-detection/face-mask-detection/main.py +++ b/neural-networks/face-detection/face-mask-detection/main.py @@ -4,7 +4,6 @@ from depthai_nodes.node import ( ParsingNeuralNetwork, ImgDetectionsFilter, - ImgDetectionsBridge, ) from utils.arguments import initialize_argparser @@ -58,13 +57,10 @@ # filter and rename detection labels det_process_filter = pipeline.create(ImgDetectionsFilter).build(det_nn.out) det_process_filter.setLabels(list(LABEL_ENCODING.keys()), keep=True) - det_process_bridge = pipeline.create(ImgDetectionsBridge).build( - det_process_filter.out, label_encoding=LABEL_ENCODING - ) # visualization visualizer.addTopic("Video", det_nn.passthrough, "images") - visualizer.addTopic("Detections", det_process_bridge.out, "images") + visualizer.addTopic("Detections", det_process_filter.out, "images") print("Pipeline created.") diff --git a/neural-networks/face-detection/face-mask-detection/requirements.txt b/neural-networks/face-detection/face-mask-detection/requirements.txt index 8e4027fc1..218b6f03b 100644 --- a/neural-networks/face-detection/face-mask-detection/requirements.txt +++ b/neural-networks/face-detection/face-mask-detection/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.3.0 
+depthai-nodes==0.4.0 \ No newline at end of file diff --git a/neural-networks/face-detection/fatigue-detection/main.py b/neural-networks/face-detection/fatigue-detection/main.py index eb3e88ab1..9b1dde588 100644 --- a/neural-networks/face-detection/fatigue-detection/main.py +++ b/neural-networks/face-detection/fatigue-detection/main.py @@ -1,8 +1,7 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, ImgDetectionsBridge, GatherData -from depthai_nodes.node.utils import generate_script_content +from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper from utils.arguments import initialize_argparser from utils.annotation_node import AnnotationNode @@ -73,33 +72,23 @@ resize_node.out, det_model_nn_archive ) - # detection processing - det_bridge = pipeline.create(ImgDetectionsBridge).build( - det_nn.out - ) # TODO: remove once we have it working with ImgDetectionsExtended - script_node = pipeline.create(dai.node.Script) - det_bridge.out.link(script_node.inputs["det_in"]) - input_node_out.link(script_node.inputs["preview"]) - script_content = generate_script_content( - resize_width=rec_model_w, - resize_height=rec_model_h, + crop_node = pipeline.create(FrameCropper).fromImgDetections( + inputImgDetections=det_nn.out, + ).build( + inputImage=input_node_out, + outputSize=(rec_model_w, rec_model_h), ) - script_node.setScript(script_content) - - crop_node = pipeline.create(dai.node.ImageManip) - crop_node.inputConfig.setWaitForMessage(True) - - script_node.outputs["manip_cfg"].link(crop_node.inputConfig) - script_node.outputs["manip_img"].link(crop_node.inputImage) landmark_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( crop_node.out, rec_model_description ) # detections and gaze estimations sync - gather_data_node = pipeline.create(GatherData).build(args.fps_limit) - landmark_nn.out.link(gather_data_node.input_data) - det_nn.out.link(gather_data_node.input_reference) + 
gather_data_node = pipeline.create(GatherData).build( + camera_fps=args.fps_limit, + input_data=landmark_nn.out, + input_reference=det_nn.out, + ) # annotation annotation_node = pipeline.create(AnnotationNode).build(gather_data_node.out) diff --git a/neural-networks/face-detection/fatigue-detection/requirements.txt b/neural-networks/face-detection/fatigue-detection/requirements.txt index df8d7aa85..c5f9dee1b 100644 --- a/neural-networks/face-detection/fatigue-detection/requirements.txt +++ b/neural-networks/face-detection/fatigue-detection/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/face-detection/fatigue-detection/utils/annotation_node.py b/neural-networks/face-detection/fatigue-detection/utils/annotation_node.py index 5549087c3..ce9d69032 100644 --- a/neural-networks/face-detection/fatigue-detection/utils/annotation_node.py +++ b/neural-networks/face-detection/fatigue-detection/utils/annotation_node.py @@ -2,7 +2,7 @@ from collections import deque import depthai as dai from depthai_nodes.utils import AnnotationHelper -from depthai_nodes import ImgDetectionsExtended, Keypoints +from depthai_nodes import Keypoints from utils.face_landmarks import determine_fatigue @@ -18,9 +18,9 @@ def build(self, gather_data_msg) -> "AnnotationNode": return self def process(self, gather_data_msg) -> None: - detections_msg: ImgDetectionsExtended = gather_data_msg.reference_data - assert isinstance(detections_msg, ImgDetectionsExtended) - src_w, src_h = detections_msg.transformation.getSize() + detections_msg: dai.ImgDetections = gather_data_msg.reference_data + assert isinstance(detections_msg, dai.ImgDetections) + src_w, src_h = detections_msg.getTransformation().getSize() landmarks_msg_list: List[Keypoints] = gather_data_msg.gathered assert 
isinstance(landmarks_msg_list, list) diff --git a/neural-networks/face-detection/gaze-estimation/main.py b/neural-networks/face-detection/gaze-estimation/main.py index 6eb3719ca..285d681a7 100644 --- a/neural-networks/face-detection/gaze-estimation/main.py +++ b/neural-networks/face-detection/gaze-estimation/main.py @@ -137,9 +137,11 @@ gaze_estimation_node.inputs["head_pose_angles_yaw_pitch_roll"].setMaxSize(5) # detections and gaze estimations sync - gather_data_node = pipeline.create(GatherData).build(args.fps_limit) - gaze_estimation_node.out.link(gather_data_node.input_data) - det_nn.out.link(gather_data_node.input_reference) + gather_data_node = pipeline.create(GatherData).build( + camera_fps=args.fps_limit, + input_data=gaze_estimation_node.out, + input_reference=det_nn.out, + ) # annotation annotation_node = pipeline.create(AnnotationNode).build(gather_data_node.out) diff --git a/neural-networks/face-detection/gaze-estimation/requirements.txt b/neural-networks/face-detection/gaze-estimation/requirements.txt index 8e4027fc1..218b6f03b 100644 --- a/neural-networks/face-detection/gaze-estimation/requirements.txt +++ b/neural-networks/face-detection/gaze-estimation/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.3.0 +depthai-nodes==0.4.0 \ No newline at end of file diff --git a/neural-networks/face-detection/gaze-estimation/utils/annotation_node.py b/neural-networks/face-detection/gaze-estimation/utils/annotation_node.py index 760d81454..3787f4dcb 100644 --- a/neural-networks/face-detection/gaze-estimation/utils/annotation_node.py +++ b/neural-networks/face-detection/gaze-estimation/utils/annotation_node.py @@ -1,7 +1,6 @@ from typing import List import depthai as dai -from depthai_nodes import ImgDetectionsExtended from depthai_nodes.utils import AnnotationHelper @@ -14,9 +13,9 @@ def build(self, gather_data_msg) -> "AnnotationNode": return self def process(self, gather_data_msg) -> None: - 
detections_msg: ImgDetectionsExtended = gather_data_msg.reference_data - assert isinstance(detections_msg, ImgDetectionsExtended) - src_w, src_h = detections_msg.transformation.getSize() + detections_msg: dai.ImgDetections = gather_data_msg.reference_data + assert isinstance(detections_msg, dai.ImgDetections) + src_w, src_h = detections_msg.getTransformation().getSize() gaze_msg_list: List[dai.NNData] = gather_data_msg.gathered assert isinstance(gaze_msg_list, list) @@ -26,8 +25,8 @@ def process(self, gather_data_msg) -> None: annotations = AnnotationHelper() for detection, gaze in zip(detections_msg.detections, gaze_msg_list): - face_bbox = detection.rotated_rect.getPoints() - keypoints = detection.keypoints + face_bbox = detection.getBoundingBox().getPoints() + keypoints = detection.getKeypoints2f() # Draw bbox annotations.draw_rectangle( diff --git a/neural-networks/face-detection/gaze-estimation/utils/process_keypoints.py b/neural-networks/face-detection/gaze-estimation/utils/process_keypoints.py index 6c743ee2b..d790c84fe 100644 --- a/neural-networks/face-detection/gaze-estimation/utils/process_keypoints.py +++ b/neural-networks/face-detection/gaze-estimation/utils/process_keypoints.py @@ -1,5 +1,4 @@ import depthai as dai -from depthai_nodes import ImgDetectionExtended, ImgDetectionsExtended class LandmarksProcessing(dai.node.ThreadedHostNode): @@ -27,9 +26,9 @@ def run(self) -> None: right_configs_message = dai.MessageGroup() face_configs_message = dai.MessageGroup() for i, detection in enumerate(detections): - detection: ImgDetectionExtended = detection - keypoints = detection.keypoints - face_size = detection.rotated_rect.size + detection: dai.ImgDetection = detection + keypoints = detection.getKeypoints2f() + face_size = detection.getBoundingBox().size face_w, face_h = face_size.width * self.w, face_size.height * self.h right_eye = self.crop_rectangle( @@ -46,7 +45,7 @@ def run(self) -> None: left_eye, img_detections ) - face_rect = detection.rotated_rect 
+ face_rect = detection.getBoundingBox() face_rect = face_rect.denormalize(self.w, self.h) face_configs_message[str(i + 100)] = self.create_crop_cfg( face_rect, img_detections @@ -77,7 +76,7 @@ def crop_rectangle(self, center_keypoint: dai.Point2f, crop_w: int, crop_h: int) return croped_rectangle.denormalize(self.w, self.h) def create_crop_cfg( - self, rectangle: dai.RotatedRect, img_detections: ImgDetectionsExtended + self, rectangle: dai.RotatedRect, img_detections: dai.ImgDetections ): cfg = dai.ImageManipConfig() cfg.addCropRotatedRect(rectangle, normalizedCoords=False) diff --git a/neural-networks/face-detection/head-posture-detection/main.py b/neural-networks/face-detection/head-posture-detection/main.py index 727f9c8cf..973a0fa48 100644 --- a/neural-networks/face-detection/head-posture-detection/main.py +++ b/neural-networks/face-detection/head-posture-detection/main.py @@ -1,8 +1,7 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, GatherData, ImgDetectionsBridge -from depthai_nodes.node.utils import generate_script_content +from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper from utils.annotation_node import AnnotationNode from utils.arguments import initialize_argparser @@ -74,34 +73,23 @@ ) det_nn.input.setBlocking(True) - # detection processing - det_bridge = pipeline.create(ImgDetectionsBridge).build( - det_nn.out - ) # TODO: remove once we have it working with ImgDetectionsExtended - script_node = pipeline.create(dai.node.Script) - det_bridge.out.link(script_node.inputs["det_in"]) - input_node_out.link(script_node.inputs["preview"]) - script_content = generate_script_content( - resize_width=pose_model_w, - resize_height=pose_model_h, + crop_node = pipeline.create(FrameCropper).fromImgDetections( + inputImgDetections=det_nn.out, + ).build( + inputImage=input_node_out, + outputSize=(pose_model_w, pose_model_h), ) - script_node.setScript(script_content) - - crop_node = 
pipeline.create(dai.node.ImageManip) - crop_node.initialConfig.setOutputSize(pose_model_w, pose_model_h) - crop_node.inputConfig.setWaitForMessage(True) - - script_node.outputs["manip_cfg"].link(crop_node.inputConfig) - script_node.outputs["manip_img"].link(crop_node.inputImage) pose_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( crop_node.out, pose_model_nn_archive ) # detections and recognitions sync - gather_data_node = pipeline.create(GatherData).build(args.fps_limit) - pose_nn.outputs.link(gather_data_node.input_data) - det_nn.out.link(gather_data_node.input_reference) + gather_data_node = pipeline.create(GatherData).build( + camera_fps=args.fps_limit, + input_data=pose_nn.outputs, + input_reference=det_nn.out, + ) # annotation annotation_node = pipeline.create(AnnotationNode).build(gather_data_node.out) diff --git a/neural-networks/face-detection/head-posture-detection/requirements.txt b/neural-networks/face-detection/head-posture-detection/requirements.txt index 8e4027fc1..c2a0c7bb7 100644 --- a/neural-networks/face-detection/head-posture-detection/requirements.txt +++ b/neural-networks/face-detection/head-posture-detection/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.3.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git a/neural-networks/face-detection/head-posture-detection/utils/annotation_node.py b/neural-networks/face-detection/head-posture-detection/utils/annotation_node.py index 20b6ff898..fd9477227 100644 --- a/neural-networks/face-detection/head-posture-detection/utils/annotation_node.py +++ b/neural-networks/face-detection/head-posture-detection/utils/annotation_node.py @@ -2,7 +2,7 @@ import numpy as np import depthai as dai -from depthai_nodes import ImgDetectionsExtended, Predictions +from depthai_nodes import Predictions from depthai_nodes.utils import AnnotationHelper 
@@ -19,21 +19,21 @@ def build( return self def process(self, gather_data_msg: dai.Buffer) -> None: - img_detections_extended_msg: ImgDetectionsExtended = ( + img_detections_msg: dai.ImgDetections = ( gather_data_msg.reference_data ) - assert isinstance(img_detections_extended_msg, ImgDetectionsExtended) + assert isinstance(img_detections_msg, dai.ImgDetections) pose_msg_group_list: List[dai.MessageGroup] = gather_data_msg.gathered assert isinstance(pose_msg_group_list, list) assert all(isinstance(msg, dai.MessageGroup) for msg in pose_msg_group_list) - assert len(img_detections_extended_msg.detections) == len(pose_msg_group_list) + assert len(img_detections_msg.detections) == len(pose_msg_group_list) annotations = AnnotationHelper() - for img_detection_extended_msg, pose_msg_group in zip( - img_detections_extended_msg.detections, pose_msg_group_list + for img_detection_msg, pose_msg_group in zip( + img_detections_msg.detections, pose_msg_group_list ): yaw_msg: Predictions = pose_msg_group["0"] assert isinstance(yaw_msg, Predictions) @@ -49,15 +49,15 @@ def process(self, gather_data_msg: dai.Buffer) -> None: pose_information = f"Pitch: {pitch:.0f} \nYaw: {yaw:.0f} \nRoll: {roll:.0f}" - outer_points = img_detection_extended_msg.rotated_rect.getOuterRect() + outer_points = img_detection_msg.getBoundingBox().getOuterRect() x_min, y_min, x_max, _ = [np.round(x, 2) for x in outer_points] annotations.draw_text(pose_information, (x_max, y_min + 0.1), size=16) annotations.draw_text(pose_text, (x_min, y_min), size=28) annotations_msg = annotations.build( - timestamp=img_detections_extended_msg.getTimestamp(), - sequence_num=img_detections_extended_msg.getSequenceNum(), + timestamp=img_detections_msg.getTimestamp(), + sequence_num=img_detections_msg.getSequenceNum(), ) self.out.send(annotations_msg) diff --git a/neural-networks/object-detection/human-machine-safety/main.py b/neural-networks/object-detection/human-machine-safety/main.py index ca818e59d..f2a4563eb 100644 --- 
a/neural-networks/object-detection/human-machine-safety/main.py +++ b/neural-networks/object-detection/human-machine-safety/main.py @@ -4,7 +4,6 @@ MPPalmDetectionParser, DepthMerger, ImgDetectionsFilter, - ImgDetectionsBridge, ) from utils.arguments import initialize_argparser @@ -113,10 +112,6 @@ parser: MPPalmDetectionParser = palm_det_nn.getParser(0) parser.setConfidenceThreshold(0.7) - adapter = pipeline.create(ImgDetectionsBridge).build( - palm_det_nn.out, ignore_angle=True - ) - detection_depth_merger = pipeline.create(DepthMerger).build( output_2d=obj_det_nn.out, output_depth=stereo.depth, @@ -125,7 +120,7 @@ shrinking_factor=0.1, ) palm_depth_merger = pipeline.create(DepthMerger).build( - output_2d=adapter.out, + output_2d=palm_det_nn.out, output_depth=stereo.depth, calib_data=device.readCalibration2(), depth_alignment_socket=dai.CameraBoardSocket.CAM_A, diff --git a/neural-networks/object-detection/human-machine-safety/requirements.txt b/neural-networks/object-detection/human-machine-safety/requirements.txt index 56b6f790b..6f5034885 100644 --- a/neural-networks/object-detection/human-machine-safety/requirements.txt +++ b/neural-networks/object-detection/human-machine-safety/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 diff --git a/neural-networks/object-detection/human-machine-safety/utils/annotation_node.py b/neural-networks/object-detection/human-machine-safety/utils/annotation_node.py index 4a55d9d22..7aaf70cec 100644 --- a/neural-networks/object-detection/human-machine-safety/utils/annotation_node.py +++ b/neural-networks/object-detection/human-machine-safety/utils/annotation_node.py @@ -1,10 +1,9 @@ import depthai as dai -from depthai_nodes import ImgDetectionsExtended, ImgDetectionExtended import cv2 class AnnotationNode(dai.node.HostNode): - """Transforms ImgDetectionsExtended received from parsers to dai.ImgDetections""" + """Transforms received detections from parsers to 
dai.ImgDetections""" def __init__(self) -> None: super().__init__() @@ -28,19 +27,22 @@ def process( depth_msg: dai.ImgFrame, ): assert isinstance(detections_msg, dai.SpatialImgDetections) - img_detections = ImgDetectionsExtended() + img_detections = dai.ImgDetections() for detection in detections_msg.detections: detection: dai.SpatialImgDetection = detection - img_detection = ImgDetectionExtended() + img_detection = dai.ImgDetection() img_detection.label = detection.label - rotated_rect = ( - (detection.xmax + detection.xmin) / 2, - (detection.ymax + detection.ymin) / 2, - detection.xmax - detection.xmin, - detection.ymax - detection.ymin, + img_detection.setBoundingBox(dai.RotatedRect( + dai.Point2f( + (detection.xmax + detection.xmin) / 2, + (detection.ymax + detection.ymin) / 2, + ), + dai.Size2f( + detection.xmax - detection.xmin, + detection.ymax - detection.ymin, + ), 0, - ) - img_detection.rotated_rect = rotated_rect + )) img_detection.confidence = detection.confidence img_detections.detections.append(img_detection) diff --git a/neural-networks/object-detection/human-machine-safety/utils/detection_merger.py b/neural-networks/object-detection/human-machine-safety/utils/detection_merger.py index 371019ad6..7b7246711 100644 --- a/neural-networks/object-detection/human-machine-safety/utils/detection_merger.py +++ b/neural-networks/object-detection/human-machine-safety/utils/detection_merger.py @@ -1,5 +1,4 @@ import depthai as dai -from depthai_nodes import ImgDetectionsExtended class DetectionMerger(dai.node.HostNode): @@ -22,7 +21,7 @@ def build(self, det_nn_1: dai.Node.Output, det_nn_2: dai.Node.Output): def process(self, det_nn_1: dai.Buffer, det_nn_2: dai.Buffer) -> dai.ImgDetections: assert isinstance( det_nn_1, - (dai.ImgDetections, ImgDetectionsExtended, dai.SpatialImgDetections), + (dai.ImgDetections, dai.SpatialImgDetections), ) assert type(det_nn_1) is type(det_nn_2) new_dets = type(det_nn_1)() diff --git 
a/neural-networks/object-detection/text-blur/requirements.txt b/neural-networks/object-detection/text-blur/requirements.txt index df8d7aa85..77fbe7cf7 100644 --- a/neural-networks/object-detection/text-blur/requirements.txt +++ b/neural-networks/object-detection/text-blur/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/object-detection/text-blur/utils/blur_detections.py b/neural-networks/object-detection/text-blur/utils/blur_detections.py index 2a888f52d..cde908b92 100644 --- a/neural-networks/object-detection/text-blur/utils/blur_detections.py +++ b/neural-networks/object-detection/text-blur/utils/blur_detections.py @@ -18,10 +18,11 @@ def run(self) -> None: frame = self.input_frame.get() frame_copy = frame.getCvFrame() detections = self.input_detections.get().detections + dai.ImgDetections h, w = frame_copy.shape[:2] for detection in detections: - rect: dai.RotatedRect = detection.rotated_rect + rect: dai.RotatedRect = detection.getBoundingBox() rect = rect.denormalize(w, h) detection = rect.getOuterRect() bbox = [int(d) for d in detection] @@ -30,7 +31,7 @@ def run(self) -> None: bbox[2] = np.clip(bbox[2], 0, w) bbox[3] = np.clip(bbox[3], 0, h) - roi = frame_copy[bbox[1] : bbox[3], bbox[0] : bbox[2]] + roi = frame_copy[bbox[1]: bbox[3], bbox[0]: bbox[2]] roi_width = bbox[2] - bbox[0] roi_height = bbox[3] - bbox[1] @@ -56,7 +57,7 @@ def run(self) -> None: original_background = cv2.bitwise_and(roi, roi, mask=inverse_mask) combined = cv2.add(blurred_ellipse, original_background) - frame_copy[bbox[1] : bbox[3], bbox[0] : bbox[2]] = combined + frame_copy[bbox[1]: bbox[3], bbox[0]: bbox[2]] = combined ts = frame.getTimestamp() frame_type = frame.getType() diff --git a/neural-networks/object-detection/yolo-p/requirements.txt b/neural-networks/object-detection/yolo-p/requirements.txt index df8d7aa85..77fbe7cf7 
100644 --- a/neural-networks/object-detection/yolo-p/requirements.txt +++ b/neural-networks/object-detection/yolo-p/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/object-detection/yolo-p/utils/annotation_node.py b/neural-networks/object-detection/yolo-p/utils/annotation_node.py index 41ade20f8..7189d58d0 100644 --- a/neural-networks/object-detection/yolo-p/utils/annotation_node.py +++ b/neural-networks/object-detection/yolo-p/utils/annotation_node.py @@ -1,5 +1,5 @@ import depthai as dai -from depthai_nodes import ImgDetectionsExtended, SegmentationMask +from depthai_nodes import SegmentationMask import cv2 import numpy as np @@ -31,7 +31,7 @@ def process( lane_segmentations_message: dai.Buffer, ) -> None: assert isinstance(frame, dai.ImgFrame) - assert isinstance(detections_message, ImgDetectionsExtended) + assert isinstance(detections_message, dai.ImgDetections) assert isinstance(road_segmentations_message, SegmentationMask) assert isinstance(lane_segmentations_message, SegmentationMask) diff --git a/neural-networks/object-tracking/deepsort-tracking/main.py b/neural-networks/object-tracking/deepsort-tracking/main.py index b353c2ef2..5072728b9 100644 --- a/neural-networks/object-tracking/deepsort-tracking/main.py +++ b/neural-networks/object-tracking/deepsort-tracking/main.py @@ -1,8 +1,7 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, GatherData -from depthai_nodes.node.utils import generate_script_content +from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper from utils.arguments import initialize_argparser from utils.deepsort_tracking import DeepsortTracking @@ -79,30 +78,23 @@ ) # detection processing - script = pipeline.create(dai.node.Script) - det_nn.out.link(script.inputs["det_in"]) - 
det_nn.passthrough.link(script.inputs["preview"]) - script_content = generate_script_content( - resize_width=embeddings_model_w, - resize_height=embeddings_model_h, - padding=0, + crop_node = pipeline.create(FrameCropper).fromImgDetections( + inputImgDetections=det_nn.out, + ).build( + inputImage=det_nn.passthrough, + outputSize=(embeddings_model_w, embeddings_model_h), ) - script.setScript(script_content) - - crop_node = pipeline.create(dai.node.ImageManip) - crop_node.initialConfig.setOutputSize(embeddings_model_w, embeddings_model_h) - - script.outputs["manip_cfg"].link(crop_node.inputConfig) - script.outputs["manip_img"].link(crop_node.inputImage) embeddings_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( crop_node.out, embeddings_model_nn_archive ) # detections and embeddings sync - gather_data = pipeline.create(GatherData).build(camera_fps=args.fps_limit) - det_nn.out.link(gather_data.input_reference) - embeddings_nn.out.link(gather_data.input_data) + gather_data = pipeline.create(GatherData).build( + camera_fps=args.fps_limit, + input_data=embeddings_nn.out, + input_reference=det_nn.out, + ) # tracking deepsort_tracking = pipeline.create(DeepsortTracking).build( diff --git a/neural-networks/object-tracking/deepsort-tracking/requirements.txt b/neural-networks/object-tracking/deepsort-tracking/requirements.txt index e8f5a20a1..1886b4f90 100644 --- a/neural-networks/object-tracking/deepsort-tracking/requirements.txt +++ b/neural-networks/object-tracking/deepsort-tracking/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update numpy>=1.22 opencv-python-headless~=4.10.0 -scipy \ No newline at end of file +scipy diff --git a/neural-networks/object-tracking/people-tracker/main.py b/neural-networks/object-tracking/people-tracker/main.py index 9c399ec20..ff15aa988 100644 --- 
a/neural-networks/object-tracking/people-tracker/main.py +++ b/neural-networks/object-tracking/people-tracker/main.py @@ -1,7 +1,7 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, ImgDetectionsBridge +from depthai_nodes.node import ParsingNeuralNetwork from utils.arguments import initialize_argparser from utils.people_counter import PeopleCounter @@ -48,8 +48,6 @@ ) # tracking - bridge = pipeline.create(ImgDetectionsBridge).build(nn.out, ignore_angle=True) - tracker = pipeline.create(dai.node.ObjectTracker) tracker.setDetectionLabelsToTrack([0]) if platform == "RVC2": @@ -60,7 +58,7 @@ tracker.setTrackerThreshold(0.4) nn.passthrough.link(tracker.inputTrackerFrame) nn.passthrough.link(tracker.inputDetectionFrame) - bridge.out.link(tracker.inputDetections) + nn.out.link(tracker.inputDetections) # annotation tracklet_visualizer = pipeline.create(TrackletVisualizer).build( diff --git a/neural-networks/ocr/general-ocr/main.py b/neural-networks/ocr/general-ocr/main.py index 67ad009a7..03f6f8cdd 100644 --- a/neural-networks/ocr/general-ocr/main.py +++ b/neural-networks/ocr/general-ocr/main.py @@ -96,9 +96,11 @@ ocr_nn.input.setMaxSize(30) # detections and recognitions sync - gather_data_node = pipeline.create(GatherData).build(args.fps_limit) - detection_process_node.detections_output.link(gather_data_node.input_reference) - ocr_nn.out.link(gather_data_node.input_data) + gather_data_node = pipeline.create(GatherData).build( + camera_fps=args.fps_limit, + input_data=ocr_nn.out, + input_reference=detection_process_node.detections_output, + ) # annotation annotation_node = pipeline.create(OCRAnnotationNode) diff --git a/neural-networks/ocr/general-ocr/requirements.txt b/neural-networks/ocr/general-ocr/requirements.txt index cf3ffa17a..91e851566 100644 --- a/neural-networks/ocr/general-ocr/requirements.txt +++ b/neural-networks/ocr/general-ocr/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 
+depthai==3.3.0 +depthai-nodes==0.4.0 opencv-python-headless==4.10.0.84 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/ocr/general-ocr/utils/annotation_node.py b/neural-networks/ocr/general-ocr/utils/annotation_node.py index 3003a46cb..e84f3ec6e 100644 --- a/neural-networks/ocr/general-ocr/utils/annotation_node.py +++ b/neural-networks/ocr/general-ocr/utils/annotation_node.py @@ -27,7 +27,7 @@ def run(self): for i, recognition in enumerate(recognitions_list): detection = detections_list[i] - points = detection.rotated_rect.getPoints() + points = detection.getBoundingBox().getPoints() text_line = "" for text, score in zip(recognition.classes, recognition.scores): diff --git a/neural-networks/ocr/general-ocr/utils/host_process_detections.py b/neural-networks/ocr/general-ocr/utils/host_process_detections.py index 0e2d1f371..b5bac1506 100644 --- a/neural-networks/ocr/general-ocr/utils/host_process_detections.py +++ b/neural-networks/ocr/general-ocr/utils/host_process_detections.py @@ -2,8 +2,6 @@ import depthai as dai -from depthai_nodes import ImgDetectionExtended, ImgDetectionsExtended - class CropConfigsCreator(dai.node.HostNode): """A node to create and send a dai.ImageManipConfig crop configuration for each @@ -16,11 +14,11 @@ class CropConfigsCreator(dai.node.HostNode): Attributes ---------- detections_input : dai.Input - The input link for the ImageDetectionsExtended | dai.ImgDetections message. + The input link for the dai.ImgDetections message. config_output : dai.Output The output link for the ImageManipConfig messages. detections_output : dai.Output - The output link for the ImgDetectionsExtended message. + The output link for the dai.ImgDetections message. source_size : Tuple[int, int] The size of the source image (width, height). 
target_size : Optional[Tuple[int, int]] = None @@ -96,7 +94,7 @@ def build( Parameters ---------- detections_input : dai.Node.Output - The input link for the ImgDetectionsExtended message + The input link for the dai.ImgDetections message source_size : Tuple[int, int] The size of the source image (width, height). target_size : Optional[Tuple[int, int]] @@ -120,25 +118,20 @@ def build( def process(self, detections_input: dai.Buffer) -> None: """Process the input detections and create crop configurations. This function is - ran every time a new ImgDetectionsExtended or dai.ImgDetections message is + ran every time a new dai.ImgDetections message is received. Sends len(detections) number of crop configurations to the config_output link. - In addition sends an ImgDetectionsExtended object containing the corresponding + In addition sends a dai.ImgDetections object containing the corresponding detections to the detections_output link. """ - assert isinstance(detections_input, (ImgDetectionsExtended, dai.ImgDetections)) + assert isinstance(detections_input, dai.ImgDetections) sequence_num = detections_input.getSequenceNum() timestamp = detections_input.getTimestamp() - if isinstance(detections_input, dai.ImgDetections): - detections_msg = self._convert_to_extended(detections_input) - else: - detections_msg = detections_input - - detections = detections_msg.detections + detections = detections_input.detections # Skip the current frame / load new frame cfg = dai.ImageManipConfig() @@ -151,7 +144,7 @@ def process(self, detections_input: dai.Buffer) -> None: valid_detections = [] for detection in detections: if detection.confidence > 0.8: - rect = detection.rotated_rect + rect = detection.getBoundingBox() rect = self._expand_rect(rect) xmin, ymin, xmax, ymax = rect.getOuterRect() @@ -179,11 +172,11 @@ def process(self, detections_input: dai.Buffer) -> None: while not send_status: send_status = self.config_output.trySend(cfg) - valid_msg = ImgDetectionsExtended() + valid_msg = 
dai.ImgDetections() valid_msg.setSequenceNum(sequence_num) valid_msg.setTimestamp(timestamp) valid_msg.detections = valid_detections - valid_msg.setTransformation(detections_msg.getTransformation()) + valid_msg.setTransformation(detections_input.getTransformation()) self.detections_output.send(valid_msg) @@ -193,37 +186,3 @@ def _expand_rect(self, rect: dai.RotatedRect) -> dai.RotatedRect: rect.size = dai.Size2f(s.width * 1.03, s.height * 1.10) return rect - - def _convert_to_extended( - self, detections: dai.ImgDetections - ) -> ImgDetectionsExtended: - rotated_rectangle_detections = [] - for det in detections.detections: - img_detection = ImgDetectionExtended() - img_detection.label = det.label - img_detection.confidence = det.confidence - - x_center = (det.xmin + det.xmax) / 2 - y_center = (det.ymin + det.ymax) / 2 - width = det.xmax - det.xmin - height = det.ymax - det.ymin - - img_detection.rotated_rect = (x_center, y_center, width, height, 0.0) - - rotated_rectangle_detections.append(img_detection) - - img_detections_extended = ImgDetectionsExtended() - img_detections_extended.setSequenceNum(detections.getSequenceNum()) - img_detections_extended.setTimestamp(detections.getTimestamp()) - img_detections_extended.detections = rotated_rectangle_detections - transformation = detections.getTransformation() - if transformation is not None: - img_detections_extended.setTransformation(transformation) - - return img_detections_extended - - def _validate_positive_integer(self, value: int): - if not isinstance(value, int): - raise TypeError("Value must be an integer.") - if value < 1: - raise ValueError("Value must be greater than 1.") diff --git a/neural-networks/pose-estimation/animal-pose/main.py b/neural-networks/pose-estimation/animal-pose/main.py index 43b679b89..89a4dbd78 100644 --- a/neural-networks/pose-estimation/animal-pose/main.py +++ b/neural-networks/pose-estimation/animal-pose/main.py @@ -3,11 +3,10 @@ import depthai as dai from depthai_nodes.node import ( 
ParsingNeuralNetwork, - ImgDetectionsBridge, ImgDetectionsFilter, GatherData, + FrameCropper, ) -from depthai_nodes.node.utils import generate_script_content from utils.arguments import initialize_argparser from utils.annotation_node import AnnotationNode @@ -67,36 +66,25 @@ ) # detection processing - script = pipeline.create(dai.node.Script) - detections_filter.out.link(script.inputs["det_in"]) - detection_nn.passthrough.link(script.inputs["preview"]) - script_content = generate_script_content( - resize_width=pose_model_w, - resize_height=pose_model_h, + pose_manip = pipeline.create(FrameCropper).fromImgDetections( + inputImgDetections=detections_filter.out, padding=PADDING, - resize_mode="STRETCH", + ).build( + inputImage=detection_nn.passthrough, + outputSize=(pose_model_w, pose_model_h), + resizeMode=dai.ImageManipConfig.ResizeMode.STRETCH, ) - script.setScript(script_content) - - pose_manip = pipeline.create(dai.node.ImageManip) - pose_manip.initialConfig.setOutputSize(pose_model_w, pose_model_h) - pose_manip.inputConfig.setWaitForMessage(True) - - script.outputs["manip_cfg"].link(pose_manip.inputConfig) - script.outputs["manip_img"].link(pose_manip.inputImage) pose_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( pose_manip.out, pose_nn_archive ) - detections_bridge = pipeline.create(ImgDetectionsBridge).build( - detections_filter.out - ) - # detections and pose estimations sync - gather_data = pipeline.create(GatherData).build(args.fps_limit) - detections_bridge.out.link(gather_data.input_reference) - pose_nn.out.link(gather_data.input_data) + gather_data = pipeline.create(GatherData).build( + camera_fps=args.fps_limit, + input_data=pose_nn.out, + input_reference=detections_filter.out, + ) # annotation connection_pairs = ( diff --git a/neural-networks/pose-estimation/animal-pose/requirements.txt b/neural-networks/pose-estimation/animal-pose/requirements.txt index e561ab514..c2a0c7bb7 100644 --- 
a/neural-networks/pose-estimation/animal-pose/requirements.txt +++ b/neural-networks/pose-estimation/animal-pose/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.5 \ No newline at end of file +depthai==3.3.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git a/neural-networks/pose-estimation/animal-pose/utils/annotation_node.py b/neural-networks/pose-estimation/animal-pose/utils/annotation_node.py index bfd82ee63..eb5722e82 100644 --- a/neural-networks/pose-estimation/animal-pose/utils/annotation_node.py +++ b/neural-networks/pose-estimation/animal-pose/utils/annotation_node.py @@ -1,7 +1,5 @@ import depthai as dai from depthai_nodes import ( - ImgDetectionsExtended, - ImgDetectionExtended, Keypoints, GatheredData, PRIMARY_COLOR, @@ -38,21 +36,21 @@ def build( def process(self, gathered_data: dai.Buffer) -> None: assert isinstance(gathered_data, GatheredData) - detections_message: ImgDetectionsExtended = gathered_data.reference_data + detections_message: dai.ImgDetections = gathered_data.reference_data - detections_list: List[ImgDetectionExtended] = detections_message.detections + detections_list: List[dai.ImgDetection] = detections_message.detections annotation_helper = AnnotationHelper() padding = self.padding for ix, detection in enumerate(detections_list): - detection.label_name = ( - "Animal" # Because dai.ImgDetection does not have label_name + detection.labelName = ( + "Animal" ) keypoints_message: Keypoints = gathered_data.gathered[ix] - xmin, ymin, xmax, ymax = detection.rotated_rect.getOuterRect() + xmin, ymin, xmax, ymax = detection.getBoundingBox().getOuterRect() slope_x = (xmax + padding) - (xmin - padding) slope_y = (ymax + padding) - (ymin - padding) diff --git a/neural-networks/pose-estimation/hand-pose/main.py b/neural-networks/pose-estimation/hand-pose/main.py index 82d5489c0..216e44b3c 100644 --- 
a/neural-networks/pose-estimation/hand-pose/main.py +++ b/neural-networks/pose-estimation/hand-pose/main.py @@ -106,9 +106,11 @@ ) # detections and pose estimations sync - gather_data = pipeline.create(GatherData).build(camera_fps=args.fps_limit) - detection_nn.out.link(gather_data.input_reference) - pose_nn.outputs.link(gather_data.input_data) + gather_data = pipeline.create(GatherData).build( + camera_fps=args.fps_limit, + input_data=pose_nn.outputs, + input_reference=detection_nn.out, + ) # annotation connection_pairs = ( diff --git a/neural-networks/pose-estimation/hand-pose/requirements.txt b/neural-networks/pose-estimation/hand-pose/requirements.txt index 56b6f790b..6f5034885 100644 --- a/neural-networks/pose-estimation/hand-pose/requirements.txt +++ b/neural-networks/pose-estimation/hand-pose/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 diff --git a/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py b/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py index 18efc36f1..23a2d4bf5 100644 --- a/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py +++ b/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py @@ -1,7 +1,5 @@ import depthai as dai from depthai_nodes import ( - ImgDetectionsExtended, - ImgDetectionExtended, Keypoints, Predictions, GatheredData, @@ -43,11 +41,11 @@ def build( def process(self, gathered_data: dai.Buffer, video_message: dai.ImgFrame) -> None: assert isinstance(gathered_data, GatheredData) - detections_message: ImgDetectionsExtended = gathered_data.reference_data - detections_list: List[ImgDetectionExtended] = detections_message.detections + detections_message: dai.ImgDetections = gathered_data.reference_data + detections_list: List[dai.ImgDetection] = detections_message.detections - new_dets = ImgDetectionsExtended() - new_dets.transformation = video_message.getTransformation() + new_dets = dai.ImgDetections() + 
new_dets.setTransformation(video_message.getTransformation()) annotation_helper = AnnotationHelper() @@ -62,29 +60,28 @@ def process(self, gathered_data: dai.Buffer, video_message: dai.ImgFrame) -> Non if hand_confidence < self.confidence_threshold: continue - width = detection.rotated_rect.size.width - height = detection.rotated_rect.size.height + width = detection.getBoundingBox().size.width + height = detection.getBoundingBox().size.height - xmin = detection.rotated_rect.center.x - width / 2 - xmax = detection.rotated_rect.center.x + width / 2 - ymin = detection.rotated_rect.center.y - height / 2 - ymax = detection.rotated_rect.center.y + height / 2 + xmin = detection.getBoundingBox().center.x - width / 2 + xmax = detection.getBoundingBox().center.x + width / 2 + ymin = detection.getBoundingBox().center.y - height / 2 + ymax = detection.getBoundingBox().center.y + height / 2 padding = self.padding_factor slope_x = (xmax + padding) - (xmin - padding) slope_y = (ymax + padding) - (ymin - padding) - new_det = ImgDetectionExtended() - new_det.rotated_rect = ( - detection.rotated_rect.center.x, - detection.rotated_rect.center.y, - detection.rotated_rect.size.width + 2 * padding, - detection.rotated_rect.size.height + 2 * padding, - detection.rotated_rect.angle, - ) + new_det = dai.ImgDetection() + rotated_rect = detection.getBoundingBox() + new_det.setBoundingBox(dai.RotatedRect( + rotated_rect.center, + dai.Size2f(rotated_rect.size.width + 2 * padding, rotated_rect.size.height + 2 * padding), + rotated_rect.angle, + )) new_det.label = 0 - new_det.label_name = "Hand" + new_det.labelName = "Hand" new_det.confidence = detection.confidence new_dets.detections.append(new_det) @@ -111,8 +108,8 @@ def process(self, gathered_data: dai.Buffer, video_message: dai.ImgFrame) -> Non text = "Left" if handness < 0.5 else "Right" text += f" {gesture}" - text_x = detection.rotated_rect.center.x - 0.05 - text_y = detection.rotated_rect.center.y - height / 2 - 0.10 + text_x = 
detection.getBoundingBox().center.x - 0.05 + text_y = detection.getBoundingBox().center.y - height / 2 - 0.10 annotation_helper.draw_text( text=text, diff --git a/neural-networks/pose-estimation/hand-pose/utils/process.py b/neural-networks/pose-estimation/hand-pose/utils/process.py index 9db495bf2..0e8623e95 100644 --- a/neural-networks/pose-estimation/hand-pose/utils/process.py +++ b/neural-networks/pose-estimation/hand-pose/utils/process.py @@ -1,5 +1,4 @@ import depthai as dai -from depthai_nodes import ImgDetectionsExtended, ImgDetectionExtended from typing import Tuple @@ -44,7 +43,7 @@ def build( return self def process(self, img_detections: dai.Buffer) -> None: - assert isinstance(img_detections, ImgDetectionsExtended) + assert isinstance(img_detections, dai.ImgDetections) detections = img_detections.detections num_detections = len(detections) @@ -56,8 +55,8 @@ def process(self, img_detections: dai.Buffer) -> None: for i, detection in enumerate(detections): cfg = dai.ImageManipConfig() - detection: ImgDetectionExtended = detection - rect = detection.rotated_rect + detection: dai.ImgDetection = detection + rect = detection.getBoundingBox() new_rect = dai.RotatedRect() new_rect.center.x = rect.center.x diff --git a/neural-networks/pose-estimation/human-pose/main.py b/neural-networks/pose-estimation/human-pose/main.py index fb3458fc1..a7a5f7aa6 100644 --- a/neural-networks/pose-estimation/human-pose/main.py +++ b/neural-networks/pose-estimation/human-pose/main.py @@ -6,8 +6,8 @@ HRNetParser, GatherData, ImgDetectionsFilter, + FrameCropper, ) -from depthai_nodes.node.utils import generate_script_content from utils.arguments import initialize_argparser from utils.annotation_node import AnnotationNode @@ -72,24 +72,13 @@ det_nn.out, labels_to_keep=valid_labels ) # we only want to work with person detections - script_node = pipeline.create(dai.node.Script) - det_nn.out.link(script_node.inputs["det_in"]) - det_nn.passthrough.link(script_node.inputs["preview"]) - 
script_content = generate_script_content( - resize_width=rec_model_nn_archive.getInputWidth(), - resize_height=rec_model_nn_archive.getInputHeight(), + crop_node = pipeline.create(FrameCropper).fromImgDetections( + inputImgDetections=det_nn.out, padding=PADDING, + ).build( + inputImage=det_nn.passthrough, + outputSize=(rec_model_nn_archive.getInputWidth(), rec_model_nn_archive.getInputHeight()), ) - script_node.setScript(script_content) - - crop_node = pipeline.create(dai.node.ImageManip) - crop_node.initialConfig.setOutputSize( - rec_model_nn_archive.getInputWidth(), rec_model_nn_archive.getInputHeight() - ) - crop_node.inputConfig.setWaitForMessage(True) - - script_node.outputs["manip_cfg"].link(crop_node.inputConfig) - script_node.outputs["manip_img"].link(crop_node.inputImage) rec_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( crop_node.out, rec_model_nn_archive @@ -102,9 +91,11 @@ ) # to get all keypoints so we can draw skeleton. We will filter them later. # detections and recognitions sync - gather_data_node = pipeline.create(GatherData).build(args.fps_limit) - rec_nn.out.link(gather_data_node.input_data) - detections_filter.out.link(gather_data_node.input_reference) + gather_data_node = pipeline.create(GatherData).build( + camera_fps=args.fps_limit, + input_data=rec_nn.out, + input_reference=detections_filter.out, + ) # annotation skeleton_edges = ( diff --git a/neural-networks/pose-estimation/human-pose/requirements.txt b/neural-networks/pose-estimation/human-pose/requirements.txt index e561ab514..dfe76c078 100644 --- a/neural-networks/pose-estimation/human-pose/requirements.txt +++ b/neural-networks/pose-estimation/human-pose/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.5 \ No newline at end of file +depthai==3.3.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update diff --git a/neural-networks/reidentification/human-reidentification/main.py 
b/neural-networks/reidentification/human-reidentification/main.py index cd9bc3332..6e94fdcbf 100644 --- a/neural-networks/reidentification/human-reidentification/main.py +++ b/neural-networks/reidentification/human-reidentification/main.py @@ -1,8 +1,7 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, GatherData, ImgDetectionsBridge -from depthai_nodes.node.utils import generate_script_content +from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper from utils.arguments import initialize_argparser from utils.identification import IdentificationNode @@ -91,36 +90,23 @@ resize_node.out, det_model_nn_archive ) - # detection processing - det_bridge = pipeline.create(ImgDetectionsBridge).build( - det_nn.out - ) # TODO: remove once we have it working with ImgDetectionsExtended - script_node = pipeline.create(dai.node.Script) - det_bridge.out.link(script_node.inputs["det_in"]) - input_node_out.link(script_node.inputs["preview"]) - script_content = generate_script_content( - resize_width=rec_nn_archive.getInputWidth(), - resize_height=rec_nn_archive.getInputHeight(), + crop_node = pipeline.create(FrameCropper).fromImgDetections( + inputImgDetections=det_nn.out, + ).build( + inputImage=input_node_out, + outputSize=(rec_nn_archive.getInputWidth(), rec_nn_archive.getInputHeight()), ) - script_node.setScript(script_content) - - crop_node = pipeline.create(dai.node.ImageManip) - crop_node.initialConfig.setOutputSize( - rec_nn_archive.getInputWidth(), rec_nn_archive.getInputHeight() - ) - crop_node.inputConfig.setWaitForMessage(True) - - script_node.outputs["manip_cfg"].link(crop_node.inputConfig) - script_node.outputs["manip_img"].link(crop_node.inputImage) rec_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( crop_node.out, rec_nn_archive ) # detections and recognitions sync - gather_data_node = pipeline.create(GatherData).build(args.fps_limit) - 
rec_nn.out.link(gather_data_node.input_data) - det_nn.out.link(gather_data_node.input_reference) + gather_data_node = pipeline.create(GatherData).build( + camera_fps=args.fps_limit, + input_data=rec_nn.out, + input_reference=det_nn.out, + ) # idenfication id_node = pipeline.create(IdentificationNode).build(gather_data_node.out, csim=CSIM) diff --git a/neural-networks/reidentification/human-reidentification/requirements.txt b/neural-networks/reidentification/human-reidentification/requirements.txt index 4013f7f77..7368ad5e2 100644 --- a/neural-networks/reidentification/human-reidentification/requirements.txt +++ b/neural-networks/reidentification/human-reidentification/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update numpy>=1.22 diff --git a/neural-networks/reidentification/human-reidentification/utils/identification.py b/neural-networks/reidentification/human-reidentification/utils/identification.py index 9c943f44f..dbc622b34 100644 --- a/neural-networks/reidentification/human-reidentification/utils/identification.py +++ b/neural-networks/reidentification/human-reidentification/utils/identification.py @@ -1,8 +1,6 @@ import numpy as np import depthai as dai -from depthai_nodes import ImgDetectionsExtended - class IdentificationNode(dai.node.HostNode): """A host node that re-identifies objects based on their embeddings similarity to a database of embeddings. 
@@ -52,15 +50,15 @@ def build( return self def process(self, gather_data_msg) -> None: - dets_msg: ImgDetectionsExtended = gather_data_msg.reference_data - assert isinstance(dets_msg, ImgDetectionsExtended) + dets_msg: dai.ImgDetections = gather_data_msg.reference_data + assert isinstance(dets_msg, dai.ImgDetections) rec_msg_list = gather_data_msg.gathered assert isinstance(rec_msg_list, list) assert all(isinstance(msg, dai.NNData) for msg in rec_msg_list) for detection, rec in zip(dets_msg.detections, rec_msg_list): - detection.label_name = self._get_label_name(rec, self._label_basename) + detection.labelName = self._get_label_name(rec, self._label_basename) self.out.send(dets_msg) diff --git a/streaming/webrtc-streaming/utils/transform.py b/streaming/webrtc-streaming/utils/transform.py index d6267c6ce..c1af2720e 100644 --- a/streaming/webrtc-streaming/utils/transform.py +++ b/streaming/webrtc-streaming/utils/transform.py @@ -4,7 +4,6 @@ from aiortc import VideoStreamTrack from av import VideoFrame from depthai_nodes.node import ParsingNeuralNetwork -from depthai_nodes import ImgDetectionExtended class VideoTransform(VideoStreamTrack): @@ -44,9 +43,7 @@ async def parse_frame(self): frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) for detection in dets: - if isinstance(detection, ImgDetectionExtended): - bbox = frameNorm(frame, detection.rotated_rect.getOuterRect()) - elif isinstance(detection, dai.ImgDetection): + if isinstance(detection, dai.ImgDetection): bbox = frameNorm( frame, ( diff --git a/tests/constants.py b/tests/constants.py index f0f169583..71dd4de6f 100644 --- a/tests/constants.py +++ b/tests/constants.py @@ -171,6 +171,5 @@ "Network compiled for 8 shaves, maximum available", "UserWarning: Specified provider 'TensorrtExecutionProvider' is not in available", "UserWarning: Specified provider 'CUDAExecutionProvider' is not in available", - "You are using ImgDetectionsBridge to transform from ImgDetectionsExtended to ImgDetections.", "Sync node has been 
trying to sync for", ] From f5642367f04b6d169033180ec9ebfc088b19014b Mon Sep 17 00:00:00 2001 From: bblazeva Date: Sat, 21 Feb 2026 14:15:49 +0100 Subject: [PATCH 03/14] ImgDetectionsFilter update --- integrations/hub-snaps-events/main.py | 8 +++----- neural-networks/3D-detection/objectron/main.py | 12 ++++-------- neural-networks/counting/people-counter/main.py | 6 +++--- .../face-detection/face-mask-detection/main.py | 2 +- .../object-detection/human-machine-safety/main.py | 5 ++--- neural-networks/object-detection/yolo-world/main.py | 5 ++--- .../object-tracking/collision-avoidance/main.py | 5 ++--- neural-networks/pose-estimation/animal-pose/main.py | 5 ++--- neural-networks/pose-estimation/human-pose/main.py | 5 ++--- 9 files changed, 21 insertions(+), 32 deletions(-) diff --git a/integrations/hub-snaps-events/main.py b/integrations/hub-snaps-events/main.py index ba63bc019..68712ee06 100644 --- a/integrations/hub-snaps-events/main.py +++ b/integrations/hub-snaps-events/main.py @@ -69,11 +69,9 @@ except ValueError: print(f"Class `{curr_class}` not predicted by the model, skipping.") - det_process_filter = pipeline.create(ImgDetectionsFilter).build( - nn_with_parser.out, - labels_to_keep=labels_to_keep, - confidence_threshold=args.confidence_threshold, - ) + det_process_filter = pipeline.create(ImgDetectionsFilter).build(nn_with_parser.out) + det_process_filter.keepLabels(labels_to_keep) + det_process_filter.minConfidence(args.confidence_threshold) snaps_producer = pipeline.create(SnapsProducer).build( frame=nn_with_parser.passthrough, diff --git a/neural-networks/3D-detection/objectron/main.py b/neural-networks/3D-detection/objectron/main.py index deaaafc02..56ca9c6ba 100644 --- a/neural-networks/3D-detection/objectron/main.py +++ b/neural-networks/3D-detection/objectron/main.py @@ -55,10 +55,8 @@ input_node, det_model_description, args.fps_limit ) - first_stage_filter = pipeline.create(ImgDetectionsFilter).build( - det_nn.out, - labels_to_keep=VALID_LABELS, - 
) + first_stage_filter = pipeline.create(ImgDetectionsFilter).build(det_nn.out) + first_stage_filter.keepLabels(VALID_LABELS) # detection processing crop_node = pipeline.create(FrameCropper).fromImgDetections( @@ -74,10 +72,8 @@ crop_node.out, pos_nn_archive ) - detections_filter = pipeline.create(ImgDetectionsFilter).build( - det_nn.out, - labels_to_keep=VALID_LABELS, - ) + detections_filter = pipeline.create(ImgDetectionsFilter).build(det_nn.out) + detections_filter.keepLabels(VALID_LABELS) # detections and position estimations sync gather_data = pipeline.create(GatherData).build( diff --git a/neural-networks/counting/people-counter/main.py b/neural-networks/counting/people-counter/main.py index 501cc52aa..16a9987df 100644 --- a/neural-networks/counting/people-counter/main.py +++ b/neural-networks/counting/people-counter/main.py @@ -51,9 +51,9 @@ # person detection filter classes = det_model_nn_archive.getConfig().model.heads[0].metadata.classes labels_to_keep = [classes.index("person")] if "person" in classes else [] - det_filter = pipeline.create(ImgDetectionsFilter).build( - det_nn.out, labels_to_keep=labels_to_keep, confidence_threshold=0.5 - ) + det_filter = pipeline.create(ImgDetectionsFilter).build(det_nn.out) + det_filter.keepLabels(labels_to_keep) + det_filter.minConfidence(0.5) # annotation annotation_node = pipeline.create(AnnotationNode).build(det_filter.out) diff --git a/neural-networks/face-detection/face-mask-detection/main.py b/neural-networks/face-detection/face-mask-detection/main.py index 3d37de8c2..dff669ce4 100644 --- a/neural-networks/face-detection/face-mask-detection/main.py +++ b/neural-networks/face-detection/face-mask-detection/main.py @@ -56,7 +56,7 @@ # filter and rename detection labels det_process_filter = pipeline.create(ImgDetectionsFilter).build(det_nn.out) - det_process_filter.setLabels(list(LABEL_ENCODING.keys()), keep=True) + det_process_filter.keepLabels(list(LABEL_ENCODING.keys())) # visualization 
visualizer.addTopic("Video", det_nn.passthrough, "images") diff --git a/neural-networks/object-detection/human-machine-safety/main.py b/neural-networks/object-detection/human-machine-safety/main.py index f2a4563eb..4a5b0badc 100644 --- a/neural-networks/object-detection/human-machine-safety/main.py +++ b/neural-networks/object-detection/human-machine-safety/main.py @@ -137,9 +137,8 @@ merged_labels = classes + ["palm"] filter_labels = [merged_labels.index(i) for i in DANGEROUS_OBJECTS] filter_labels.append(merged_labels.index("palm")) - detection_filter = pipeline.create(ImgDetectionsFilter).build( - merge_detections.output, labels_to_keep=filter_labels - ) + detection_filter = pipeline.create(ImgDetectionsFilter).build(merge_detections.output) + detection_filter.keepLabels(filter_labels) # annotation measure_object_distance = pipeline.create(MeasureObjectDistance).build( diff --git a/neural-networks/object-detection/yolo-world/main.py b/neural-networks/object-detection/yolo-world/main.py index 7eb41a5a9..77168fae7 100644 --- a/neural-networks/object-detection/yolo-world/main.py +++ b/neural-networks/object-detection/yolo-world/main.py @@ -82,9 +82,8 @@ # filter and rename detection labels det_process_filter = pipeline.create(ImgDetectionsFilter).build(nn_with_parser.out) - det_process_filter.setLabels( - labels=[i for i in range(len(args.class_names))], keep=True - ) + det_process_filter.keepLabels([i for i in range(len(args.class_names))]) + annotation_node = pipeline.create(AnnotationNode).build( det_process_filter.out, label_encoding={k: v for k, v in enumerate(args.class_names)}, diff --git a/neural-networks/object-tracking/collision-avoidance/main.py b/neural-networks/object-tracking/collision-avoidance/main.py index b3b0b99f2..aac30a2df 100644 --- a/neural-networks/object-tracking/collision-avoidance/main.py +++ b/neural-networks/object-tracking/collision-avoidance/main.py @@ -66,9 +66,8 @@ nn_archive, numShaves=6 ) # TODO: change to numShaves=4 if running 
on OAK-D Lite - img_detections_filter = pipeline.create(ImgDetectionsFilter).build( - nn.out, labels_to_keep=[person_label] - ) + img_detections_filter = pipeline.create(ImgDetectionsFilter).build(nn.out) + img_detections_filter.keepLabels([person_label]) # tracking tracker = pipeline.create(dai.node.ObjectTracker) diff --git a/neural-networks/pose-estimation/animal-pose/main.py b/neural-networks/pose-estimation/animal-pose/main.py index 89a4dbd78..fa3e3583c 100644 --- a/neural-networks/pose-estimation/animal-pose/main.py +++ b/neural-networks/pose-estimation/animal-pose/main.py @@ -61,9 +61,8 @@ input_node, det_nn_archive, fps=args.fps_limit ) - detections_filter = pipeline.create(ImgDetectionsFilter).build( - detection_nn.out, labels_to_keep=VALID_LABELS - ) + detections_filter = pipeline.create(ImgDetectionsFilter).build(detection_nn.out) + detections_filter.keepLabels(VALID_LABELS) # detection processing pose_manip = pipeline.create(FrameCropper).fromImgDetections( diff --git a/neural-networks/pose-estimation/human-pose/main.py b/neural-networks/pose-estimation/human-pose/main.py index a7a5f7aa6..c9566d0ea 100644 --- a/neural-networks/pose-estimation/human-pose/main.py +++ b/neural-networks/pose-estimation/human-pose/main.py @@ -68,9 +68,8 @@ valid_labels = [ det_model_nn_archive.getConfig().model.heads[0].metadata.classes.index("person") ] - detections_filter = pipeline.create(ImgDetectionsFilter).build( - det_nn.out, labels_to_keep=valid_labels - ) # we only want to work with person detections + detections_filter = pipeline.create(ImgDetectionsFilter).build(det_nn.out) + detections_filter.keepLabels(valid_labels) # we only want to work with person detections crop_node = pipeline.create(FrameCropper).fromImgDetections( inputImgDetections=det_nn.out, From c76e8580fa8f0095ce5b3cf2baee12e767ca53e9 Mon Sep 17 00:00:00 2001 From: bblazeva Date: Sat, 21 Feb 2026 14:42:08 +0100 Subject: [PATCH 04/14] drawing dai.ImgDetections fix --- 
.../object-detection/human-machine-safety/requirements.txt | 2 +- .../human-machine-safety/utils/annotation_node.py | 5 ++++- neural-networks/object-detection/yolo-world/requirements.txt | 3 +-- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/neural-networks/object-detection/human-machine-safety/requirements.txt b/neural-networks/object-detection/human-machine-safety/requirements.txt index 6f5034885..c2a0c7bb7 100644 --- a/neural-networks/object-detection/human-machine-safety/requirements.txt +++ b/neural-networks/object-detection/human-machine-safety/requirements.txt @@ -1,2 +1,2 @@ depthai==3.3.0 -depthai-nodes==0.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git a/neural-networks/object-detection/human-machine-safety/utils/annotation_node.py b/neural-networks/object-detection/human-machine-safety/utils/annotation_node.py index 7aaf70cec..45af34d27 100644 --- a/neural-networks/object-detection/human-machine-safety/utils/annotation_node.py +++ b/neural-networks/object-detection/human-machine-safety/utils/annotation_node.py @@ -28,10 +28,12 @@ def process( ): assert isinstance(detections_msg, dai.SpatialImgDetections) img_detections = dai.ImgDetections() + det_list = [] for detection in detections_msg.detections: detection: dai.SpatialImgDetection = detection img_detection = dai.ImgDetection() img_detection.label = detection.label + img_detection.labelName = detection.labelName img_detection.setBoundingBox(dai.RotatedRect( dai.Point2f( (detection.xmax + detection.xmin) / 2, @@ -44,7 +46,8 @@ def process( 0, )) img_detection.confidence = detection.confidence - img_detections.detections.append(img_detection) + det_list.append(img_detection) + img_detections.detections = det_list depth_map = depth_msg.getFrame() colorred_depth_map = cv2.applyColorMap( diff --git a/neural-networks/object-detection/yolo-world/requirements.txt 
b/neural-networks/object-detection/yolo-world/requirements.txt index 7be5707f8..3b056a9d1 100644 --- a/neural-networks/object-detection/yolo-world/requirements.txt +++ b/neural-networks/object-detection/yolo-world/requirements.txt @@ -1,6 +1,5 @@ depthai==3.3.0 -depthai-nodes==0.4.0 -opencv-python-headless~=4.10.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update numpy>=1.22 onnxruntime # onnxruntime-gpu # if you want to use CUDAExecutionProvider From 2478e310c12120d0333ba4e53dc29a8065f40276 Mon Sep 17 00:00:00 2001 From: bblazeva Date: Sat, 21 Feb 2026 16:18:24 +0100 Subject: [PATCH 05/14] dai.ImgDetections display fix --- .../objectron/utils/annotation_node.py | 2 +- .../face-mask-detection/requirements.txt | 2 +- .../pose-estimation/hand-pose/main.py | 36 ++------- .../hand-pose/requirements.txt | 2 +- .../hand-pose/utils/annotation_node.py | 4 +- .../hand-pose/utils/process.py | 77 ------------------- 6 files changed, 14 insertions(+), 109 deletions(-) delete mode 100644 neural-networks/pose-estimation/hand-pose/utils/process.py diff --git a/neural-networks/3D-detection/objectron/utils/annotation_node.py b/neural-networks/3D-detection/objectron/utils/annotation_node.py index edb4e9aa8..9d585c475 100644 --- a/neural-networks/3D-detection/objectron/utils/annotation_node.py +++ b/neural-networks/3D-detection/objectron/utils/annotation_node.py @@ -73,7 +73,7 @@ def process(self, gathered_data: dai.Buffer) -> None: ) annotation_helper.draw_text( - text=f"{(detection.confidence * 100):.2f}%", + text=f"{int(detection.confidence * 100)}%", position=(detection.xmin, detection.ymin - 0.05), color=SECONDARY_COLOR, size=16.0, diff --git a/neural-networks/face-detection/face-mask-detection/requirements.txt b/neural-networks/face-detection/face-mask-detection/requirements.txt index 218b6f03b..dfe76c078 100644 --- a/neural-networks/face-detection/face-mask-detection/requirements.txt +++ 
b/neural-networks/face-detection/face-mask-detection/requirements.txt @@ -1,2 +1,2 @@ depthai==3.3.0 -depthai-nodes==0.4.0 \ No newline at end of file +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update diff --git a/neural-networks/pose-estimation/hand-pose/main.py b/neural-networks/pose-estimation/hand-pose/main.py index 216e44b3c..dd4373804 100644 --- a/neural-networks/pose-estimation/hand-pose/main.py +++ b/neural-networks/pose-estimation/hand-pose/main.py @@ -1,11 +1,10 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, GatherData +from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper from utils.arguments import initialize_argparser from utils.annotation_node import AnnotationNode -from utils.process import ProcessDetections _, args = initialize_argparser() @@ -73,36 +72,17 @@ ) # detection processing - detections_processor = pipeline.create(ProcessDetections).build( - detections_input=detection_nn.out, + crop_node = pipeline.create(FrameCropper).fromImgDetections( + inputImgDetections=detection_nn.out, padding=PADDING, - target_size=(pose_nn_archive.getInputWidth(), pose_nn_archive.getInputHeight()), + ).build( + inputImage=detection_nn.passthrough, + outputSize=(pose_nn_archive.getInputWidth(), pose_nn_archive.getInputHeight()), + resizeMode=dai.ImageManipConfig.ResizeMode.STRETCH, ) - script = pipeline.create(dai.node.Script) - script.setScriptPath(str(Path(__file__).parent / "utils/script.py")) - script.inputs["frame_input"].setMaxSize(30) - script.inputs["config_input"].setMaxSize(30) - script.inputs["num_configs_input"].setMaxSize(30) - - detection_nn.passthrough.link(script.inputs["frame_input"]) - detections_processor.config_output.link(script.inputs["config_input"]) - detections_processor.num_configs_output.link(script.inputs["num_configs_input"]) - - pose_manip = pipeline.create(dai.node.ImageManip) - 
pose_manip.initialConfig.setOutputSize( - pose_nn_archive.getInputWidth(), pose_nn_archive.getInputHeight() - ) - pose_manip.inputConfig.setMaxSize(30) - pose_manip.inputImage.setMaxSize(30) - pose_manip.setNumFramesPool(30) - pose_manip.inputConfig.setWaitForMessage(True) - - script.outputs["output_config"].link(pose_manip.inputConfig) - script.outputs["output_frame"].link(pose_manip.inputImage) - pose_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( - pose_manip.out, pose_nn_archive + crop_node.out, pose_nn_archive ) # detections and pose estimations sync diff --git a/neural-networks/pose-estimation/hand-pose/requirements.txt b/neural-networks/pose-estimation/hand-pose/requirements.txt index 6f5034885..dfe76c078 100644 --- a/neural-networks/pose-estimation/hand-pose/requirements.txt +++ b/neural-networks/pose-estimation/hand-pose/requirements.txt @@ -1,2 +1,2 @@ depthai==3.3.0 -depthai-nodes==0.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update diff --git a/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py b/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py index 23a2d4bf5..6cc4c14ff 100644 --- a/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py +++ b/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py @@ -48,6 +48,7 @@ def process(self, gathered_data: dai.Buffer, video_message: dai.ImgFrame) -> Non new_dets.setTransformation(video_message.getTransformation()) annotation_helper = AnnotationHelper() + det_list = [] for ix, detection in enumerate(detections_list): keypoints_msg: Keypoints = gathered_data.gathered[ix]["0"] @@ -83,7 +84,7 @@ def process(self, gathered_data: dai.Buffer, video_message: dai.ImgFrame) -> Non new_det.label = 0 new_det.labelName = "Hand" new_det.confidence = detection.confidence - new_dets.detections.append(new_det) + det_list.append(new_det) xs = [] ys = [] @@ -122,6 +123,7 @@ def process(self, 
gathered_data: dai.Buffer, video_message: dai.ImgFrame) -> Non points=keypoints, color=SECONDARY_COLOR, thickness=2 ) + new_dets.detections = det_list new_dets.setTimestamp(detections_message.getTimestamp()) new_dets.setSequenceNum(detections_message.getSequenceNum()) self.out_detections.send(new_dets) diff --git a/neural-networks/pose-estimation/hand-pose/utils/process.py b/neural-networks/pose-estimation/hand-pose/utils/process.py deleted file mode 100644 index 0e8623e95..000000000 --- a/neural-networks/pose-estimation/hand-pose/utils/process.py +++ /dev/null @@ -1,77 +0,0 @@ -import depthai as dai -from typing import Tuple - - -class ProcessDetections(dai.node.HostNode): - """A host node for processing a list of detections in a two-stage pipeline. - The node iterates over a list of detections and sends a dai.MessageGroup with - a list of ImageManipConfig objects that can be executed by the ImageManip node. - - Before use, the target size need to be set with the set_target_size method. - Attributes - ---------- - detections_input : dai.Input - The input message for the detections. - config_output : dai.Output - The output message for the ImageManipConfig objects. - num_configs_output : dai.Output - The output message for the number of configs. - padding: float - The padding factor to enlarge the bounding box a little bit. 
- - """ - - def __init__(self): - super().__init__() - self.detections_input = self.createInput() - self.config_output = self.createOutput() - self.num_configs_output = self.createOutput() - self.padding = 0.1 - self._target_h = None - self._target_w = None - - def build( - self, - detections_input: dai.Node.Output, - padding: float, - target_size: Tuple[int, int], - ) -> "ProcessDetections": - self.padding = padding - self._target_w = target_size[0] - self._target_h = target_size[1] - self.link_args(detections_input) - return self - - def process(self, img_detections: dai.Buffer) -> None: - assert isinstance(img_detections, dai.ImgDetections) - detections = img_detections.detections - - num_detections = len(detections) - num_cfgs_message = dai.Buffer(num_detections) - - num_cfgs_message.setTimestamp(img_detections.getTimestamp()) - num_cfgs_message.setSequenceNum(img_detections.getSequenceNum()) - self.num_configs_output.send(num_cfgs_message) - - for i, detection in enumerate(detections): - cfg = dai.ImageManipConfig() - detection: dai.ImgDetection = detection - rect = detection.getBoundingBox() - - new_rect = dai.RotatedRect() - new_rect.center.x = rect.center.x - new_rect.center.y = rect.center.y - new_rect.size.width = rect.size.width + 0.1 * 2 - new_rect.size.height = rect.size.height + 0.1 * 2 - new_rect.angle = 0 - - cfg.addCropRotatedRect(new_rect, normalizedCoords=True) - cfg.setOutputSize( - self._target_w, - self._target_h, - dai.ImageManipConfig.ResizeMode.STRETCH, - ) - cfg.setReusePreviousImage(False) - cfg.setTimestamp(img_detections.getTimestamp()) - cfg.setSequenceNum(img_detections.getSequenceNum()) - self.config_output.send(cfg) From df857ea42c46de6887e865a4bb23396565353019 Mon Sep 17 00:00:00 2001 From: bblazeva Date: Mon, 23 Feb 2026 10:21:33 +0100 Subject: [PATCH 06/14] hand pose remove FrameCropper --- .../text-blur/requirements.txt | 2 +- .../pose-estimation/hand-pose/main.py | 36 +++++++-- .../hand-pose/utils/process.py | 77 
+++++++++++++++++++ 3 files changed, 106 insertions(+), 9 deletions(-) create mode 100644 neural-networks/pose-estimation/hand-pose/utils/process.py diff --git a/neural-networks/object-detection/text-blur/requirements.txt b/neural-networks/object-detection/text-blur/requirements.txt index 77fbe7cf7..c5f9dee1b 100644 --- a/neural-networks/object-detection/text-blur/requirements.txt +++ b/neural-networks/object-detection/text-blur/requirements.txt @@ -1,4 +1,4 @@ depthai==3.3.0 -depthai-nodes==0.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/pose-estimation/hand-pose/main.py b/neural-networks/pose-estimation/hand-pose/main.py index dd4373804..216e44b3c 100644 --- a/neural-networks/pose-estimation/hand-pose/main.py +++ b/neural-networks/pose-estimation/hand-pose/main.py @@ -1,10 +1,11 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper +from depthai_nodes.node import ParsingNeuralNetwork, GatherData from utils.arguments import initialize_argparser from utils.annotation_node import AnnotationNode +from utils.process import ProcessDetections _, args = initialize_argparser() @@ -72,17 +73,36 @@ ) # detection processing - crop_node = pipeline.create(FrameCropper).fromImgDetections( - inputImgDetections=detection_nn.out, + detections_processor = pipeline.create(ProcessDetections).build( + detections_input=detection_nn.out, padding=PADDING, - ).build( - inputImage=detection_nn.passthrough, - outputSize=(pose_nn_archive.getInputWidth(), pose_nn_archive.getInputHeight()), - resizeMode=dai.ImageManipConfig.ResizeMode.STRETCH, + target_size=(pose_nn_archive.getInputWidth(), pose_nn_archive.getInputHeight()), ) + script = pipeline.create(dai.node.Script) + script.setScriptPath(str(Path(__file__).parent / "utils/script.py")) + 
script.inputs["frame_input"].setMaxSize(30) + script.inputs["config_input"].setMaxSize(30) + script.inputs["num_configs_input"].setMaxSize(30) + + detection_nn.passthrough.link(script.inputs["frame_input"]) + detections_processor.config_output.link(script.inputs["config_input"]) + detections_processor.num_configs_output.link(script.inputs["num_configs_input"]) + + pose_manip = pipeline.create(dai.node.ImageManip) + pose_manip.initialConfig.setOutputSize( + pose_nn_archive.getInputWidth(), pose_nn_archive.getInputHeight() + ) + pose_manip.inputConfig.setMaxSize(30) + pose_manip.inputImage.setMaxSize(30) + pose_manip.setNumFramesPool(30) + pose_manip.inputConfig.setWaitForMessage(True) + + script.outputs["output_config"].link(pose_manip.inputConfig) + script.outputs["output_frame"].link(pose_manip.inputImage) + pose_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( - crop_node.out, pose_nn_archive + pose_manip.out, pose_nn_archive ) # detections and pose estimations sync diff --git a/neural-networks/pose-estimation/hand-pose/utils/process.py b/neural-networks/pose-estimation/hand-pose/utils/process.py new file mode 100644 index 000000000..0e8623e95 --- /dev/null +++ b/neural-networks/pose-estimation/hand-pose/utils/process.py @@ -0,0 +1,77 @@ +import depthai as dai +from typing import Tuple + + +class ProcessDetections(dai.node.HostNode): + """A host node for processing a list of detections in a two-stage pipeline. + The node iterates over a list of detections and sends a dai.MessageGroup with + a list of ImageManipConfig objects that can be executed by the ImageManip node. + + Before use, the target size need to be set with the set_target_size method. + Attributes + ---------- + detections_input : dai.Input + The input message for the detections. + config_output : dai.Output + The output message for the ImageManipConfig objects. + num_configs_output : dai.Output + The output message for the number of configs. 
+ padding: float + The padding factor to enlarge the bounding box a little bit. + + """ + + def __init__(self): + super().__init__() + self.detections_input = self.createInput() + self.config_output = self.createOutput() + self.num_configs_output = self.createOutput() + self.padding = 0.1 + self._target_h = None + self._target_w = None + + def build( + self, + detections_input: dai.Node.Output, + padding: float, + target_size: Tuple[int, int], + ) -> "ProcessDetections": + self.padding = padding + self._target_w = target_size[0] + self._target_h = target_size[1] + self.link_args(detections_input) + return self + + def process(self, img_detections: dai.Buffer) -> None: + assert isinstance(img_detections, dai.ImgDetections) + detections = img_detections.detections + + num_detections = len(detections) + num_cfgs_message = dai.Buffer(num_detections) + + num_cfgs_message.setTimestamp(img_detections.getTimestamp()) + num_cfgs_message.setSequenceNum(img_detections.getSequenceNum()) + self.num_configs_output.send(num_cfgs_message) + + for i, detection in enumerate(detections): + cfg = dai.ImageManipConfig() + detection: dai.ImgDetection = detection + rect = detection.getBoundingBox() + + new_rect = dai.RotatedRect() + new_rect.center.x = rect.center.x + new_rect.center.y = rect.center.y + new_rect.size.width = rect.size.width + 0.1 * 2 + new_rect.size.height = rect.size.height + 0.1 * 2 + new_rect.angle = 0 + + cfg.addCropRotatedRect(new_rect, normalizedCoords=True) + cfg.setOutputSize( + self._target_w, + self._target_h, + dai.ImageManipConfig.ResizeMode.STRETCH, + ) + cfg.setReusePreviousImage(False) + cfg.setTimestamp(img_detections.getTimestamp()) + cfg.setSequenceNum(img_detections.getSequenceNum()) + self.config_output.send(cfg) From fe653997416727b82fe0f68e31d86e1084d2f9d5 Mon Sep 17 00:00:00 2001 From: bblazeva Date: Thu, 26 Feb 2026 12:16:37 +0100 Subject: [PATCH 07/14] updated requirements + examples: integrations, gaze-estimation, barcode, yolo-world, ocr, 
hand-pose, streaming --- .../lossless-zooming/requirements.txt | 2 +- .../backend/requirements.txt | 4 +- .../backend/src/nn/label_mapper_node.py | 4 +- .../box-measurement/requirements.txt | 2 +- .../triangulation/requirements.txt | 2 +- .../backend/src/core/annotation_node.py | 22 +++--- .../backend/src/requirements.txt | 4 +- .../3D-detection/objectron/main.py | 25 ++++--- .../counting/people-counter/requirements.txt | 2 +- .../face-detection/age-gender/main.py | 17 +++-- .../age-gender/utils/annotation_node.py | 12 +--- .../blur-faces/utils/blur_detections.py | 4 +- .../emotion-recognition/main.py | 17 +++-- .../face-detection/fatigue-detection/main.py | 14 ++-- .../face-detection/gaze-estimation/main.py | 25 ++++--- .../gaze-estimation/requirements.txt | 2 +- .../gaze-estimation/utils/node_creators.py | 32 --------- .../utils/process_keypoints.py | 6 +- .../head-posture-detection/main.py | 14 ++-- .../utils/annotation_node.py | 4 +- .../requirements.txt | 4 +- .../utils/annotation_node.py | 19 ++---- .../utils/host_crop_config_creator.py | 68 +++++-------------- .../utils/simple_barcode_overlay.py | 7 +- .../human-machine-safety/main.py | 4 +- .../utils/annotation_node.py | 24 ++++--- .../text-blur/utils/blur_detections.py | 4 +- .../object-detection/yolo-p/requirements.txt | 2 +- .../object-detection/yolo-world/main.py | 7 +- .../yolo-world/requirements.txt | 3 +- ...ion_node.py => detections_label_mapper.py} | 7 +- .../object-tracking/deepsort-tracking/main.py | 14 ++-- neural-networks/ocr/general-ocr/main.py | 25 +++---- .../ocr/general-ocr/requirements.txt | 2 +- .../utils/host_process_detections.py | 27 +++----- .../pose-estimation/animal-pose/main.py | 18 +++-- .../animal-pose/utils/annotation_node.py | 4 +- .../pose-estimation/hand-pose/main.py | 33 +++------ .../hand-pose/utils/annotation_node.py | 15 ++-- .../hand-pose/utils/process.py | 23 +++---- .../pose-estimation/human-pose/main.py | 25 ++++--- .../human-reidentification/main.py | 17 +++-- 
streaming/webrtc-streaming/requirements.txt | 4 +- 43 files changed, 266 insertions(+), 304 deletions(-) delete mode 100644 neural-networks/face-detection/gaze-estimation/utils/node_creators.py rename neural-networks/object-detection/yolo-world/utils/{annotation_node.py => detections_label_mapper.py} (88%) diff --git a/camera-controls/lossless-zooming/requirements.txt b/camera-controls/lossless-zooming/requirements.txt index 218b6f03b..dfe76c078 100644 --- a/camera-controls/lossless-zooming/requirements.txt +++ b/camera-controls/lossless-zooming/requirements.txt @@ -1,2 +1,2 @@ depthai==3.3.0 -depthai-nodes==0.4.0 \ No newline at end of file +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update diff --git a/custom-frontend/open-vocabulary-object-detection/backend/requirements.txt b/custom-frontend/open-vocabulary-object-detection/backend/requirements.txt index 88bd04809..f93a5cc2b 100644 --- a/custom-frontend/open-vocabulary-object-detection/backend/requirements.txt +++ b/custom-frontend/open-vocabulary-object-detection/backend/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.2.1 -depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@f40211e5665473b5db48457640bed18fd1f2cc8d #InstanceToSemanticMask +depthai==3.3.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless~=4.10.0 numpy>=1.22 tokenizers~=0.21.0 diff --git a/custom-frontend/open-vocabulary-object-detection/backend/src/nn/label_mapper_node.py b/custom-frontend/open-vocabulary-object-detection/backend/src/nn/label_mapper_node.py index b9392b683..b303edfe5 100644 --- a/custom-frontend/open-vocabulary-object-detection/backend/src/nn/label_mapper_node.py +++ b/custom-frontend/open-vocabulary-object-detection/backend/src/nn/label_mapper_node.py @@ -52,7 +52,5 @@ def process( assert isinstance(detections_message, dai.ImgDetections) 
detections_message.setTransformation(frame_message.getTransformation()) for detection in detections_message.detections: - detection.labelName = self._label_encoding.get( - detection.label, "unknown" - ) + detection.labelName = self._label_encoding.get(detection.label, "unknown") self.out.send(detections_message) diff --git a/depth-measurement/3d-measurement/box-measurement/requirements.txt b/depth-measurement/3d-measurement/box-measurement/requirements.txt index f9dd6cc0a..e201dc209 100644 --- a/depth-measurement/3d-measurement/box-measurement/requirements.txt +++ b/depth-measurement/3d-measurement/box-measurement/requirements.txt @@ -1,5 +1,5 @@ depthai==3.3.0 -depthai-nodes==0.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update numpy>=1.22 open3d~=0.18 opencv-python-headless==4.10.0.84 diff --git a/depth-measurement/triangulation/requirements.txt b/depth-measurement/triangulation/requirements.txt index 657eada6e..c93725a89 100644 --- a/depth-measurement/triangulation/requirements.txt +++ b/depth-measurement/triangulation/requirements.txt @@ -1,4 +1,4 @@ depthai==3.3.0 -depthai-nodes==0.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless~=4.10.0 numpy>=1.22 diff --git a/integrations/roboflow-workflow/backend/src/core/annotation_node.py b/integrations/roboflow-workflow/backend/src/core/annotation_node.py index fc7dd3d89..90f78ca98 100644 --- a/integrations/roboflow-workflow/backend/src/core/annotation_node.py +++ b/integrations/roboflow-workflow/backend/src/core/annotation_node.py @@ -1,7 +1,6 @@ import logging import depthai as dai from enum import Enum -from depthai_nodes import ImgDetectionsExtended, ImgDetectionExtended class OutputType(Enum): @@ -20,7 +19,7 @@ def __init__( self.frames = {} # key -> ImgFrame self.output_frames = {"passthrough": self.createOutput()} - self.detections = {} # key -> ImgDetectionsExtended + self.detections = 
{} # key -> dai.ImgDetections self.output_detections = {} self._logger = logging.getLogger(self.__class__.__name__) @@ -80,13 +79,13 @@ def on_prediction(self, result, frame): self.frames[key] = vis_frame elif output_type == OutputType.DETECTION: - dets = ImgDetectionsExtended() + dets = dai.ImgDetections() try: for det in value: # Roboflow prediction output: xyxy, mask, conf, class_id, tracker, extra xyxy, _, conf, class_id, _, extra = det - new_det = ImgDetectionExtended() + new_det = dai.ImgDetection() h, w = extra["image_dimensions"] class_label = extra["class_name"] @@ -98,22 +97,19 @@ def on_prediction(self, result, frame): y0 /= h y1 /= h - new_det.rotated_rect = ( - float((x0 + x1) / 2), - float((y0 + y1) / 2), - float(x1 - x0), - float(y1 - y0), - 0, - ) + new_det.xmin = float(x0) + new_det.ymin = float(y0) + new_det.xmax = float(x1) + new_det.ymax = float(y1) new_det.confidence = float(conf) new_det.label = int(class_id) - new_det.label_name = str(class_label) + new_det.labelName = str(class_label) dets.detections.append(new_det) except Exception: self._logger.info( - f"Failed to parse output `{key}` as ImgDetectionExtended. " + f"Failed to parse output `{key}` as ImgDetection. " "Verify that this output contains a valid Roboflow Detection. " "If it does not, consider renaming the output in your Workflow so that " "'predictions' is not a substring of the output name." 
diff --git a/integrations/roboflow-workflow/backend/src/requirements.txt b/integrations/roboflow-workflow/backend/src/requirements.txt index be833a15b..237af3772 100644 --- a/integrations/roboflow-workflow/backend/src/requirements.txt +++ b/integrations/roboflow-workflow/backend/src/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.2.1 -depthai-nodes==0.3.6 +depthai==3.3.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python~=4.10.0 inference \ No newline at end of file diff --git a/neural-networks/3D-detection/objectron/main.py b/neural-networks/3D-detection/objectron/main.py index 56ca9c6ba..c0c4c5c13 100644 --- a/neural-networks/3D-detection/objectron/main.py +++ b/neural-networks/3D-detection/objectron/main.py @@ -1,7 +1,12 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, ImgDetectionsFilter, GatherData, FrameCropper +from depthai_nodes.node import ( + ParsingNeuralNetwork, + ImgDetectionsFilter, + GatherData, + FrameCropper, +) from utils.arguments import initialize_argparser from utils.annotation_node import AnnotationNode @@ -59,13 +64,17 @@ first_stage_filter.keepLabels(VALID_LABELS) # detection processing - crop_node = pipeline.create(FrameCropper).fromImgDetections( - inputImgDetections=first_stage_filter.out, - padding=PADDING, - ).build( - inputImage=det_nn.passthrough, - outputSize=(pos_model_w, pos_model_h), - resizeMode=dai.ImageManipConfig.ResizeMode.STRETCH, + crop_node = ( + pipeline.create(FrameCropper) + .fromImgDetections( + inputImgDetections=first_stage_filter.out, + padding=PADDING, + ) + .build( + inputImage=det_nn.passthrough, + outputSize=(pos_model_w, pos_model_h), + resizeMode=dai.ImageManipConfig.ResizeMode.STRETCH, + ) ) pos_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( diff --git a/neural-networks/counting/people-counter/requirements.txt b/neural-networks/counting/people-counter/requirements.txt index 
218b6f03b..c2a0c7bb7 100644 --- a/neural-networks/counting/people-counter/requirements.txt +++ b/neural-networks/counting/people-counter/requirements.txt @@ -1,2 +1,2 @@ depthai==3.3.0 -depthai-nodes==0.4.0 \ No newline at end of file +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git a/neural-networks/face-detection/age-gender/main.py b/neural-networks/face-detection/age-gender/main.py index 4b399eefc..2b07d944e 100644 --- a/neural-networks/face-detection/age-gender/main.py +++ b/neural-networks/face-detection/age-gender/main.py @@ -72,11 +72,18 @@ ) det_nn.getParser(0).conf_threshold = 0.9 # for more stable detections - crop_node = pipeline.create(FrameCropper).fromImgDetections( - inputImgDetections=det_nn.out, - ).build( - inputImage=input_node_out, - outputSize=(rec_model_nn_archive.getInputWidth(), rec_model_nn_archive.getInputHeight()), + crop_node = ( + pipeline.create(FrameCropper) + .fromImgDetections( + inputImgDetections=det_nn.out, + ) + .build( + inputImage=input_node_out, + outputSize=( + rec_model_nn_archive.getInputWidth(), + rec_model_nn_archive.getInputHeight(), + ), + ) ) rec_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( diff --git a/neural-networks/face-detection/age-gender/utils/annotation_node.py b/neural-networks/face-detection/age-gender/utils/annotation_node.py index 1a78d356c..747d5a3cd 100644 --- a/neural-networks/face-detection/age-gender/utils/annotation_node.py +++ b/neural-networks/face-detection/age-gender/utils/annotation_node.py @@ -21,9 +21,7 @@ def build( return self def process(self, gather_data_msg: dai.Buffer) -> None: - img_detections_msg: dai.ImgDetections = ( - gather_data_msg.reference_data - ) + img_detections_msg: dai.ImgDetections = gather_data_msg.reference_data assert isinstance(img_detections_msg, dai.ImgDetections) age_gender_msg_group_list: List[dai.MessageGroup] = gather_data_msg.gathered @@ -32,9 +30,7 
@@ def process(self, gather_data_msg: dai.Buffer) -> None: isinstance(msg, dai.MessageGroup) for msg in age_gender_msg_group_list ) - assert len(img_detections_msg.detections) == len( - age_gender_msg_group_list - ) + assert len(img_detections_msg.detections) == len(age_gender_msg_group_list) annotations = AnnotationHelper() @@ -46,9 +42,7 @@ def process(self, gather_data_msg: dai.Buffer) -> None: gender_msg: Classifications = age_gender_msg_group["1"] assert isinstance(gender_msg, Classifications) - xmin, ymin, xmax, ymax = ( - img_detection_msg.getBoundingBox().getOuterRect() - ) + xmin, ymin, xmax, ymax = img_detection_msg.getBoundingBox().getOuterRect() annotations.draw_rectangle( (xmin, ymin), diff --git a/neural-networks/face-detection/blur-faces/utils/blur_detections.py b/neural-networks/face-detection/blur-faces/utils/blur_detections.py index b970da439..ebeeea394 100644 --- a/neural-networks/face-detection/blur-faces/utils/blur_detections.py +++ b/neural-networks/face-detection/blur-faces/utils/blur_detections.py @@ -30,7 +30,7 @@ def run(self) -> None: bbox[2] = np.clip(bbox[2], 0, w) bbox[3] = np.clip(bbox[3], 0, h) - roi = frame_copy[bbox[1]: bbox[3], bbox[0]: bbox[2]] + roi = frame_copy[bbox[1] : bbox[3], bbox[0] : bbox[2]] roi_width = bbox[2] - bbox[0] roi_height = bbox[3] - bbox[1] @@ -56,7 +56,7 @@ def run(self) -> None: original_background = cv2.bitwise_and(roi, roi, mask=inverse_mask) combined = cv2.add(blurred_ellipse, original_background) - frame_copy[bbox[1]: bbox[3], bbox[0]: bbox[2]] = combined + frame_copy[bbox[1] : bbox[3], bbox[0] : bbox[2]] = combined ts = frame.getTimestamp() frame_type = frame.getType() diff --git a/neural-networks/face-detection/emotion-recognition/main.py b/neural-networks/face-detection/emotion-recognition/main.py index de68c2db4..e4d7d4504 100755 --- a/neural-networks/face-detection/emotion-recognition/main.py +++ b/neural-networks/face-detection/emotion-recognition/main.py @@ -71,11 +71,18 @@ resize_node.out, 
det_model_nn_archive ) - crop_node = pipeline.create(FrameCropper).fromImgDetections( - inputImgDetections=det_nn.out, - ).build( - inputImage=input_node, - outputSize=(rec_model_nn_archive.getInputWidth(), rec_model_nn_archive.getInputHeight()), + crop_node = ( + pipeline.create(FrameCropper) + .fromImgDetections( + inputImgDetections=det_nn.out, + ) + .build( + inputImage=input_node, + outputSize=( + rec_model_nn_archive.getInputWidth(), + rec_model_nn_archive.getInputHeight(), + ), + ) ) rec_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( diff --git a/neural-networks/face-detection/fatigue-detection/main.py b/neural-networks/face-detection/fatigue-detection/main.py index 9b1dde588..45dd4048e 100644 --- a/neural-networks/face-detection/fatigue-detection/main.py +++ b/neural-networks/face-detection/fatigue-detection/main.py @@ -72,11 +72,15 @@ resize_node.out, det_model_nn_archive ) - crop_node = pipeline.create(FrameCropper).fromImgDetections( - inputImgDetections=det_nn.out, - ).build( - inputImage=input_node_out, - outputSize=(rec_model_w, rec_model_h), + crop_node = ( + pipeline.create(FrameCropper) + .fromImgDetections( + inputImgDetections=det_nn.out, + ) + .build( + inputImage=input_node_out, + outputSize=(rec_model_w, rec_model_h), + ) ) landmark_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( diff --git a/neural-networks/face-detection/gaze-estimation/main.py b/neural-networks/face-detection/gaze-estimation/main.py index 285d681a7..26f18d3e4 100644 --- a/neural-networks/face-detection/gaze-estimation/main.py +++ b/neural-networks/face-detection/gaze-estimation/main.py @@ -1,11 +1,10 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, GatherData +from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper from utils.arguments import initialize_argparser from utils.process_keypoints import LandmarksProcessing -from utils.node_creators import 
create_crop_node from utils.annotation_node import AnnotationNode from utils.host_concatenate_head_pose import ConcatenateHeadPose @@ -101,14 +100,24 @@ ) det_nn.out.link(detection_process_node.detections_input) - left_eye_crop_node = create_crop_node( - pipeline, input_node_out, detection_process_node.left_config_output + crop_output_size = ( + head_pose_model_nn_archive.getInputWidth(), + head_pose_model_nn_archive.getInputHeight(), + ) + left_eye_crop_node = ( + pipeline.create(FrameCropper) + .fromManipConfigs(detection_process_node.left_config_output) + .build(input_node_out, crop_output_size) ) - right_eye_crop_node = create_crop_node( - pipeline, input_node_out, detection_process_node.right_config_output + right_eye_crop_node = ( + pipeline.create(FrameCropper) + .fromManipConfigs(detection_process_node.right_config_output) + .build(input_node_out, crop_output_size) ) - face_crop_node = create_crop_node( - pipeline, input_node_out, detection_process_node.face_config_output + face_crop_node = ( + pipeline.create(FrameCropper) + .fromManipConfigs(detection_process_node.face_config_output) + .build(input_node_out, crop_output_size) ) # head pose estimation diff --git a/neural-networks/face-detection/gaze-estimation/requirements.txt b/neural-networks/face-detection/gaze-estimation/requirements.txt index 218b6f03b..c2a0c7bb7 100644 --- a/neural-networks/face-detection/gaze-estimation/requirements.txt +++ b/neural-networks/face-detection/gaze-estimation/requirements.txt @@ -1,2 +1,2 @@ depthai==3.3.0 -depthai-nodes==0.4.0 \ No newline at end of file +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git a/neural-networks/face-detection/gaze-estimation/utils/node_creators.py b/neural-networks/face-detection/gaze-estimation/utils/node_creators.py deleted file mode 100644 index 3b361794b..000000000 --- a/neural-networks/face-detection/gaze-estimation/utils/node_creators.py +++ /dev/null 
@@ -1,32 +0,0 @@ -import depthai as dai -from pathlib import Path - - -def create_crop_node( - pipeline: dai.Pipeline, - input_frame: dai.Node.Output, - configs_message: dai.Node.Output, -) -> dai.node.ImageManip: - script_path = Path(__file__).parent / "config_sender_script.py" - with script_path.open("r") as script_file: - script_content = script_file.read() - - config_sender_script = pipeline.create(dai.node.Script) - config_sender_script.setScript(script_content) - config_sender_script.inputs["frame_input"].setBlocking(True) - config_sender_script.inputs["config_input"].setBlocking(True) - - img_manip_node = pipeline.create(dai.node.ImageManip) - img_manip_node.initialConfig.setReusePreviousImage(False) - img_manip_node.inputConfig.setReusePreviousMessage(False) - img_manip_node.inputImage.setReusePreviousMessage(False) - img_manip_node.inputConfig.setBlocking(True) - img_manip_node.inputImage.setBlocking(True) - - input_frame.link(config_sender_script.inputs["frame_input"]) - configs_message.link(config_sender_script.inputs["config_input"]) - - config_sender_script.outputs["output_config"].link(img_manip_node.inputConfig) - config_sender_script.outputs["output_frame"].link(img_manip_node.inputImage) - - return img_manip_node diff --git a/neural-networks/face-detection/gaze-estimation/utils/process_keypoints.py b/neural-networks/face-detection/gaze-estimation/utils/process_keypoints.py index d790c84fe..d24a66e8b 100644 --- a/neural-networks/face-detection/gaze-estimation/utils/process_keypoints.py +++ b/neural-networks/face-detection/gaze-estimation/utils/process_keypoints.py @@ -34,20 +34,20 @@ def run(self) -> None: right_eye = self.crop_rectangle( keypoints[0], face_w * 0.25, face_h * 0.25 ) - right_configs_message[str(i + 100)] = self.create_crop_cfg( + right_configs_message[f"cfg_{i}"] = self.create_crop_cfg( right_eye, img_detections ) left_eye = self.crop_rectangle( keypoints[1], face_w * 0.25, face_h * 0.25 ) - left_configs_message[str(i + 100)] = 
self.create_crop_cfg( + left_configs_message[f"cfg_{i}"] = self.create_crop_cfg( left_eye, img_detections ) face_rect = detection.getBoundingBox() face_rect = face_rect.denormalize(self.w, self.h) - face_configs_message[str(i + 100)] = self.create_crop_cfg( + face_configs_message[f"cfg_{i}"] = self.create_crop_cfg( face_rect, img_detections ) diff --git a/neural-networks/face-detection/head-posture-detection/main.py b/neural-networks/face-detection/head-posture-detection/main.py index 973a0fa48..5553f7057 100644 --- a/neural-networks/face-detection/head-posture-detection/main.py +++ b/neural-networks/face-detection/head-posture-detection/main.py @@ -73,11 +73,15 @@ ) det_nn.input.setBlocking(True) - crop_node = pipeline.create(FrameCropper).fromImgDetections( - inputImgDetections=det_nn.out, - ).build( - inputImage=input_node_out, - outputSize=(pose_model_w, pose_model_h), + crop_node = ( + pipeline.create(FrameCropper) + .fromImgDetections( + inputImgDetections=det_nn.out, + ) + .build( + inputImage=input_node_out, + outputSize=(pose_model_w, pose_model_h), + ) ) pose_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( diff --git a/neural-networks/face-detection/head-posture-detection/utils/annotation_node.py b/neural-networks/face-detection/head-posture-detection/utils/annotation_node.py index fd9477227..3b2c04b66 100644 --- a/neural-networks/face-detection/head-posture-detection/utils/annotation_node.py +++ b/neural-networks/face-detection/head-posture-detection/utils/annotation_node.py @@ -19,9 +19,7 @@ def build( return self def process(self, gather_data_msg: dai.Buffer) -> None: - img_detections_msg: dai.ImgDetections = ( - gather_data_msg.reference_data - ) + img_detections_msg: dai.ImgDetections = gather_data_msg.reference_data assert isinstance(img_detections_msg, dai.ImgDetections) pose_msg_group_list: List[dai.MessageGroup] = gather_data_msg.gathered diff --git 
a/neural-networks/object-detection/barcode-detection-conveyor-belt/requirements.txt b/neural-networks/object-detection/barcode-detection-conveyor-belt/requirements.txt index 445462a22..7425d21f0 100644 --- a/neural-networks/object-detection/barcode-detection-conveyor-belt/requirements.txt +++ b/neural-networks/object-detection/barcode-detection-conveyor-belt/requirements.txt @@ -1,5 +1,5 @@ -depthai>=3.0.0 -depthai-nodes==0.3.4 +depthai>=3.3.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update numpy>=1.22 opencv-python-headless~=4.10.0 pyzbar==0.1.9 diff --git a/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/annotation_node.py b/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/annotation_node.py index b1a38851f..e2b987d2b 100644 --- a/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/annotation_node.py +++ b/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/annotation_node.py @@ -1,7 +1,7 @@ from typing import List import depthai as dai -from depthai_nodes import ImgDetectionsExtended, SECONDARY_COLOR +from depthai_nodes import SECONDARY_COLOR from depthai_nodes.utils import AnnotationHelper @@ -26,7 +26,7 @@ def run(self) -> None: while self.isRunning(): gather_data_msg: dai.Buffer = self.input.get() - img_detections_extended_msg: ImgDetectionsExtended = ( + img_detections_extended_msg: dai.ImgDetections = ( gather_data_msg.reference_data ) @@ -37,17 +37,10 @@ def run(self) -> None: for img_detection_extended_msg, msg_group in zip( img_detections_extended_msg.detections, msg_group_list ): - xmin, ymin, xmax, ymax = ( - img_detection_extended_msg.rotated_rect.getOuterRect() - ) - - try: - xmin = float(xmin) - ymin = float(ymin) - xmax = float(xmax) - ymax = float(ymax) - except Exception: - pass + xmin = img_detection_extended_msg.xmin + ymin = img_detection_extended_msg.ymin + xmax = img_detection_extended_msg.xmax + ymax = 
img_detection_extended_msg.ymax xmin = max(0.0, min(1.0, xmin)) ymin = max(0.0, min(1.0, ymin)) diff --git a/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/host_crop_config_creator.py b/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/host_crop_config_creator.py index bcee42f3b..1b79222fb 100644 --- a/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/host_crop_config_creator.py +++ b/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/host_crop_config_creator.py @@ -3,8 +3,6 @@ import depthai as dai -from depthai_nodes import ImgDetectionExtended, ImgDetectionsExtended - class CropConfigsCreator(dai.node.HostNode): """A node to create and send a dai.ImageManipConfigV2 crop configuration for each @@ -17,11 +15,11 @@ class CropConfigsCreator(dai.node.HostNode): Attributes ---------- detections_input : dai.Input - The input link for the ImageDetectionsExtended | dai.ImgDetections message. + The input link for the dai.ImgDetections message. config_output : dai.Output The output link for the ImageManipConfigV2 messages. detections_output : dai.Output - The output link for the ImgDetectionsExtended message. + The output link for the dai.ImgDetections message. source_size : Tuple[int, int] The size of the source image (width, height). target_size : Optional[Tuple[int, int]] = None @@ -146,7 +144,7 @@ def build( Parameters ---------- detections_input : dai.Node.Output - The input link for the ImgDetectionsExtended message + The input link for the dai.ImgDetections message source_size : Tuple[int, int] The size of the source image (width, height). target_size : Optional[Tuple[int, int]] @@ -170,24 +168,18 @@ def build( def process(self, detections_input: dai.Buffer) -> None: """Process the input detections and create crop configurations. This function is - ran every time a new ImgDetectionsExtended or dai.ImgDetections message is - received. 
+ ran every time a new dai.ImgDetections message is received. Sends len(detections) number of crop configurations to the config_output link. - In addition sends an ImgDetectionsExtended object containing the corresponding + In addition sends a dai.ImgDetections object containing the corresponding detections to the detections_output link. """ - assert isinstance(detections_input, (ImgDetectionsExtended, dai.ImgDetections)) + assert isinstance(detections_input, dai.ImgDetections) sequence_num = detections_input.getSequenceNum() timestamp = detections_input.getTimestamp() - if isinstance(detections_input, dai.ImgDetections): - detections_msg = self._convert_to_extended(detections_input) - else: - detections_msg = detections_input - - detections = detections_msg.detections + detections = detections_input.detections # Skip the current frame / load new frame cfg = dai.ImageManipConfig() @@ -206,11 +198,17 @@ def process(self, detections_input: dai.Buffer) -> None: for i in range(len(detections)): cfg = dai.ImageManipConfig() - detection: ImgDetectionExtended = detections[i] - rect = detection.rotated_rect - rect = rect.denormalize(self.w, self.h) + detection: dai.ImgDetection = detections[i] + + x_center = (detection.xmin + detection.xmax) / 2 + y_center = (detection.ymin + detection.ymax) / 2 + width = (detection.xmax - detection.xmin) * 1.15 + height = (detection.ymax - detection.ymin) * 1.15 + rect = dai.RotatedRect( + dai.Point2f(x_center, y_center), dai.Size2f(width, height), 0.0 + ) - cfg.addCropRotatedRect(rect, normalizedCoords=False) + cfg.addCropRotatedRect(rect, normalizedCoords=True) if self.target_w is not None and self.target_h is not None: cfg.setOutputSize(self.target_w, self.target_h, self.resize_mode) @@ -229,37 +227,7 @@ def process(self, detections_input: dai.Buffer) -> None: attempts += 1 time.sleep(0.001) # Small delay to prevent busy waiting - self.detections_output.send(detections_msg) - - def _convert_to_extended( - self, detections: 
dai.ImgDetections - ) -> ImgDetectionsExtended: - rotated_rectangle_detections = [] - for det in detections.detections: - img_detection = ImgDetectionExtended() - img_detection.label = det.label - img_detection.confidence = det.confidence - - x_center = (det.xmin + det.xmax) / 2 - y_center = (det.ymin + det.ymax) / 2 - width = det.xmax - det.xmin - height = det.ymax - det.ymin - width = width * 1.15 - height = height * 1.15 - - img_detection.rotated_rect = (x_center, y_center, width, height, 0.0) - - rotated_rectangle_detections.append(img_detection) - - img_detections_extended = ImgDetectionsExtended() - img_detections_extended.setSequenceNum(detections.getSequenceNum()) - img_detections_extended.setTimestamp(detections.getTimestamp()) - img_detections_extended.detections = rotated_rectangle_detections - transformation = detections.getTransformation() - if transformation is not None: - img_detections_extended.setTransformation(transformation) - - return img_detections_extended + self.detections_output.send(detections_input) def _validate_positive_integer(self, value: int): """Validates that the set size is a positive integer. 
diff --git a/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/simple_barcode_overlay.py b/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/simple_barcode_overlay.py index 3a99ee463..6ee6d6b85 100644 --- a/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/simple_barcode_overlay.py +++ b/neural-networks/object-detection/barcode-detection-conveyor-belt/utils/simple_barcode_overlay.py @@ -228,7 +228,12 @@ def _draw_detection_boxes(self, frame, detections): h, w = frame.shape[:2] for detection in detections.detections: - xmin, ymin, xmax, ymax = detection.rotated_rect.getOuterRect() + xmin, ymin, xmax, ymax = ( + detection.xmin, + detection.ymin, + detection.xmax, + detection.ymax, + ) x1 = int(xmin * w) y1 = int(ymin * h) diff --git a/neural-networks/object-detection/human-machine-safety/main.py b/neural-networks/object-detection/human-machine-safety/main.py index 4a5b0badc..bdb5040c0 100644 --- a/neural-networks/object-detection/human-machine-safety/main.py +++ b/neural-networks/object-detection/human-machine-safety/main.py @@ -137,7 +137,9 @@ merged_labels = classes + ["palm"] filter_labels = [merged_labels.index(i) for i in DANGEROUS_OBJECTS] filter_labels.append(merged_labels.index("palm")) - detection_filter = pipeline.create(ImgDetectionsFilter).build(merge_detections.output) + detection_filter = pipeline.create(ImgDetectionsFilter).build( + merge_detections.output + ) detection_filter.keepLabels(filter_labels) # annotation diff --git a/neural-networks/object-detection/human-machine-safety/utils/annotation_node.py b/neural-networks/object-detection/human-machine-safety/utils/annotation_node.py index 45af34d27..fc6bf59cd 100644 --- a/neural-networks/object-detection/human-machine-safety/utils/annotation_node.py +++ b/neural-networks/object-detection/human-machine-safety/utils/annotation_node.py @@ -34,17 +34,19 @@ def process( img_detection = dai.ImgDetection() img_detection.label = detection.label 
img_detection.labelName = detection.labelName - img_detection.setBoundingBox(dai.RotatedRect( - dai.Point2f( - (detection.xmax + detection.xmin) / 2, - (detection.ymax + detection.ymin) / 2, - ), - dai.Size2f( - detection.xmax - detection.xmin, - detection.ymax - detection.ymin, - ), - 0, - )) + img_detection.setBoundingBox( + dai.RotatedRect( + dai.Point2f( + (detection.xmax + detection.xmin) / 2, + (detection.ymax + detection.ymin) / 2, + ), + dai.Size2f( + detection.xmax - detection.xmin, + detection.ymax - detection.ymin, + ), + 0, + ) + ) img_detection.confidence = detection.confidence det_list.append(img_detection) img_detections.detections = det_list diff --git a/neural-networks/object-detection/text-blur/utils/blur_detections.py b/neural-networks/object-detection/text-blur/utils/blur_detections.py index cde908b92..b6a69b6ac 100644 --- a/neural-networks/object-detection/text-blur/utils/blur_detections.py +++ b/neural-networks/object-detection/text-blur/utils/blur_detections.py @@ -31,7 +31,7 @@ def run(self) -> None: bbox[2] = np.clip(bbox[2], 0, w) bbox[3] = np.clip(bbox[3], 0, h) - roi = frame_copy[bbox[1]: bbox[3], bbox[0]: bbox[2]] + roi = frame_copy[bbox[1] : bbox[3], bbox[0] : bbox[2]] roi_width = bbox[2] - bbox[0] roi_height = bbox[3] - bbox[1] @@ -57,7 +57,7 @@ def run(self) -> None: original_background = cv2.bitwise_and(roi, roi, mask=inverse_mask) combined = cv2.add(blurred_ellipse, original_background) - frame_copy[bbox[1]: bbox[3], bbox[0]: bbox[2]] = combined + frame_copy[bbox[1] : bbox[3], bbox[0] : bbox[2]] = combined ts = frame.getTimestamp() frame_type = frame.getType() diff --git a/neural-networks/object-detection/yolo-p/requirements.txt b/neural-networks/object-detection/yolo-p/requirements.txt index 77fbe7cf7..c5f9dee1b 100644 --- a/neural-networks/object-detection/yolo-p/requirements.txt +++ b/neural-networks/object-detection/yolo-p/requirements.txt @@ -1,4 +1,4 @@ depthai==3.3.0 -depthai-nodes==0.4.0 +depthai-nodes @ 
git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/object-detection/yolo-world/main.py b/neural-networks/object-detection/yolo-world/main.py index 77168fae7..647923fcc 100644 --- a/neural-networks/object-detection/yolo-world/main.py +++ b/neural-networks/object-detection/yolo-world/main.py @@ -8,7 +8,7 @@ from utils.helper_functions import extract_text_embeddings from utils.arguments import initialize_argparser -from utils.annotation_node import AnnotationNode +from utils.detections_label_mapper import DetectionsLabelMapper MAX_NUM_CLASSES = 80 @@ -84,17 +84,16 @@ det_process_filter = pipeline.create(ImgDetectionsFilter).build(nn_with_parser.out) det_process_filter.keepLabels([i for i in range(len(args.class_names))]) - annotation_node = pipeline.create(AnnotationNode).build( + label_mapper = pipeline.create(DetectionsLabelMapper).build( det_process_filter.out, label_encoding={k: v for k, v in enumerate(args.class_names)}, ) # visualization - visualizer.addTopic("Detections", annotation_node.out) + visualizer.addTopic("Detections", label_mapper.out) visualizer.addTopic("Video", nn_with_parser.passthroughs["images"]) print("Pipeline created.") - pipeline.start() visualizer.registerPipeline(pipeline) diff --git a/neural-networks/object-detection/yolo-world/requirements.txt b/neural-networks/object-detection/yolo-world/requirements.txt index 3b056a9d1..3b156fb88 100644 --- a/neural-networks/object-detection/yolo-world/requirements.txt +++ b/neural-networks/object-detection/yolo-world/requirements.txt @@ -1,5 +1,6 @@ depthai==3.3.0 -depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_updateopencv-python-headless~=4.10.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update +opencv-python-headless~=4.10.0 numpy>=1.22 onnxruntime # onnxruntime-gpu # if you want 
to use CUDAExecutionProvider diff --git a/neural-networks/object-detection/yolo-world/utils/annotation_node.py b/neural-networks/object-detection/yolo-world/utils/detections_label_mapper.py similarity index 88% rename from neural-networks/object-detection/yolo-world/utils/annotation_node.py rename to neural-networks/object-detection/yolo-world/utils/detections_label_mapper.py index 2a73df2c4..2b36c0222 100644 --- a/neural-networks/object-detection/yolo-world/utils/annotation_node.py +++ b/neural-networks/object-detection/yolo-world/utils/detections_label_mapper.py @@ -2,11 +2,10 @@ from typing import Dict -class AnnotationNode(dai.node.HostNode): +class DetectionsLabelMapper(dai.node.HostNode): def __init__(self, label_encoding: Dict[int, str] = {}) -> None: super().__init__() self._label_encoding = label_encoding - self.out_detections = self.createOutput() def setLabelEncoding(self, label_encoding: Dict[int, str]) -> None: """Sets the label encoding. @@ -21,7 +20,7 @@ def setLabelEncoding(self, label_encoding: Dict[int, str]) -> None: def build( self, detections: dai.Node.Output, label_encoding: Dict[int, str] = None - ) -> "AnnotationNode": + ) -> "DetectionsLabelMapper": if label_encoding is not None: self.setLabelEncoding(label_encoding) self.link_args(detections) @@ -34,4 +33,4 @@ def process( assert isinstance(detections_message, dai.ImgDetections) for detection in detections_message.detections: detection.labelName = self._label_encoding.get(detection.label, "unknown") - return detections_message + self.out.send(detections_message) diff --git a/neural-networks/object-tracking/deepsort-tracking/main.py b/neural-networks/object-tracking/deepsort-tracking/main.py index 5072728b9..292e845e3 100644 --- a/neural-networks/object-tracking/deepsort-tracking/main.py +++ b/neural-networks/object-tracking/deepsort-tracking/main.py @@ -78,11 +78,15 @@ ) # detection processing - crop_node = pipeline.create(FrameCropper).fromImgDetections( - inputImgDetections=det_nn.out, - 
).build( - inputImage=det_nn.passthrough, - outputSize=(embeddings_model_w, embeddings_model_h), + crop_node = ( + pipeline.create(FrameCropper) + .fromImgDetections( + inputImgDetections=det_nn.out, + ) + .build( + inputImage=det_nn.passthrough, + outputSize=(embeddings_model_w, embeddings_model_h), + ) ) embeddings_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( diff --git a/neural-networks/ocr/general-ocr/main.py b/neural-networks/ocr/general-ocr/main.py index 03f6f8cdd..730d149d8 100644 --- a/neural-networks/ocr/general-ocr/main.py +++ b/neural-networks/ocr/general-ocr/main.py @@ -1,7 +1,7 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, GatherData +from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper from utils.annotation_node import OCRAnnotationNode from utils.arguments import initialize_argparser @@ -72,22 +72,17 @@ ) det_nn.setNumPoolFrames(30) - # detection processing - detection_process_node = pipeline.create(CropConfigsCreator) - detection_process_node.build( + # detection processing and crops config creation + crop_configs_creator = pipeline.create(CropConfigsCreator) + crop_configs_creator.build( det_nn.out, (REQ_WIDTH, REQ_HEIGHT), (rec_model_w, rec_model_h) ) - crop_node = pipeline.create(dai.node.ImageManip) - crop_node.initialConfig.setReusePreviousImage(False) - crop_node.inputConfig.setReusePreviousMessage(False) - crop_node.inputImage.setReusePreviousMessage(True) - crop_node.inputConfig.setMaxSize(30) - crop_node.inputImage.setMaxSize(30) - crop_node.setNumFramesPool(30) - - detection_process_node.config_output.link(crop_node.inputConfig) - input_node_out.link(crop_node.inputImage) + crop_node = ( + pipeline.create(FrameCropper) + .fromManipConfigs(crop_configs_creator.config_output) + .build(input_node_out, (rec_model_w, rec_model_h)) + ) ocr_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( crop_node.out, 
rec_model_nn_archive @@ -99,7 +94,7 @@ gather_data_node = pipeline.create(GatherData).build( camera_fps=args.fps_limit, input_data=ocr_nn.out, - input_reference=detection_process_node.detections_output, + input_reference=crop_configs_creator.detections_output, ) # annotation diff --git a/neural-networks/ocr/general-ocr/requirements.txt b/neural-networks/ocr/general-ocr/requirements.txt index 91e851566..f72c0b1c8 100644 --- a/neural-networks/ocr/general-ocr/requirements.txt +++ b/neural-networks/ocr/general-ocr/requirements.txt @@ -1,4 +1,4 @@ depthai==3.3.0 -depthai-nodes==0.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless==4.10.0.84 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/ocr/general-ocr/utils/host_process_detections.py b/neural-networks/ocr/general-ocr/utils/host_process_detections.py index b5bac1506..730cdd542 100644 --- a/neural-networks/ocr/general-ocr/utils/host_process_detections.py +++ b/neural-networks/ocr/general-ocr/utils/host_process_detections.py @@ -30,11 +30,7 @@ class CropConfigsCreator(dai.node.HostNode): def __init__(self) -> None: """Initializes the node.""" super().__init__() - self.config_output = self.createOutput( - possibleDatatypes=[ - dai.Node.DatatypeHierarchy(dai.DatatypeEnum.ImageManipConfig, True) - ] - ) + self.config_output = self.createOutput() self.detections_output = self.createOutput( possibleDatatypes=[ dai.Node.DatatypeHierarchy(dai.DatatypeEnum.Buffer, True) @@ -133,14 +129,7 @@ def process(self, detections_input: dai.Buffer) -> None: detections = detections_input.detections - # Skip the current frame / load new frame - cfg = dai.ImageManipConfig() - cfg.setSkipCurrentImage(True) - cfg.setTimestamp(timestamp) - cfg.setSequenceNum(sequence_num) - send_status = False - while not send_status: - send_status = self.config_output.trySend(cfg) + configs_group = dai.MessageGroup() valid_detections = [] for detection in detections: 
if detection.confidence > 0.8: @@ -164,13 +153,13 @@ def process(self, detections_input: dai.Buffer) -> None: if self.target_w is not None and self.target_h is not None: cfg.setOutputSize(self.target_w, self.target_h, self.resize_mode) - cfg.setReusePreviousImage(True) cfg.setTimestamp(timestamp) cfg.setSequenceNum(sequence_num) + configs_group[f"cfg_{len(valid_detections) - 1}"] = cfg - send_status = False - while not send_status: - send_status = self.config_output.trySend(cfg) + configs_group.setTimestamp(timestamp) + configs_group.setSequenceNum(sequence_num) + self.config_output.send(configs_group) valid_msg = dai.ImgDetections() valid_msg.setSequenceNum(sequence_num) @@ -180,6 +169,10 @@ def process(self, detections_input: dai.Buffer) -> None: self.detections_output.send(valid_msg) + def _validate_positive_integer(self, value: int) -> None: + if not isinstance(value, int) or value <= 0: + raise ValueError(f"Expected a positive integer, got {value!r}") + def _expand_rect(self, rect: dai.RotatedRect) -> dai.RotatedRect: s = rect.size diff --git a/neural-networks/pose-estimation/animal-pose/main.py b/neural-networks/pose-estimation/animal-pose/main.py index fa3e3583c..40f23b429 100644 --- a/neural-networks/pose-estimation/animal-pose/main.py +++ b/neural-networks/pose-estimation/animal-pose/main.py @@ -65,13 +65,17 @@ detections_filter.keepLabels(VALID_LABELS) # detection processing - pose_manip = pipeline.create(FrameCropper).fromImgDetections( - inputImgDetections=detections_filter.out, - padding=PADDING, - ).build( - inputImage=detection_nn.passthrough, - outputSize=(pose_model_w, pose_model_h), - resizeMode=dai.ImageManipConfig.ResizeMode.STRETCH, + pose_manip = ( + pipeline.create(FrameCropper) + .fromImgDetections( + inputImgDetections=detections_filter.out, + padding=PADDING, + ) + .build( + inputImage=detection_nn.passthrough, + outputSize=(pose_model_w, pose_model_h), + resizeMode=dai.ImageManipConfig.ResizeMode.STRETCH, + ) ) pose_nn: 
ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( diff --git a/neural-networks/pose-estimation/animal-pose/utils/annotation_node.py b/neural-networks/pose-estimation/animal-pose/utils/annotation_node.py index eb5722e82..30e8cb082 100644 --- a/neural-networks/pose-estimation/animal-pose/utils/annotation_node.py +++ b/neural-networks/pose-estimation/animal-pose/utils/annotation_node.py @@ -45,9 +45,7 @@ def process(self, gathered_data: dai.Buffer) -> None: padding = self.padding for ix, detection in enumerate(detections_list): - detection.labelName = ( - "Animal" - ) + detection.labelName = "Animal" keypoints_message: Keypoints = gathered_data.gathered[ix] xmin, ymin, xmax, ymax = detection.getBoundingBox().getOuterRect() diff --git a/neural-networks/pose-estimation/hand-pose/main.py b/neural-networks/pose-estimation/hand-pose/main.py index 216e44b3c..3e6e5059f 100644 --- a/neural-networks/pose-estimation/hand-pose/main.py +++ b/neural-networks/pose-estimation/hand-pose/main.py @@ -1,7 +1,7 @@ from pathlib import Path import depthai as dai -from depthai_nodes.node import ParsingNeuralNetwork, GatherData +from depthai_nodes.node import ParsingNeuralNetwork, GatherData, FrameCropper from utils.arguments import initialize_argparser from utils.annotation_node import AnnotationNode @@ -79,30 +79,19 @@ target_size=(pose_nn_archive.getInputWidth(), pose_nn_archive.getInputHeight()), ) - script = pipeline.create(dai.node.Script) - script.setScriptPath(str(Path(__file__).parent / "utils/script.py")) - script.inputs["frame_input"].setMaxSize(30) - script.inputs["config_input"].setMaxSize(30) - script.inputs["num_configs_input"].setMaxSize(30) - - detection_nn.passthrough.link(script.inputs["frame_input"]) - detections_processor.config_output.link(script.inputs["config_input"]) - detections_processor.num_configs_output.link(script.inputs["num_configs_input"]) - - pose_manip = pipeline.create(dai.node.ImageManip) - pose_manip.initialConfig.setOutputSize( - 
pose_nn_archive.getInputWidth(), pose_nn_archive.getInputHeight() + # hand crop + pose estimation + crop_output_size = ( + pose_nn_archive.getInputWidth(), + pose_nn_archive.getInputHeight(), + ) + hand_crop_node = ( + pipeline.create(FrameCropper) + .fromManipConfigs(detections_processor.config_output) + .build(detection_nn.passthrough, crop_output_size) ) - pose_manip.inputConfig.setMaxSize(30) - pose_manip.inputImage.setMaxSize(30) - pose_manip.setNumFramesPool(30) - pose_manip.inputConfig.setWaitForMessage(True) - - script.outputs["output_config"].link(pose_manip.inputConfig) - script.outputs["output_frame"].link(pose_manip.inputImage) pose_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( - pose_manip.out, pose_nn_archive + hand_crop_node.out, pose_nn_archive ) # detections and pose estimations sync diff --git a/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py b/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py index 6cc4c14ff..53e792518 100644 --- a/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py +++ b/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py @@ -76,11 +76,16 @@ def process(self, gathered_data: dai.Buffer, video_message: dai.ImgFrame) -> Non new_det = dai.ImgDetection() rotated_rect = detection.getBoundingBox() - new_det.setBoundingBox(dai.RotatedRect( - rotated_rect.center, - dai.Size2f(rotated_rect.size.width + 2 * padding, rotated_rect.size.height + 2 * padding), - rotated_rect.angle, - )) + new_det.setBoundingBox( + dai.RotatedRect( + rotated_rect.center, + dai.Size2f( + rotated_rect.size.width + 2 * padding, + rotated_rect.size.height + 2 * padding, + ), + rotated_rect.angle, + ) + ) new_det.label = 0 new_det.labelName = "Hand" new_det.confidence = detection.confidence diff --git a/neural-networks/pose-estimation/hand-pose/utils/process.py b/neural-networks/pose-estimation/hand-pose/utils/process.py index 0e8623e95..da06df3ba 100644 --- 
a/neural-networks/pose-estimation/hand-pose/utils/process.py +++ b/neural-networks/pose-estimation/hand-pose/utils/process.py @@ -13,9 +13,7 @@ class ProcessDetections(dai.node.HostNode): detections_input : dai.Input The input message for the detections. config_output : dai.Output - The output message for the ImageManipConfig objects. - num_configs_output : dai.Output - The output message for the number of configs. + The output message for the ImageManipConfig objects packed in a MessageGroup. padding: float The padding factor to enlarge the bounding box a little bit. @@ -25,7 +23,6 @@ def __init__(self): super().__init__() self.detections_input = self.createInput() self.config_output = self.createOutput() - self.num_configs_output = self.createOutput() self.padding = 0.1 self._target_h = None self._target_w = None @@ -46,13 +43,7 @@ def process(self, img_detections: dai.Buffer) -> None: assert isinstance(img_detections, dai.ImgDetections) detections = img_detections.detections - num_detections = len(detections) - num_cfgs_message = dai.Buffer(num_detections) - - num_cfgs_message.setTimestamp(img_detections.getTimestamp()) - num_cfgs_message.setSequenceNum(img_detections.getSequenceNum()) - self.num_configs_output.send(num_cfgs_message) - + configs_group = dai.MessageGroup() for i, detection in enumerate(detections): cfg = dai.ImageManipConfig() detection: dai.ImgDetection = detection @@ -61,8 +52,8 @@ def process(self, img_detections: dai.Buffer) -> None: new_rect = dai.RotatedRect() new_rect.center.x = rect.center.x new_rect.center.y = rect.center.y - new_rect.size.width = rect.size.width + 0.1 * 2 - new_rect.size.height = rect.size.height + 0.1 * 2 + new_rect.size.width = rect.size.width + self.padding * 2 + new_rect.size.height = rect.size.height + self.padding * 2 new_rect.angle = 0 cfg.addCropRotatedRect(new_rect, normalizedCoords=True) @@ -74,4 +65,8 @@ def process(self, img_detections: dai.Buffer) -> None: cfg.setReusePreviousImage(False) 
cfg.setTimestamp(img_detections.getTimestamp()) cfg.setSequenceNum(img_detections.getSequenceNum()) - self.config_output.send(cfg) + configs_group[f"cfg_{i}"] = cfg + + configs_group.setTimestamp(img_detections.getTimestamp()) + configs_group.setSequenceNum(img_detections.getSequenceNum()) + self.config_output.send(configs_group) diff --git a/neural-networks/pose-estimation/human-pose/main.py b/neural-networks/pose-estimation/human-pose/main.py index c9566d0ea..3cf2d3d25 100644 --- a/neural-networks/pose-estimation/human-pose/main.py +++ b/neural-networks/pose-estimation/human-pose/main.py @@ -69,14 +69,23 @@ det_model_nn_archive.getConfig().model.heads[0].metadata.classes.index("person") ] detections_filter = pipeline.create(ImgDetectionsFilter).build(det_nn.out) - detections_filter.keepLabels(valid_labels) # we only want to work with person detections - - crop_node = pipeline.create(FrameCropper).fromImgDetections( - inputImgDetections=det_nn.out, - padding=PADDING, - ).build( - inputImage=det_nn.passthrough, - outputSize=(rec_model_nn_archive.getInputWidth(), rec_model_nn_archive.getInputHeight()), + detections_filter.keepLabels( + valid_labels + ) # we only want to work with person detections + + crop_node = ( + pipeline.create(FrameCropper) + .fromImgDetections( + inputImgDetections=det_nn.out, + padding=PADDING, + ) + .build( + inputImage=det_nn.passthrough, + outputSize=( + rec_model_nn_archive.getInputWidth(), + rec_model_nn_archive.getInputHeight(), + ), + ) ) rec_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( diff --git a/neural-networks/reidentification/human-reidentification/main.py b/neural-networks/reidentification/human-reidentification/main.py index 6e94fdcbf..970dad4c4 100644 --- a/neural-networks/reidentification/human-reidentification/main.py +++ b/neural-networks/reidentification/human-reidentification/main.py @@ -90,11 +90,18 @@ resize_node.out, det_model_nn_archive ) - crop_node = 
pipeline.create(FrameCropper).fromImgDetections( - inputImgDetections=det_nn.out, - ).build( - inputImage=input_node_out, - outputSize=(rec_nn_archive.getInputWidth(), rec_nn_archive.getInputHeight()), + crop_node = ( + pipeline.create(FrameCropper) + .fromImgDetections( + inputImgDetections=det_nn.out, + ) + .build( + inputImage=input_node_out, + outputSize=( + rec_nn_archive.getInputWidth(), + rec_nn_archive.getInputHeight(), + ), + ) ) rec_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( diff --git a/streaming/webrtc-streaming/requirements.txt b/streaming/webrtc-streaming/requirements.txt index f8b01f680..942d4ae2b 100644 --- a/streaming/webrtc-streaming/requirements.txt +++ b/streaming/webrtc-streaming/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update aiortc==1.9.0 aiohttp>=3.10.0,<4.0 aiohttp-cors==0.7.0 From fd521e9490c47676a560d8334e309399744d6a4c Mon Sep 17 00:00:00 2001 From: bblazeva Date: Mon, 2 Mar 2026 13:22:08 +0100 Subject: [PATCH 08/14] hub-snaps-events --- integrations/hub-snaps-events/oakapp.toml | 11 ++++++++++- integrations/hub-snaps-events/requirements.txt | 4 ++-- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/integrations/hub-snaps-events/oakapp.toml b/integrations/hub-snaps-events/oakapp.toml index 8ac09306a..06416fdb7 100644 --- a/integrations/hub-snaps-events/oakapp.toml +++ b/integrations/hub-snaps-events/oakapp.toml @@ -14,4 +14,13 @@ build_steps = [] depthai_models = { yaml_path = "./depthai_models" } -entrypoint = ["bash", "-c", "python3 -u /app/main.py"] +entrypoint = ["bash", "-c", "python3 -u /app/main.py --api_key "] + +[base_image] +api_url = "https://registry-1.docker.io" +service = "registry.docker.io" +oauth_url = "https://auth.docker.io/token" +auth_type = "repository" +auth_name = "luxonis/oakapp-base" +image_name = "luxonis/oakapp-base" +image_tag = 
"1.2.6" \ No newline at end of file diff --git a/integrations/hub-snaps-events/requirements.txt b/integrations/hub-snaps-events/requirements.txt index bd5223a56..9ed3cb17b 100644 --- a/integrations/hub-snaps-events/requirements.txt +++ b/integrations/hub-snaps-events/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.2.1 -depthai-nodes==0.3.7 +depthai==3.3.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update python-dotenv From 6347c5885fdb9e1fdc18d489234b55a1a0389c1b Mon Sep 17 00:00:00 2001 From: bblazeva Date: Mon, 2 Mar 2026 15:56:08 +0100 Subject: [PATCH 09/14] python >= 3.10 --- apps/conference-demos/rgb-depth-connections/README.md | 2 +- apps/default-app/README.md | 2 +- camera-controls/depth-driven-focus/README.md | 2 +- camera-controls/lossless-zooming/README.md | 2 +- camera-controls/manual-camera-control/README.md | 2 +- custom-frontend/raw-stream/README.md | 2 +- depth-measurement/3d-measurement/box-measurement/README.md | 2 +- depth-measurement/3d-measurement/rgbd-pointcloud/README.md | 2 +- depth-measurement/3d-measurement/tof-pointcloud/README.md | 2 +- depth-measurement/calc-spatial-on-host/README.md | 2 +- depth-measurement/dynamic-calibration/README.md | 2 +- depth-measurement/stereo-on-host/README.md | 2 +- depth-measurement/stereo-runtime-configuration/README.md | 2 +- depth-measurement/triangulation/README.md | 2 +- depth-measurement/wls-filter/README.md | 2 +- integrations/foxglove/README.md | 2 +- integrations/hub-snaps-events/README.md | 2 +- integrations/rerun/README.md | 2 +- integrations/roboflow-dataset/README.md | 2 +- neural-networks/3D-detection/objectron/README.md | 2 +- neural-networks/counting/crowdcounting/README.md | 2 +- neural-networks/counting/cumulative-object-counting/README.md | 2 +- neural-networks/counting/depth-people-counting/README.md | 2 +- neural-networks/counting/people-counter/README.md | 2 +- .../depth-estimation/crestereo-stereo-matching/README.md | 2 +- 
neural-networks/depth-estimation/neural-depth/README.md | 2 +- .../depth-estimation/neural-depth/host_eval/README.md | 2 +- neural-networks/face-detection/age-gender/README.md | 2 +- neural-networks/face-detection/blur-faces/README.md | 2 +- neural-networks/face-detection/emotion-recognition/README.md | 2 +- neural-networks/face-detection/face-mask-detection/README.md | 2 +- neural-networks/face-detection/fatigue-detection/README.md | 2 +- neural-networks/face-detection/gaze-estimation/README.md | 2 +- neural-networks/face-detection/head-posture-detection/README.md | 2 +- neural-networks/feature-detection/xfeat/README.md | 2 +- neural-networks/generic-example/README.md | 2 +- .../object-detection/barcode-detection-conveyor-belt/README.md | 2 +- neural-networks/object-detection/human-machine-safety/README.md | 2 +- neural-networks/object-detection/social-distancing/README.md | 2 +- neural-networks/object-detection/spatial-detections/README.md | 2 +- neural-networks/object-detection/text-blur/README.md | 2 +- neural-networks/object-detection/thermal-detection/README.md | 2 +- neural-networks/object-detection/yolo-host-decoding/README.md | 2 +- neural-networks/object-detection/yolo-p/README.md | 2 +- neural-networks/object-detection/yolo-world/README.md | 2 +- neural-networks/object-tracking/collision-avoidance/README.md | 2 +- neural-networks/object-tracking/deepsort-tracking/README.md | 2 +- neural-networks/object-tracking/kalman/README.md | 2 +- neural-networks/object-tracking/people-tracker/README.md | 2 +- neural-networks/ocr/general-ocr/README.md | 2 +- neural-networks/ocr/license-plate-recognition/README.md | 2 +- neural-networks/pose-estimation/animal-pose/README.md | 2 +- neural-networks/pose-estimation/hand-pose/README.md | 2 +- neural-networks/pose-estimation/human-pose/README.md | 2 +- .../reidentification/human-reidentification/README.md | 2 +- neural-networks/segmentation/blur-background/README.md | 2 +- neural-networks/segmentation/depth-crop/README.md 
| 2 +- neural-networks/speech-recognition/whisper-tiny-en/README.md | 2 +- streaming/mjpeg-streaming/README.md | 2 +- streaming/on-device-encoding/README.md | 2 +- streaming/poe-mqtt/README.md | 2 +- streaming/poe-tcp-streaming/README.md | 2 +- streaming/rtsp-streaming/README.md | 2 +- streaming/webrtc-streaming/README.md | 2 +- tutorials/camera-demo/README.md | 2 +- tutorials/camera-stereo-depth/README.md | 2 +- tutorials/custom-models/README.md | 2 +- tutorials/display-detections/README.md | 2 +- tutorials/full-fov-nn/README.md | 2 +- tutorials/multiple-devices/multi-cam-calibration/README.md | 2 +- tutorials/multiple-devices/multiple-device-stitch-nn/README.md | 2 +- tutorials/multiple-devices/multiple-devices-preview/README.md | 2 +- tutorials/multiple-devices/spatial-detection-fusion/README.md | 2 +- tutorials/play-encoded-stream/README.md | 2 +- tutorials/qr-with-tiling/README.md | 2 +- 75 files changed, 75 insertions(+), 75 deletions(-) diff --git a/apps/conference-demos/rgb-depth-connections/README.md b/apps/conference-demos/rgb-depth-connections/README.md index 887a47581..39b06b70f 100644 --- a/apps/conference-demos/rgb-depth-connections/README.md +++ b/apps/conference-demos/rgb-depth-connections/README.md @@ -25,7 +25,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/apps/default-app/README.md b/apps/default-app/README.md index 4cfc2d426..7d479a21d 100644 --- a/apps/default-app/README.md +++ b/apps/default-app/README.md @@ -23,7 +23,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: 
- [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/camera-controls/depth-driven-focus/README.md b/camera-controls/depth-driven-focus/README.md index 88bd1efa9..3206ed577 100644 --- a/camera-controls/depth-driven-focus/README.md +++ b/camera-controls/depth-driven-focus/README.md @@ -23,7 +23,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/camera-controls/lossless-zooming/README.md b/camera-controls/lossless-zooming/README.md index cd8d72f29..87d2c376d 100644 --- a/camera-controls/lossless-zooming/README.md +++ b/camera-controls/lossless-zooming/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/camera-controls/manual-camera-control/README.md b/camera-controls/manual-camera-control/README.md index eb43bbcf3..aa0919898 100644 --- a/camera-controls/manual-camera-control/README.md +++ b/camera-controls/manual-camera-control/README.md @@ -56,7 +56,7 @@ The following controls can be selected and modified with `+` and `-` keys: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/custom-frontend/raw-stream/README.md b/custom-frontend/raw-stream/README.md index 78226d6f6..cdf50b7ed 100644 --- a/custom-frontend/raw-stream/README.md +++ b/custom-frontend/raw-stream/README.md @@ -29,7 +29,7 @@ Here is a list of all available parameters: #### BackEnd -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/depth-measurement/3d-measurement/box-measurement/README.md b/depth-measurement/3d-measurement/box-measurement/README.md index db9cac3ee..314cc9600 100644 --- a/depth-measurement/3d-measurement/box-measurement/README.md +++ b/depth-measurement/3d-measurement/box-measurement/README.md @@ -25,7 +25,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/depth-measurement/3d-measurement/rgbd-pointcloud/README.md b/depth-measurement/3d-measurement/rgbd-pointcloud/README.md index 0c712fa4c..a48c33dda 100644 --- a/depth-measurement/3d-measurement/rgbd-pointcloud/README.md +++ b/depth-measurement/3d-measurement/rgbd-pointcloud/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/depth-measurement/3d-measurement/tof-pointcloud/README.md b/depth-measurement/3d-measurement/tof-pointcloud/README.md index 135eaffab..ac56ad292 100644 --- a/depth-measurement/3d-measurement/tof-pointcloud/README.md +++ b/depth-measurement/3d-measurement/tof-pointcloud/README.md @@ -16,7 +16,7 @@ Running this example requires a **Luxonis device** connected to your computer. 
R ### Installation -You need to first prepare a **Python 3.10** environment (python versions 3.8 - 3.13 should work too) with the following packages installed: +You need to first prepare a **Python >= 3.10** environment (Python versions up to 3.13 should work too) with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [Open3D](https://pypi.org/project/open3d/) diff --git a/depth-measurement/calc-spatial-on-host/README.md b/depth-measurement/calc-spatial-on-host/README.md index 5bf5bded8..df157e958 100644 --- a/depth-measurement/calc-spatial-on-host/README.md +++ b/depth-measurement/calc-spatial-on-host/README.md @@ -29,7 +29,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/depth-measurement/dynamic-calibration/README.md b/depth-measurement/dynamic-calibration/README.md index 2b7e6ba66..678996fe1 100644 --- a/depth-measurement/dynamic-calibration/README.md +++ b/depth-measurement/dynamic-calibration/README.md @@ -87,7 +87,7 @@ Use these keys while the app is running (focus the browser visualizer window): ## Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/depth-measurement/stereo-on-host/README.md b/depth-measurement/stereo-on-host/README.md index b0a243434..e5183581a 100644 --- a/depth-measurement/stereo-on-host/README.md +++ b/depth-measurement/stereo-on-host/README.md @@ -29,7 +29,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/depth-measurement/stereo-runtime-configuration/README.md b/depth-measurement/stereo-runtime-configuration/README.md index 75e868efe..811f2793c 100644 --- a/depth-measurement/stereo-runtime-configuration/README.md +++ b/depth-measurement/stereo-runtime-configuration/README.md @@ -34,7 +34,7 @@ To change the stereo depth settings, use the following keys: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/depth-measurement/triangulation/README.md b/depth-measurement/triangulation/README.md index e782a34d1..34559cd9c 100644 --- a/depth-measurement/triangulation/README.md +++ b/depth-measurement/triangulation/README.md @@ -23,7 +23,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/depth-measurement/wls-filter/README.md b/depth-measurement/wls-filter/README.md index e980a66eb..3f3788eee 100644 --- a/depth-measurement/wls-filter/README.md +++ b/depth-measurement/wls-filter/README.md @@ -36,7 +36,7 @@ Use the following keyboard controls in the visualizer to adjust WLS filtering pa ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/integrations/foxglove/README.md b/integrations/foxglove/README.md index 4680a0205..b2e10e217 100644 --- a/integrations/foxglove/README.md +++ b/integrations/foxglove/README.md @@ -32,7 +32,7 @@ To see the streams, open [Foxglove Studio](https://app.foxglove.dev/), choose `O ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/integrations/hub-snaps-events/README.md b/integrations/hub-snaps-events/README.md index dcf3f721c..31cf26f60 100644 --- a/integrations/hub-snaps-events/README.md +++ b/integrations/hub-snaps-events/README.md @@ -37,7 +37,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/integrations/rerun/README.md b/integrations/rerun/README.md index 8a9e77496..68519b96c 100644 --- a/integrations/rerun/README.md +++ b/integrations/rerun/README.md @@ -32,7 +32,7 @@ By default, the example will open local Rerun Viewer on the device. You can also ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/integrations/roboflow-dataset/README.md b/integrations/roboflow-dataset/README.md index 143ac21c9..12b219020 100644 --- a/integrations/roboflow-dataset/README.md +++ b/integrations/roboflow-dataset/README.md @@ -39,7 +39,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/3D-detection/objectron/README.md b/neural-networks/3D-detection/objectron/README.md index 443952b3c..b8a5532a4 100644 --- a/neural-networks/3D-detection/objectron/README.md +++ b/neural-networks/3D-detection/objectron/README.md @@ -30,7 +30,7 @@ Camera and shoes can not be detected with general YOLOv6 detector. So, you need ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/counting/crowdcounting/README.md b/neural-networks/counting/crowdcounting/README.md index eee7f6fe7..dddca5da6 100644 --- a/neural-networks/counting/crowdcounting/README.md +++ b/neural-networks/counting/crowdcounting/README.md @@ -32,7 +32,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/counting/cumulative-object-counting/README.md b/neural-networks/counting/cumulative-object-counting/README.md index 3212ab39a..51289e932 100644 --- a/neural-networks/counting/cumulative-object-counting/README.md +++ b/neural-networks/counting/cumulative-object-counting/README.md @@ -41,7 +41,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/counting/depth-people-counting/README.md b/neural-networks/counting/depth-people-counting/README.md index 7f0e6e9cd..34e3ea97b 100644 --- a/neural-networks/counting/depth-people-counting/README.md +++ b/neural-networks/counting/depth-people-counting/README.md @@ -65,7 +65,7 @@ To use the recording with the example: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/counting/people-counter/README.md b/neural-networks/counting/people-counter/README.md index 1e8318c32..af023b958 100644 --- a/neural-networks/counting/people-counter/README.md +++ b/neural-networks/counting/people-counter/README.md @@ -29,7 +29,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/depth-estimation/crestereo-stereo-matching/README.md b/neural-networks/depth-estimation/crestereo-stereo-matching/README.md index cd076e99c..7da7632d2 100644 --- a/neural-networks/depth-estimation/crestereo-stereo-matching/README.md +++ b/neural-networks/depth-estimation/crestereo-stereo-matching/README.md @@ -29,7 +29,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/depth-estimation/neural-depth/README.md b/neural-networks/depth-estimation/neural-depth/README.md index b051c6952..7a39f9b4b 100644 --- a/neural-networks/depth-estimation/neural-depth/README.md +++ b/neural-networks/depth-estimation/neural-depth/README.md @@ -31,7 +31,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/depth-estimation/neural-depth/host_eval/README.md b/neural-networks/depth-estimation/neural-depth/host_eval/README.md index 8db9c1eb6..a9226ea76 100644 --- a/neural-networks/depth-estimation/neural-depth/host_eval/README.md +++ b/neural-networks/depth-estimation/neural-depth/host_eval/README.md @@ -42,7 +42,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/face-detection/age-gender/README.md b/neural-networks/face-detection/age-gender/README.md index ddce6f240..4a5cb25be 100644 --- a/neural-networks/face-detection/age-gender/README.md +++ b/neural-networks/face-detection/age-gender/README.md @@ -28,7 +28,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/face-detection/blur-faces/README.md b/neural-networks/face-detection/blur-faces/README.md index 0102b211f..fc094adaa 100644 --- a/neural-networks/face-detection/blur-faces/README.md +++ b/neural-networks/face-detection/blur-faces/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/face-detection/emotion-recognition/README.md b/neural-networks/face-detection/emotion-recognition/README.md index 1ef62e048..ba9b00531 100644 --- a/neural-networks/face-detection/emotion-recognition/README.md +++ b/neural-networks/face-detection/emotion-recognition/README.md @@ -28,7 +28,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/face-detection/face-mask-detection/README.md b/neural-networks/face-detection/face-mask-detection/README.md index b28abaff2..d6c2b15b0 100644 --- a/neural-networks/face-detection/face-mask-detection/README.md +++ b/neural-networks/face-detection/face-mask-detection/README.md @@ -33,7 +33,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/face-detection/fatigue-detection/README.md b/neural-networks/face-detection/fatigue-detection/README.md index 0d1900b84..661fbad5d 100644 --- a/neural-networks/face-detection/fatigue-detection/README.md +++ b/neural-networks/face-detection/fatigue-detection/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/face-detection/gaze-estimation/README.md b/neural-networks/face-detection/gaze-estimation/README.md index 345a76b70..3ed6976b0 100644 --- a/neural-networks/face-detection/gaze-estimation/README.md +++ b/neural-networks/face-detection/gaze-estimation/README.md @@ -31,7 +31,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/face-detection/head-posture-detection/README.md b/neural-networks/face-detection/head-posture-detection/README.md index 08389af1e..b2ea6fc0a 100644 --- a/neural-networks/face-detection/head-posture-detection/README.md +++ b/neural-networks/face-detection/head-posture-detection/README.md @@ -39,7 +39,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/feature-detection/xfeat/README.md b/neural-networks/feature-detection/xfeat/README.md index 809d7e464..06faa9a81 100644 --- a/neural-networks/feature-detection/xfeat/README.md +++ b/neural-networks/feature-detection/xfeat/README.md @@ -41,7 +41,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/generic-example/README.md b/neural-networks/generic-example/README.md index ad0ec4a87..2e3e761a8 100644 --- a/neural-networks/generic-example/README.md +++ b/neural-networks/generic-example/README.md @@ -30,7 +30,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/object-detection/barcode-detection-conveyor-belt/README.md b/neural-networks/object-detection/barcode-detection-conveyor-belt/README.md index 2c2001b7e..d4b490595 100644 --- a/neural-networks/object-detection/barcode-detection-conveyor-belt/README.md +++ b/neural-networks/object-detection/barcode-detection-conveyor-belt/README.md @@ -59,7 +59,7 @@ sudo apt-get update && apt-get install -y libzbar0 libzbar-dev brew install zbar ``` -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/object-detection/human-machine-safety/README.md b/neural-networks/object-detection/human-machine-safety/README.md index 48e0c27c6..e7c12fa0e 100644 --- a/neural-networks/object-detection/human-machine-safety/README.md +++ b/neural-networks/object-detection/human-machine-safety/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/object-detection/social-distancing/README.md b/neural-networks/object-detection/social-distancing/README.md index 0774666a7..115bb885b 100644 --- a/neural-networks/object-detection/social-distancing/README.md +++ b/neural-networks/object-detection/social-distancing/README.md @@ -29,7 +29,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/object-detection/spatial-detections/README.md b/neural-networks/object-detection/spatial-detections/README.md index ce228ec7e..64db5d4c5 100644 --- a/neural-networks/object-detection/spatial-detections/README.md +++ b/neural-networks/object-detection/spatial-detections/README.md @@ -31,7 +31,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/object-detection/text-blur/README.md b/neural-networks/object-detection/text-blur/README.md index 4015282c2..c8da17f10 100644 --- a/neural-networks/object-detection/text-blur/README.md +++ b/neural-networks/object-detection/text-blur/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/object-detection/thermal-detection/README.md b/neural-networks/object-detection/thermal-detection/README.md index b1e71e9ed..1b1e8e386 100644 --- a/neural-networks/object-detection/thermal-detection/README.md +++ b/neural-networks/object-detection/thermal-detection/README.md @@ -31,7 +31,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/object-detection/yolo-host-decoding/README.md b/neural-networks/object-detection/yolo-host-decoding/README.md index b11cde72d..011d21399 100644 --- a/neural-networks/object-detection/yolo-host-decoding/README.md +++ b/neural-networks/object-detection/yolo-host-decoding/README.md @@ -35,7 +35,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/object-detection/yolo-p/README.md b/neural-networks/object-detection/yolo-p/README.md index 2cfe16cdd..b406ab27c 100644 --- a/neural-networks/object-detection/yolo-p/README.md +++ b/neural-networks/object-detection/yolo-p/README.md @@ -31,7 +31,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/object-detection/yolo-world/README.md b/neural-networks/object-detection/yolo-world/README.md index e2a506cff..cd030feec 100644 --- a/neural-networks/object-detection/yolo-world/README.md +++ b/neural-networks/object-detection/yolo-world/README.md @@ -33,7 +33,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/object-tracking/collision-avoidance/README.md b/neural-networks/object-tracking/collision-avoidance/README.md index d7c3c7e8b..de20d9ee7 100644 --- a/neural-networks/object-tracking/collision-avoidance/README.md +++ b/neural-networks/object-tracking/collision-avoidance/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/object-tracking/deepsort-tracking/README.md b/neural-networks/object-tracking/deepsort-tracking/README.md index fe3bf7a61..9e6b3ca28 100644 --- a/neural-networks/object-tracking/deepsort-tracking/README.md +++ b/neural-networks/object-tracking/deepsort-tracking/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/object-tracking/kalman/README.md b/neural-networks/object-tracking/kalman/README.md index 1bc816326..652f560b8 100644 --- a/neural-networks/object-tracking/kalman/README.md +++ b/neural-networks/object-tracking/kalman/README.md @@ -35,7 +35,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/object-tracking/people-tracker/README.md b/neural-networks/object-tracking/people-tracker/README.md index d66c3ff29..cdd5000b4 100644 --- a/neural-networks/object-tracking/people-tracker/README.md +++ b/neural-networks/object-tracking/people-tracker/README.md @@ -34,7 +34,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/ocr/general-ocr/README.md b/neural-networks/ocr/general-ocr/README.md index 86e4dbd4d..4fc68c3bd 100644 --- a/neural-networks/ocr/general-ocr/README.md +++ b/neural-networks/ocr/general-ocr/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/ocr/license-plate-recognition/README.md b/neural-networks/ocr/license-plate-recognition/README.md index b3208f4c3..b4033b969 100644 --- a/neural-networks/ocr/license-plate-recognition/README.md +++ b/neural-networks/ocr/license-plate-recognition/README.md @@ -38,7 +38,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/pose-estimation/animal-pose/README.md b/neural-networks/pose-estimation/animal-pose/README.md index fc728069d..0bd40421e 100644 --- a/neural-networks/pose-estimation/animal-pose/README.md +++ b/neural-networks/pose-estimation/animal-pose/README.md @@ -31,7 +31,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/pose-estimation/hand-pose/README.md b/neural-networks/pose-estimation/hand-pose/README.md index 7771a6e80..42800d56a 100644 --- a/neural-networks/pose-estimation/hand-pose/README.md +++ b/neural-networks/pose-estimation/hand-pose/README.md @@ -31,7 +31,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/pose-estimation/human-pose/README.md b/neural-networks/pose-estimation/human-pose/README.md index 1591f0024..5ea6876df 100644 --- a/neural-networks/pose-estimation/human-pose/README.md +++ b/neural-networks/pose-estimation/human-pose/README.md @@ -35,7 +35,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/reidentification/human-reidentification/README.md b/neural-networks/reidentification/human-reidentification/README.md index 744d93b6d..aae0d7b96 100644 --- a/neural-networks/reidentification/human-reidentification/README.md +++ b/neural-networks/reidentification/human-reidentification/README.md @@ -42,7 +42,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/neural-networks/segmentation/blur-background/README.md b/neural-networks/segmentation/blur-background/README.md index 9c04f5df7..eb72b8c3d 100644 --- a/neural-networks/segmentation/blur-background/README.md +++ b/neural-networks/segmentation/blur-background/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/segmentation/depth-crop/README.md b/neural-networks/segmentation/depth-crop/README.md index 0d8923087..55e183567 100644 --- a/neural-networks/segmentation/depth-crop/README.md +++ b/neural-networks/segmentation/depth-crop/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/neural-networks/speech-recognition/whisper-tiny-en/README.md b/neural-networks/speech-recognition/whisper-tiny-en/README.md index a6f01ae9d..70d4f9d27 100644 --- a/neural-networks/speech-recognition/whisper-tiny-en/README.md +++ b/neural-networks/speech-recognition/whisper-tiny-en/README.md @@ -52,7 +52,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/streaming/mjpeg-streaming/README.md b/streaming/mjpeg-streaming/README.md index 70132e804..f06a5cbc5 100644 --- a/streaming/mjpeg-streaming/README.md +++ b/streaming/mjpeg-streaming/README.md @@ -27,7 +27,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/streaming/on-device-encoding/README.md b/streaming/on-device-encoding/README.md index e1c85a7cb..bb25338c6 100644 --- a/streaming/on-device-encoding/README.md +++ b/streaming/on-device-encoding/README.md @@ -33,7 +33,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/streaming/poe-mqtt/README.md b/streaming/poe-mqtt/README.md index e41fc969e..f68929440 100644 --- a/streaming/poe-mqtt/README.md +++ b/streaming/poe-mqtt/README.md @@ -42,7 +42,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/streaming/poe-tcp-streaming/README.md b/streaming/poe-tcp-streaming/README.md index f4b1b504f..23cc4baf3 100644 --- a/streaming/poe-tcp-streaming/README.md +++ b/streaming/poe-tcp-streaming/README.md @@ -56,7 +56,7 @@ positional arguments: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/streaming/rtsp-streaming/README.md b/streaming/rtsp-streaming/README.md index baea63247..ea585b432 100644 --- a/streaming/rtsp-streaming/README.md +++ b/streaming/rtsp-streaming/README.md @@ -25,7 +25,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/streaming/webrtc-streaming/README.md b/streaming/webrtc-streaming/README.md index 9bf74cdc3..67bb16d44 100644 --- a/streaming/webrtc-streaming/README.md +++ b/streaming/webrtc-streaming/README.md @@ -16,7 +16,7 @@ You can run the example fully on device ([`STANDALONE` mode](#standalone-mode-rv ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/tutorials/camera-demo/README.md b/tutorials/camera-demo/README.md index 0f46f9d93..a26f30721 100644 --- a/tutorials/camera-demo/README.md +++ b/tutorials/camera-demo/README.md @@ -26,7 +26,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/tutorials/camera-stereo-depth/README.md b/tutorials/camera-stereo-depth/README.md index d29bf3b73..377029808 100644 --- a/tutorials/camera-stereo-depth/README.md +++ b/tutorials/camera-stereo-depth/README.md @@ -25,7 +25,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/tutorials/custom-models/README.md b/tutorials/custom-models/README.md index 75ed60804..90b2ee8d4 100644 --- a/tutorials/custom-models/README.md +++ b/tutorials/custom-models/README.md @@ -36,7 +36,7 @@ For more information see [README.md](generate_model/README.md) file in the `gene ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). 
diff --git a/tutorials/display-detections/README.md b/tutorials/display-detections/README.md index 965157d2b..d3ce6c4f8 100644 --- a/tutorials/display-detections/README.md +++ b/tutorials/display-detections/README.md @@ -41,7 +41,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/tutorials/full-fov-nn/README.md b/tutorials/full-fov-nn/README.md index 9d620e6da..6bed76b0e 100644 --- a/tutorials/full-fov-nn/README.md +++ b/tutorials/full-fov-nn/README.md @@ -60,7 +60,7 @@ These scripts run only in the corresponding mode, which cannot be toggled during ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/tutorials/multiple-devices/multi-cam-calibration/README.md b/tutorials/multiple-devices/multi-cam-calibration/README.md index 70bbcb5b1..b5fb64831 100644 --- a/tutorials/multiple-devices/multi-cam-calibration/README.md +++ b/tutorials/multiple-devices/multi-cam-calibration/README.md @@ -86,7 +86,7 @@ Here is a list of all available parameters: Running in peripheral mode requires a host computer and there will be communication between device and host which could affect the overall speed of the app. You can find more information about the supported devices and the set up instructions in our [Documentation](https://rvc4.docs.luxonis.com/hardware). 
-Moreover, you need to prepare a **Python 3.10** environment with the following packages installed: +Moreover, you need to prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/tutorials/multiple-devices/multiple-device-stitch-nn/README.md b/tutorials/multiple-devices/multiple-device-stitch-nn/README.md index 1c967ab39..e0a2d0165 100644 --- a/tutorials/multiple-devices/multiple-device-stitch-nn/README.md +++ b/tutorials/multiple-devices/multiple-device-stitch-nn/README.md @@ -39,7 +39,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). diff --git a/tutorials/multiple-devices/multiple-devices-preview/README.md b/tutorials/multiple-devices/multiple-devices-preview/README.md index e1878eccf..fd1deabce 100644 --- a/tutorials/multiple-devices/multiple-devices-preview/README.md +++ b/tutorials/multiple-devices/multiple-devices-preview/README.md @@ -53,7 +53,7 @@ Here is a list of all available parameters: Running in peripheral mode requires a host computer and there will be communication between device and host which could affect the overall speed of the app. You can find more information about the supported devices and the set up instructions in our [Documentation](https://rvc4.docs.luxonis.com/hardware). 
-Moreover, you need to prepare a **Python 3.10** environment with the following packages installed: +Moreover, you need to prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/) diff --git a/tutorials/multiple-devices/spatial-detection-fusion/README.md b/tutorials/multiple-devices/spatial-detection-fusion/README.md index 4df79ae49..12091d7a3 100644 --- a/tutorials/multiple-devices/spatial-detection-fusion/README.md +++ b/tutorials/multiple-devices/spatial-detection-fusion/README.md @@ -52,7 +52,7 @@ Here is a list of all available parameters: Running in peripheral mode requires a host computer and there will be communication between device and host which could affect the overall speed of the app. You can find more information about the supported devices and the set up instructions in our [Documentation](https://rvc4.docs.luxonis.com/hardware). -Moreover, you need to prepare a **Python 3.10** environment with the following packages installed: +Moreover, you need to prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/tutorials/play-encoded-stream/README.md b/tutorials/play-encoded-stream/README.md index 223eb86bb..1543dad7d 100644 --- a/tutorials/play-encoded-stream/README.md +++ b/tutorials/play-encoded-stream/README.md @@ -37,7 +37,7 @@ Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/) diff --git a/tutorials/qr-with-tiling/README.md b/tutorials/qr-with-tiling/README.md index dbb450168..a26a8e664 100644 --- a/tutorials/qr-with-tiling/README.md +++ b/tutorials/qr-with-tiling/README.md @@ -35,7 +35,7 @@ 
Here is a list of all available parameters: ### Installation -You need to first prepare a **Python 3.10** environment with the following packages installed: +You need to first prepare a **Python >= 3.10** environment with the following packages installed: - [DepthAI](https://pypi.org/project/depthai/), - [DepthAI Nodes](https://pypi.org/project/depthai-nodes/). From 0b8d31b328b5db8fa90abe3008f379df4d3ea163 Mon Sep 17 00:00:00 2001 From: bblazeva Date: Tue, 3 Mar 2026 12:39:24 +0100 Subject: [PATCH 10/14] update requirements --- camera-controls/depth-driven-focus/requirements.txt | 4 ++-- camera-controls/manual-camera-control/requirements.txt | 2 +- custom-frontend/raw-stream/requirements.txt | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/camera-controls/depth-driven-focus/requirements.txt b/camera-controls/depth-driven-focus/requirements.txt index df8d7aa85..77fbe7cf7 100644 --- a/camera-controls/depth-driven-focus/requirements.txt +++ b/camera-controls/depth-driven-focus/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/camera-controls/manual-camera-control/requirements.txt b/camera-controls/manual-camera-control/requirements.txt index 04985657c..a5175aba8 100644 --- a/camera-controls/manual-camera-control/requirements.txt +++ b/camera-controls/manual-camera-control/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 +depthai==3.3.0 opencv-python-headless~=4.10.0 \ No newline at end of file diff --git a/custom-frontend/raw-stream/requirements.txt b/custom-frontend/raw-stream/requirements.txt index 877bcf059..ab5ac5c32 100644 --- a/custom-frontend/raw-stream/requirements.txt +++ b/custom-frontend/raw-stream/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 +depthai==3.3.0 numpy>=1.22 \ No newline at end of file From 0afe071529bcddd7bbe151f99bcfd5f2b16adf0c Mon Sep 17 00:00:00 2001 From: bblazeva Date: Wed, 
4 Mar 2026 15:56:15 +0100 Subject: [PATCH 11/14] update requirements + ApplyColormap -> ApplyDepthColormap --- .../3d-measurement/rgbd-pointcloud/requirements.txt | 2 +- depth-measurement/dynamic-calibration/main.py | 5 ++--- depth-measurement/dynamic-calibration/requirements.txt | 4 ++-- depth-measurement/stereo-on-host/main.py | 5 ++--- depth-measurement/stereo-on-host/requirements.txt | 4 ++-- depth-measurement/triangulation/requirements.txt | 2 +- depth-measurement/wls-filter/main.py | 8 +++----- depth-measurement/wls-filter/requirements.txt | 4 ++-- integrations/roboflow-dataset/requirements.txt | 4 ++-- neural-networks/counting/crowdcounting/requirements.txt | 4 ++-- .../counting/cumulative-object-counting/requirements.txt | 4 ++-- neural-networks/counting/depth-people-counting/main.py | 6 +++--- .../counting/depth-people-counting/requirements.txt | 4 ++-- .../depth-estimation/foundation-stereo/main.py | 4 ++-- .../depth-estimation/foundation-stereo/requirements.txt | 4 ++-- .../neural-depth/host_eval/requirements.txt | 2 +- .../depth-estimation/neural-depth/requirements.txt | 4 ++-- neural-networks/feature-detection/xfeat/requirements.txt | 4 ++-- neural-networks/generic-example/requirements.txt | 4 ++-- .../object-detection/social-distancing/requirements.txt | 4 ++-- .../object-detection/spatial-detections/main.py | 4 ++-- .../object-detection/spatial-detections/requirements.txt | 4 ++-- .../object-detection/thermal-detection/requirements.txt | 4 ++-- .../object-detection/yolo-host-decoding/requirements.txt | 2 +- .../object-tracking/collision-avoidance/main.py | 2 +- .../object-tracking/collision-avoidance/requirements.txt | 4 ++-- neural-networks/object-tracking/kalman/requirements.txt | 4 ++-- .../object-tracking/people-tracker/requirements.txt | 4 ++-- .../ocr/license-plate-recognition/requirements.txt | 4 ++-- .../segmentation/blur-background/requirements.txt | 4 ++-- neural-networks/segmentation/depth-crop/requirements.txt | 4 ++-- 
.../speech-recognition/whisper-tiny-en/requirements.txt | 4 ++-- streaming/mjpeg-streaming/requirements.txt | 4 ++-- streaming/on-device-encoding/requirements.txt | 2 +- streaming/poe-mqtt/requirements.txt | 4 ++-- streaming/poe-tcp-streaming/requirements.txt | 2 +- streaming/rtsp-streaming/requirements.txt | 2 +- tutorials/camera-demo/requirements.txt | 2 +- tutorials/custom-models/generate_model/requirements.txt | 2 +- tutorials/custom-models/requirements.txt | 4 ++-- tutorials/display-detections/requirements.txt | 4 ++-- tutorials/full-fov-nn/requirements.txt | 4 ++-- .../multi-cam-calibration/requirements.txt | 4 ++-- .../multiple-device-stitch-nn/requirements.txt | 4 ++-- .../multiple-devices-preview/requirements.txt | 4 ++-- .../spatial-detection-fusion/requirements.txt | 4 ++-- tutorials/play-encoded-stream/requirements.txt | 2 +- tutorials/qr-with-tiling/requirements.txt | 4 ++-- 48 files changed, 87 insertions(+), 91 deletions(-) diff --git a/depth-measurement/3d-measurement/rgbd-pointcloud/requirements.txt b/depth-measurement/3d-measurement/rgbd-pointcloud/requirements.txt index 877bcf059..ab5ac5c32 100644 --- a/depth-measurement/3d-measurement/rgbd-pointcloud/requirements.txt +++ b/depth-measurement/3d-measurement/rgbd-pointcloud/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 +depthai==3.3.0 numpy>=1.22 \ No newline at end of file diff --git a/depth-measurement/dynamic-calibration/main.py b/depth-measurement/dynamic-calibration/main.py index 92e49943e..b4e55f6fe 100644 --- a/depth-measurement/dynamic-calibration/main.py +++ b/depth-measurement/dynamic-calibration/main.py @@ -1,6 +1,6 @@ import cv2 -from depthai_nodes.node import ApplyColormap +from depthai_nodes.node import ApplyDepthColormap import depthai as dai from utils.dynamic_controler import DynamicCalibrationControler @@ -35,8 +35,7 @@ right_out.link(dyn_calib.right) # Output queues - depth_parser = pipeline.create(ApplyColormap).build(stereo.disparity) - # 
depth_parser.setMaxValue(int(stereo.initialConfig.getMaxDisparity())) # NOTE: Uncomment when DAI fixes a bug + depth_parser = pipeline.create(ApplyDepthColormap).build(stereo.disparity) depth_parser.setColormap(cv2.COLORMAP_JET) calibration = device.readCalibration() diff --git a/depth-measurement/dynamic-calibration/requirements.txt b/depth-measurement/dynamic-calibration/requirements.txt index c7a442e6f..f63997c07 100644 --- a/depth-measurement/dynamic-calibration/requirements.txt +++ b/depth-measurement/dynamic-calibration/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.2.1 -depthai-nodes==0.3.6 +depthai==3.3.0 +depthai-nodes==0.4.0 numpy>=1.22 opencv-python==4.10.0.84 opencv-contrib-python==4.10.0.84 diff --git a/depth-measurement/stereo-on-host/main.py b/depth-measurement/stereo-on-host/main.py index e6c7a2e9a..38309bba5 100644 --- a/depth-measurement/stereo-on-host/main.py +++ b/depth-measurement/stereo-on-host/main.py @@ -3,7 +3,7 @@ from utils.arguments import initialize_argparser from utils.host_stereo_sgbm import StereoSGBM from utils.host_ssim import SSIM -from depthai_nodes.node import ApplyColormap +from depthai_nodes.node import ApplyDepthColormap import cv2 RESOLUTION_SIZE = (640, 400) @@ -51,8 +51,7 @@ stereo.setExtendedDisparity(False) stereo.setSubpixel(True) - depth_parser = pipeline.create(ApplyColormap).build(stereo.disparity) - depth_parser.setMaxValue(int(stereo.initialConfig.getMaxDisparity())) + depth_parser = pipeline.create(ApplyDepthColormap).build(stereo.disparity) depth_parser.setColormap(cv2.COLORMAP_JET) ssim = pipeline.create(SSIM).build( diff --git a/depth-measurement/stereo-on-host/requirements.txt b/depth-measurement/stereo-on-host/requirements.txt index 05c5898ec..fb9fadfe1 100644 --- a/depth-measurement/stereo-on-host/requirements.txt +++ b/depth-measurement/stereo-on-host/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 scikit-image 
numpy>=1.22 diff --git a/depth-measurement/triangulation/requirements.txt b/depth-measurement/triangulation/requirements.txt index c93725a89..657eada6e 100644 --- a/depth-measurement/triangulation/requirements.txt +++ b/depth-measurement/triangulation/requirements.txt @@ -1,4 +1,4 @@ depthai==3.3.0 -depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 diff --git a/depth-measurement/wls-filter/main.py b/depth-measurement/wls-filter/main.py index 5610d9e23..a0d081091 100755 --- a/depth-measurement/wls-filter/main.py +++ b/depth-measurement/wls-filter/main.py @@ -1,7 +1,7 @@ import cv2 import depthai as dai from utils.host_wls_filter import WLSFilter -from depthai_nodes.node import ApplyColormap +from depthai_nodes.node import ApplyDepthColormap from utils.arguments import initialize_argparser _, args = initialize_argparser() @@ -39,14 +39,12 @@ baseline=baseline, ) - disp_colored = pipeline.create(ApplyColormap).build(stereo.disparity) - disp_colored.setMaxValue(int(stereo.initialConfig.getMaxDisparity())) + disp_colored = pipeline.create(ApplyDepthColormap).build(stereo.disparity) disp_colored.setColormap(cv2.COLORMAP_JET) - filtered_disp_colored = pipeline.create(ApplyColormap).build( + filtered_disp_colored = pipeline.create(ApplyDepthColormap).build( wls_filter.filtered_disp ) - filtered_disp_colored.setMaxValue(255) filtered_disp_colored.setColormap(cv2.COLORMAP_JET) visualizer.addTopic("Rectified Right", stereo.rectifiedRight) diff --git a/depth-measurement/wls-filter/requirements.txt b/depth-measurement/wls-filter/requirements.txt index 7552cb7ce..680926232 100644 --- a/depth-measurement/wls-filter/requirements.txt +++ b/depth-measurement/wls-filter/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 opencv-contrib-python==4.10.0.84 numpy>=1.22 diff --git 
a/integrations/roboflow-dataset/requirements.txt b/integrations/roboflow-dataset/requirements.txt index b6d95df4c..21663e495 100644 --- a/integrations/roboflow-dataset/requirements.txt +++ b/integrations/roboflow-dataset/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 numpy>=1.22 opencv-python-headless~=4.10.0 roboflow==1.1.36 diff --git a/neural-networks/counting/crowdcounting/requirements.txt b/neural-networks/counting/crowdcounting/requirements.txt index df8d7aa85..77fbe7cf7 100644 --- a/neural-networks/counting/crowdcounting/requirements.txt +++ b/neural-networks/counting/crowdcounting/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/counting/cumulative-object-counting/requirements.txt b/neural-networks/counting/cumulative-object-counting/requirements.txt index 7bfdaaf09..702238a7b 100644 --- a/neural-networks/counting/cumulative-object-counting/requirements.txt +++ b/neural-networks/counting/cumulative-object-counting/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/counting/depth-people-counting/main.py b/neural-networks/counting/depth-people-counting/main.py index baaac67f8..cedf4c361 100644 --- a/neural-networks/counting/depth-people-counting/main.py +++ b/neural-networks/counting/depth-people-counting/main.py @@ -1,6 +1,6 @@ import depthai as dai import os -from depthai_nodes.node import ApplyColormap +from depthai_nodes.node import ApplyDepthColormap from utils.arguments import initialize_argparser from utils.frame_editor import FrameEditor @@ -74,7 +74,7 @@ dai.TrackerIdAssignmentPolicy.SMALLEST_ID ) - color_transform_disparity = pipeline.create(ApplyColormap).build(stereo.disparity) + color_transform_disparity 
= pipeline.create(ApplyDepthColormap).build(stereo.disparity) color_transform_disparity.out.link(objectTracker.inputTrackerFrame) color_transform_disparity.out.link(objectTracker.inputDetectionFrame) detection_generator.out.link(objectTracker.inputDetections) @@ -86,7 +86,7 @@ # visualization visualizer.addTopic("Disparity", color_transform_disparity.out, "disparity") - visualizer.addTopic("Count", annotation_node.out) + visualizer.addTopic("Count", annotation_node.out, "disparity") print("Pipeline created.") diff --git a/neural-networks/counting/depth-people-counting/requirements.txt b/neural-networks/counting/depth-people-counting/requirements.txt index df8d7aa85..77fbe7cf7 100644 --- a/neural-networks/counting/depth-people-counting/requirements.txt +++ b/neural-networks/counting/depth-people-counting/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/depth-estimation/foundation-stereo/main.py b/neural-networks/depth-estimation/foundation-stereo/main.py index 7da76ad96..07c6e3192 100644 --- a/neural-networks/depth-estimation/foundation-stereo/main.py +++ b/neural-networks/depth-estimation/foundation-stereo/main.py @@ -1,5 +1,5 @@ import depthai as dai -from depthai_nodes.node import ApplyColormap +from depthai_nodes.node import ApplyDepthColormap from utils.arguments import initialize_argparser from utils.utility import get_resolution_profile @@ -50,7 +50,7 @@ inference_shape=resolution_profile.nn_shape, ) - colored_disp = pipeline.create(ApplyColormap).build(stereo.disparity) + colored_disp = pipeline.create(ApplyDepthColormap).build(stereo.disparity) visualizer.addTopic("FS Result", fs_inferer.output) visualizer.addTopic("Disparity", colored_disp.out) diff --git a/neural-networks/depth-estimation/foundation-stereo/requirements.txt b/neural-networks/depth-estimation/foundation-stereo/requirements.txt index 
5483b3136..1dda0bf24 100644 --- a/neural-networks/depth-estimation/foundation-stereo/requirements.txt +++ b/neural-networks/depth-estimation/foundation-stereo/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 onnxruntime>=1.19.0 onnxruntime-gpu>=1.19.0 numpy>=1.22 diff --git a/neural-networks/depth-estimation/neural-depth/host_eval/requirements.txt b/neural-networks/depth-estimation/neural-depth/host_eval/requirements.txt index e585b4f7a..1ca8570e3 100644 --- a/neural-networks/depth-estimation/neural-depth/host_eval/requirements.txt +++ b/neural-networks/depth-estimation/neural-depth/host_eval/requirements.txt @@ -1,5 +1,5 @@ beautifulsoup4==4.12.3 -depthai==3.2.1 +depthai==3.3.0 numpy opencv-python~=4.10.0 requests diff --git a/neural-networks/depth-estimation/neural-depth/requirements.txt b/neural-networks/depth-estimation/neural-depth/requirements.txt index 1ef4f866c..218b6f03b 100644 --- a/neural-networks/depth-estimation/neural-depth/requirements.txt +++ b/neural-networks/depth-estimation/neural-depth/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.2.1 -depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@1b1dd7953feeaff1ca1a8c2234c532704b167d5f \ No newline at end of file +depthai==3.3.0 +depthai-nodes==0.4.0 \ No newline at end of file diff --git a/neural-networks/feature-detection/xfeat/requirements.txt b/neural-networks/feature-detection/xfeat/requirements.txt index df8d7aa85..77fbe7cf7 100644 --- a/neural-networks/feature-detection/xfeat/requirements.txt +++ b/neural-networks/feature-detection/xfeat/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/generic-example/requirements.txt b/neural-networks/generic-example/requirements.txt index e5aedc196..d0040b640 100644 --- a/neural-networks/generic-example/requirements.txt +++ 
b/neural-networks/generic-example/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 python-dotenv diff --git a/neural-networks/object-detection/social-distancing/requirements.txt b/neural-networks/object-detection/social-distancing/requirements.txt index df8d7aa85..77fbe7cf7 100644 --- a/neural-networks/object-detection/social-distancing/requirements.txt +++ b/neural-networks/object-detection/social-distancing/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/object-detection/spatial-detections/main.py b/neural-networks/object-detection/spatial-detections/main.py index 965b2b64f..712ff1dd8 100644 --- a/neural-networks/object-detection/spatial-detections/main.py +++ b/neural-networks/object-detection/spatial-detections/main.py @@ -1,5 +1,5 @@ import depthai as dai -from depthai_nodes.node import ApplyColormap +from depthai_nodes.node import ApplyDepthColormap from utils.arguments import initialize_argparser from utils.annotation_node import AnnotationNode @@ -73,7 +73,7 @@ input_detections=nn.out, depth=stereo.depth, labels=classes ) - apply_colormap = pipeline.create(ApplyColormap).build(stereo.depth) + apply_colormap = pipeline.create(ApplyDepthColormap).build(stereo.depth) # video encoding cam_nv12 = cam.requestOutput( diff --git a/neural-networks/object-detection/spatial-detections/requirements.txt b/neural-networks/object-detection/spatial-detections/requirements.txt index 56b6f790b..6f5034885 100644 --- a/neural-networks/object-detection/spatial-detections/requirements.txt +++ b/neural-networks/object-detection/spatial-detections/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 diff --git 
a/neural-networks/object-detection/thermal-detection/requirements.txt b/neural-networks/object-detection/thermal-detection/requirements.txt index 8e4027fc1..218b6f03b 100644 --- a/neural-networks/object-detection/thermal-detection/requirements.txt +++ b/neural-networks/object-detection/thermal-detection/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.3.0 +depthai-nodes==0.4.0 \ No newline at end of file diff --git a/neural-networks/object-detection/yolo-host-decoding/requirements.txt b/neural-networks/object-detection/yolo-host-decoding/requirements.txt index 877bcf059..ab5ac5c32 100644 --- a/neural-networks/object-detection/yolo-host-decoding/requirements.txt +++ b/neural-networks/object-detection/yolo-host-decoding/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 +depthai==3.3.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/object-tracking/collision-avoidance/main.py b/neural-networks/object-tracking/collision-avoidance/main.py index aac30a2df..9266392e8 100644 --- a/neural-networks/object-tracking/collision-avoidance/main.py +++ b/neural-networks/object-tracking/collision-avoidance/main.py @@ -67,7 +67,7 @@ ) # TODO: change to numShaves=4 if running on OAK-D Lite img_detections_filter = pipeline.create(ImgDetectionsFilter).build(nn.out) - img_detections_filter.keepLabels([person_label]) + img_detections_filter.keepLabels([person_label]) # keep only person detections # tracking tracker = pipeline.create(dai.node.ObjectTracker) diff --git a/neural-networks/object-tracking/collision-avoidance/requirements.txt b/neural-networks/object-tracking/collision-avoidance/requirements.txt index 8e4027fc1..c2a0c7bb7 100644 --- a/neural-networks/object-tracking/collision-avoidance/requirements.txt +++ b/neural-networks/object-tracking/collision-avoidance/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.3.0 +depthai-nodes @ 
git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git a/neural-networks/object-tracking/kalman/requirements.txt b/neural-networks/object-tracking/kalman/requirements.txt index 8e4027fc1..218b6f03b 100644 --- a/neural-networks/object-tracking/kalman/requirements.txt +++ b/neural-networks/object-tracking/kalman/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.3.0 +depthai-nodes==0.4.0 \ No newline at end of file diff --git a/neural-networks/object-tracking/people-tracker/requirements.txt b/neural-networks/object-tracking/people-tracker/requirements.txt index 8e4027fc1..218b6f03b 100644 --- a/neural-networks/object-tracking/people-tracker/requirements.txt +++ b/neural-networks/object-tracking/people-tracker/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.3.0 +depthai-nodes==0.4.0 \ No newline at end of file diff --git a/neural-networks/ocr/license-plate-recognition/requirements.txt b/neural-networks/ocr/license-plate-recognition/requirements.txt index df8d7aa85..77fbe7cf7 100644 --- a/neural-networks/ocr/license-plate-recognition/requirements.txt +++ b/neural-networks/ocr/license-plate-recognition/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/segmentation/blur-background/requirements.txt b/neural-networks/segmentation/blur-background/requirements.txt index df8d7aa85..77fbe7cf7 100644 --- a/neural-networks/segmentation/blur-background/requirements.txt +++ b/neural-networks/segmentation/blur-background/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git 
a/neural-networks/segmentation/depth-crop/requirements.txt b/neural-networks/segmentation/depth-crop/requirements.txt index 8e4027fc1..218b6f03b 100644 --- a/neural-networks/segmentation/depth-crop/requirements.txt +++ b/neural-networks/segmentation/depth-crop/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.3.0 +depthai-nodes==0.4.0 \ No newline at end of file diff --git a/neural-networks/speech-recognition/whisper-tiny-en/requirements.txt b/neural-networks/speech-recognition/whisper-tiny-en/requirements.txt index a36509b78..5121ab744 100644 --- a/neural-networks/speech-recognition/whisper-tiny-en/requirements.txt +++ b/neural-networks/speech-recognition/whisper-tiny-en/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 numpy>=1.22 scipy tqdm diff --git a/streaming/mjpeg-streaming/requirements.txt b/streaming/mjpeg-streaming/requirements.txt index d3c270afc..657eada6e 100644 --- a/streaming/mjpeg-streaming/requirements.txt +++ b/streaming/mjpeg-streaming/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 diff --git a/streaming/on-device-encoding/requirements.txt b/streaming/on-device-encoding/requirements.txt index 60dc17665..65eb37eb3 100644 --- a/streaming/on-device-encoding/requirements.txt +++ b/streaming/on-device-encoding/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.0.0 +depthai==3.3.0 av==12.3.0 numpy>=1.22 \ No newline at end of file diff --git a/streaming/poe-mqtt/requirements.txt b/streaming/poe-mqtt/requirements.txt index 8e4027fc1..218b6f03b 100644 --- a/streaming/poe-mqtt/requirements.txt +++ b/streaming/poe-mqtt/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.3.0 +depthai-nodes==0.4.0 \ No newline at end of file diff --git a/streaming/poe-tcp-streaming/requirements.txt 
b/streaming/poe-tcp-streaming/requirements.txt index 2da0fec82..8a3dcf77c 100644 --- a/streaming/poe-tcp-streaming/requirements.txt +++ b/streaming/poe-tcp-streaming/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.0.0 +depthai==3.3.0 opencv-python~=4.10.0 numpy>=1.22 diff --git a/streaming/rtsp-streaming/requirements.txt b/streaming/rtsp-streaming/requirements.txt index 4e6b1d24f..b4455f046 100644 --- a/streaming/rtsp-streaming/requirements.txt +++ b/streaming/rtsp-streaming/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.0.0 +depthai==3.3.0 numpy>=1.22 PyGObject==3.46.0 \ No newline at end of file diff --git a/tutorials/camera-demo/requirements.txt b/tutorials/camera-demo/requirements.txt index 877bcf059..ab5ac5c32 100644 --- a/tutorials/camera-demo/requirements.txt +++ b/tutorials/camera-demo/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 +depthai==3.3.0 numpy>=1.22 \ No newline at end of file diff --git a/tutorials/custom-models/generate_model/requirements.txt b/tutorials/custom-models/generate_model/requirements.txt index 0804d8b48..a990220e2 100644 --- a/tutorials/custom-models/generate_model/requirements.txt +++ b/tutorials/custom-models/generate_model/requirements.txt @@ -1,5 +1,5 @@ / -depthai==3.0.0 +depthai==3.3.0 modelconv==0.3.3 numpy==1.23.0 onnx==1.17.0 diff --git a/tutorials/custom-models/requirements.txt b/tutorials/custom-models/requirements.txt index 1fa4798a1..8127b82d6 100644 --- a/tutorials/custom-models/requirements.txt +++ b/tutorials/custom-models/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 numpy>=1.22 opencv-python-headless~=4.10.0 diff --git a/tutorials/display-detections/requirements.txt b/tutorials/display-detections/requirements.txt index 1fa4798a1..8127b82d6 100644 --- a/tutorials/display-detections/requirements.txt +++ b/tutorials/display-detections/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 numpy>=1.22 
opencv-python-headless~=4.10.0 diff --git a/tutorials/full-fov-nn/requirements.txt b/tutorials/full-fov-nn/requirements.txt index 56b6f790b..6f5034885 100644 --- a/tutorials/full-fov-nn/requirements.txt +++ b/tutorials/full-fov-nn/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 diff --git a/tutorials/multiple-devices/multi-cam-calibration/requirements.txt b/tutorials/multiple-devices/multi-cam-calibration/requirements.txt index 556ae9181..ddd4850e4 100644 --- a/tutorials/multiple-devices/multi-cam-calibration/requirements.txt +++ b/tutorials/multiple-devices/multi-cam-calibration/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 opencv-python==4.10.0.84 numpy>=1.22 diff --git a/tutorials/multiple-devices/multiple-device-stitch-nn/requirements.txt b/tutorials/multiple-devices/multiple-device-stitch-nn/requirements.txt index f02d8d291..5e01934ef 100644 --- a/tutorials/multiple-devices/multiple-device-stitch-nn/requirements.txt +++ b/tutorials/multiple-devices/multiple-device-stitch-nn/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 imutils stitching==0.6.1 \ No newline at end of file diff --git a/tutorials/multiple-devices/multiple-devices-preview/requirements.txt b/tutorials/multiple-devices/multiple-devices-preview/requirements.txt index 8e4027fc1..218b6f03b 100644 --- a/tutorials/multiple-devices/multiple-devices-preview/requirements.txt +++ b/tutorials/multiple-devices/multiple-devices-preview/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 \ No newline at end of file +depthai==3.3.0 +depthai-nodes==0.4.0 \ No newline at end of file diff --git a/tutorials/multiple-devices/spatial-detection-fusion/requirements.txt b/tutorials/multiple-devices/spatial-detection-fusion/requirements.txt index c621ccab6..cd84aa195 100644 --- 
a/tutorials/multiple-devices/spatial-detection-fusion/requirements.txt +++ b/tutorials/multiple-devices/spatial-detection-fusion/requirements.txt @@ -1,5 +1,5 @@ -depthai==3.0.0 -depthai-nodes==0.3.4 +depthai==3.3.0 +depthai-nodes==0.4.0 opencv-python-headless==4.10.0.84 numpy>=1.22 scipy diff --git a/tutorials/play-encoded-stream/requirements.txt b/tutorials/play-encoded-stream/requirements.txt index b815f908f..3e503ffb8 100644 --- a/tutorials/play-encoded-stream/requirements.txt +++ b/tutorials/play-encoded-stream/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.0.0 +depthai==3.3.0 opencv-python-headless~=4.10.0 av==12.3.0 \ No newline at end of file diff --git a/tutorials/qr-with-tiling/requirements.txt b/tutorials/qr-with-tiling/requirements.txt index 8e7aaa353..f3f6264f1 100644 --- a/tutorials/qr-with-tiling/requirements.txt +++ b/tutorials/qr-with-tiling/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.1.0 -depthai-nodes==0.3.5 +depthai==3.3.0 +depthai-nodes==0.4.0 numpy>=1.22 pyzbar==0.1.9 From 80fd13705a3abb06b5aa842c0b8661f284a37c69 Mon Sep 17 00:00:00 2001 From: bblazeva Date: Tue, 10 Mar 2026 14:02:42 +0100 Subject: [PATCH 12/14] update requirements - DAI 3.4.0 --- camera-controls/depth-driven-focus/requirements.txt | 4 ++-- camera-controls/lossless-zooming/requirements.txt | 2 +- camera-controls/manual-camera-control/requirements.txt | 2 +- .../open-vocabulary-object-detection/backend/requirements.txt | 2 +- custom-frontend/raw-stream/requirements.txt | 2 +- .../3d-measurement/box-measurement/requirements.txt | 2 +- .../3d-measurement/rgbd-pointcloud/requirements.txt | 2 +- depth-measurement/dynamic-calibration/requirements.txt | 2 +- depth-measurement/stereo-on-host/requirements.txt | 2 +- depth-measurement/triangulation/requirements.txt | 2 +- depth-measurement/wls-filter/requirements.txt | 2 +- integrations/hub-snaps-events/requirements.txt | 2 +- integrations/roboflow-dataset/requirements.txt | 2 +- 
integrations/roboflow-workflow/backend/src/requirements.txt | 2 +- neural-networks/3D-detection/objectron/requirements.txt | 2 +- neural-networks/counting/crowdcounting/requirements.txt | 2 +- .../counting/cumulative-object-counting/requirements.txt | 2 +- .../counting/depth-people-counting/requirements.txt | 2 +- neural-networks/counting/people-counter/requirements.txt | 2 +- .../depth-estimation/foundation-stereo/requirements.txt | 2 +- .../depth-estimation/neural-depth/host_eval/requirements.txt | 2 +- .../depth-estimation/neural-depth/requirements.txt | 2 +- neural-networks/face-detection/age-gender/requirements.txt | 2 +- neural-networks/face-detection/blur-faces/requirements.txt | 2 +- .../face-detection/emotion-recognition/requirements.txt | 2 +- .../face-detection/face-mask-detection/requirements.txt | 2 +- .../face-detection/fatigue-detection/requirements.txt | 2 +- .../face-detection/gaze-estimation/requirements.txt | 2 +- .../face-detection/head-posture-detection/requirements.txt | 2 +- neural-networks/feature-detection/xfeat/requirements.txt | 2 +- neural-networks/generic-example/requirements.txt | 2 +- .../barcode-detection-conveyor-belt/requirements.txt | 2 +- .../object-detection/human-machine-safety/requirements.txt | 2 +- .../object-detection/social-distancing/requirements.txt | 4 ++-- .../object-detection/spatial-detections/requirements.txt | 2 +- neural-networks/object-detection/text-blur/requirements.txt | 2 +- .../object-detection/thermal-detection/requirements.txt | 2 +- .../object-detection/yolo-host-decoding/requirements.txt | 2 +- neural-networks/object-detection/yolo-p/requirements.txt | 2 +- neural-networks/object-detection/yolo-world/requirements.txt | 2 +- .../object-tracking/collision-avoidance/requirements.txt | 2 +- .../object-tracking/deepsort-tracking/requirements.txt | 2 +- neural-networks/object-tracking/kalman/requirements.txt | 2 +- .../object-tracking/people-tracker/requirements.txt | 2 +- 
neural-networks/ocr/general-ocr/requirements.txt | 2 +- .../ocr/license-plate-recognition/requirements.txt | 2 +- neural-networks/pose-estimation/animal-pose/requirements.txt | 2 +- neural-networks/pose-estimation/hand-pose/requirements.txt | 2 +- neural-networks/pose-estimation/human-pose/requirements.txt | 2 +- .../reidentification/human-reidentification/requirements.txt | 2 +- neural-networks/segmentation/blur-background/requirements.txt | 2 +- neural-networks/segmentation/depth-crop/requirements.txt | 2 +- .../speech-recognition/whisper-tiny-en/requirements.txt | 2 +- streaming/mjpeg-streaming/requirements.txt | 2 +- streaming/on-device-encoding/requirements.txt | 2 +- streaming/poe-mqtt/requirements.txt | 2 +- streaming/poe-tcp-streaming/requirements.txt | 2 +- streaming/rtsp-streaming/requirements.txt | 2 +- streaming/webrtc-streaming/requirements.txt | 2 +- tutorials/camera-demo/requirements.txt | 2 +- tutorials/custom-models/generate_model/requirements.txt | 2 +- tutorials/custom-models/requirements.txt | 2 +- tutorials/display-detections/requirements.txt | 2 +- tutorials/full-fov-nn/requirements.txt | 2 +- .../multiple-devices/multi-cam-calibration/requirements.txt | 2 +- .../multiple-device-stitch-nn/requirements.txt | 2 +- .../multiple-devices-preview/requirements.txt | 2 +- .../spatial-detection-fusion/requirements.txt | 2 +- tutorials/play-encoded-stream/requirements.txt | 2 +- tutorials/qr-with-tiling/requirements.txt | 2 +- 70 files changed, 72 insertions(+), 72 deletions(-) diff --git a/camera-controls/depth-driven-focus/requirements.txt b/camera-controls/depth-driven-focus/requirements.txt index 77fbe7cf7..0939d9a84 100644 --- a/camera-controls/depth-driven-focus/requirements.txt +++ b/camera-controls/depth-driven-focus/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 -depthai-nodes==0.4.0 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless~=4.10.0 numpy>=1.22 \ No 
newline at end of file diff --git a/camera-controls/lossless-zooming/requirements.txt b/camera-controls/lossless-zooming/requirements.txt index dfe76c078..e42562133 100644 --- a/camera-controls/lossless-zooming/requirements.txt +++ b/camera-controls/lossless-zooming/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update diff --git a/camera-controls/manual-camera-control/requirements.txt b/camera-controls/manual-camera-control/requirements.txt index a5175aba8..eae2117c0 100644 --- a/camera-controls/manual-camera-control/requirements.txt +++ b/camera-controls/manual-camera-control/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 opencv-python-headless~=4.10.0 \ No newline at end of file diff --git a/custom-frontend/open-vocabulary-object-detection/backend/requirements.txt b/custom-frontend/open-vocabulary-object-detection/backend/requirements.txt index f93a5cc2b..09753579b 100644 --- a/custom-frontend/open-vocabulary-object-detection/backend/requirements.txt +++ b/custom-frontend/open-vocabulary-object-detection/backend/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless~=4.10.0 numpy>=1.22 diff --git a/custom-frontend/raw-stream/requirements.txt b/custom-frontend/raw-stream/requirements.txt index ab5ac5c32..e0918ed1d 100644 --- a/custom-frontend/raw-stream/requirements.txt +++ b/custom-frontend/raw-stream/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 numpy>=1.22 \ No newline at end of file diff --git a/depth-measurement/3d-measurement/box-measurement/requirements.txt b/depth-measurement/3d-measurement/box-measurement/requirements.txt index e201dc209..1c236c7c2 100644 --- a/depth-measurement/3d-measurement/box-measurement/requirements.txt +++ 
b/depth-measurement/3d-measurement/box-measurement/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update numpy>=1.22 open3d~=0.18 diff --git a/depth-measurement/3d-measurement/rgbd-pointcloud/requirements.txt b/depth-measurement/3d-measurement/rgbd-pointcloud/requirements.txt index ab5ac5c32..e0918ed1d 100644 --- a/depth-measurement/3d-measurement/rgbd-pointcloud/requirements.txt +++ b/depth-measurement/3d-measurement/rgbd-pointcloud/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 numpy>=1.22 \ No newline at end of file diff --git a/depth-measurement/dynamic-calibration/requirements.txt b/depth-measurement/dynamic-calibration/requirements.txt index f63997c07..434698e9c 100644 --- a/depth-measurement/dynamic-calibration/requirements.txt +++ b/depth-measurement/dynamic-calibration/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 numpy>=1.22 opencv-python==4.10.0.84 diff --git a/depth-measurement/stereo-on-host/requirements.txt b/depth-measurement/stereo-on-host/requirements.txt index fb9fadfe1..35c941724 100644 --- a/depth-measurement/stereo-on-host/requirements.txt +++ b/depth-measurement/stereo-on-host/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 scikit-image diff --git a/depth-measurement/triangulation/requirements.txt b/depth-measurement/triangulation/requirements.txt index 657eada6e..09e49daa1 100644 --- a/depth-measurement/triangulation/requirements.txt +++ b/depth-measurement/triangulation/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 diff --git a/depth-measurement/wls-filter/requirements.txt b/depth-measurement/wls-filter/requirements.txt index 680926232..bcb75c179 100644 --- a/depth-measurement/wls-filter/requirements.txt +++ 
b/depth-measurement/wls-filter/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 opencv-contrib-python==4.10.0.84 numpy>=1.22 diff --git a/integrations/hub-snaps-events/requirements.txt b/integrations/hub-snaps-events/requirements.txt index 9ed3cb17b..4340b39fd 100644 --- a/integrations/hub-snaps-events/requirements.txt +++ b/integrations/hub-snaps-events/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update python-dotenv diff --git a/integrations/roboflow-dataset/requirements.txt b/integrations/roboflow-dataset/requirements.txt index 21663e495..46737ea80 100644 --- a/integrations/roboflow-dataset/requirements.txt +++ b/integrations/roboflow-dataset/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 numpy>=1.22 opencv-python-headless~=4.10.0 diff --git a/integrations/roboflow-workflow/backend/src/requirements.txt b/integrations/roboflow-workflow/backend/src/requirements.txt index 237af3772..a732ed178 100644 --- a/integrations/roboflow-workflow/backend/src/requirements.txt +++ b/integrations/roboflow-workflow/backend/src/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python~=4.10.0 inference \ No newline at end of file diff --git a/neural-networks/3D-detection/objectron/requirements.txt b/neural-networks/3D-detection/objectron/requirements.txt index dfe76c078..e42562133 100644 --- a/neural-networks/3D-detection/objectron/requirements.txt +++ b/neural-networks/3D-detection/objectron/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update diff --git a/neural-networks/counting/crowdcounting/requirements.txt b/neural-networks/counting/crowdcounting/requirements.txt index 
77fbe7cf7..1d4d576f6 100644 --- a/neural-networks/counting/crowdcounting/requirements.txt +++ b/neural-networks/counting/crowdcounting/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/counting/cumulative-object-counting/requirements.txt b/neural-networks/counting/cumulative-object-counting/requirements.txt index 702238a7b..1457b9317 100644 --- a/neural-networks/counting/cumulative-object-counting/requirements.txt +++ b/neural-networks/counting/cumulative-object-counting/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/counting/depth-people-counting/requirements.txt b/neural-networks/counting/depth-people-counting/requirements.txt index 77fbe7cf7..1d4d576f6 100644 --- a/neural-networks/counting/depth-people-counting/requirements.txt +++ b/neural-networks/counting/depth-people-counting/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/counting/people-counter/requirements.txt b/neural-networks/counting/people-counter/requirements.txt index c2a0c7bb7..1106ae10a 100644 --- a/neural-networks/counting/people-counter/requirements.txt +++ b/neural-networks/counting/people-counter/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git a/neural-networks/depth-estimation/foundation-stereo/requirements.txt b/neural-networks/depth-estimation/foundation-stereo/requirements.txt index 1dda0bf24..9e1a358af 100644 --- a/neural-networks/depth-estimation/foundation-stereo/requirements.txt +++ b/neural-networks/depth-estimation/foundation-stereo/requirements.txt @@ -1,4 
+1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 onnxruntime>=1.19.0 onnxruntime-gpu>=1.19.0 diff --git a/neural-networks/depth-estimation/neural-depth/host_eval/requirements.txt b/neural-networks/depth-estimation/neural-depth/host_eval/requirements.txt index 1ca8570e3..ff0639b20 100644 --- a/neural-networks/depth-estimation/neural-depth/host_eval/requirements.txt +++ b/neural-networks/depth-estimation/neural-depth/host_eval/requirements.txt @@ -1,5 +1,5 @@ beautifulsoup4==4.12.3 -depthai==3.3.0 +depthai==3.4.0 numpy opencv-python~=4.10.0 requests diff --git a/neural-networks/depth-estimation/neural-depth/requirements.txt b/neural-networks/depth-estimation/neural-depth/requirements.txt index 218b6f03b..98ae4b842 100644 --- a/neural-networks/depth-estimation/neural-depth/requirements.txt +++ b/neural-networks/depth-estimation/neural-depth/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 \ No newline at end of file diff --git a/neural-networks/face-detection/age-gender/requirements.txt b/neural-networks/face-detection/age-gender/requirements.txt index c2a0c7bb7..1106ae10a 100644 --- a/neural-networks/face-detection/age-gender/requirements.txt +++ b/neural-networks/face-detection/age-gender/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git a/neural-networks/face-detection/blur-faces/requirements.txt b/neural-networks/face-detection/blur-faces/requirements.txt index 77fbe7cf7..1d4d576f6 100644 --- a/neural-networks/face-detection/blur-faces/requirements.txt +++ b/neural-networks/face-detection/blur-faces/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/face-detection/emotion-recognition/requirements.txt 
b/neural-networks/face-detection/emotion-recognition/requirements.txt index 1577a1888..8f9430e09 100644 --- a/neural-networks/face-detection/emotion-recognition/requirements.txt +++ b/neural-networks/face-detection/emotion-recognition/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/face-detection/face-mask-detection/requirements.txt b/neural-networks/face-detection/face-mask-detection/requirements.txt index dfe76c078..e42562133 100644 --- a/neural-networks/face-detection/face-mask-detection/requirements.txt +++ b/neural-networks/face-detection/face-mask-detection/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update diff --git a/neural-networks/face-detection/fatigue-detection/requirements.txt b/neural-networks/face-detection/fatigue-detection/requirements.txt index c5f9dee1b..0939d9a84 100644 --- a/neural-networks/face-detection/fatigue-detection/requirements.txt +++ b/neural-networks/face-detection/fatigue-detection/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/face-detection/gaze-estimation/requirements.txt b/neural-networks/face-detection/gaze-estimation/requirements.txt index c2a0c7bb7..1106ae10a 100644 --- a/neural-networks/face-detection/gaze-estimation/requirements.txt +++ b/neural-networks/face-detection/gaze-estimation/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git 
a/neural-networks/face-detection/head-posture-detection/requirements.txt b/neural-networks/face-detection/head-posture-detection/requirements.txt index c2a0c7bb7..1106ae10a 100644 --- a/neural-networks/face-detection/head-posture-detection/requirements.txt +++ b/neural-networks/face-detection/head-posture-detection/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git a/neural-networks/feature-detection/xfeat/requirements.txt b/neural-networks/feature-detection/xfeat/requirements.txt index 77fbe7cf7..1d4d576f6 100644 --- a/neural-networks/feature-detection/xfeat/requirements.txt +++ b/neural-networks/feature-detection/xfeat/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/generic-example/requirements.txt b/neural-networks/generic-example/requirements.txt index d0040b640..f0931a49d 100644 --- a/neural-networks/generic-example/requirements.txt +++ b/neural-networks/generic-example/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 diff --git a/neural-networks/object-detection/barcode-detection-conveyor-belt/requirements.txt b/neural-networks/object-detection/barcode-detection-conveyor-belt/requirements.txt index 7425d21f0..40f198383 100644 --- a/neural-networks/object-detection/barcode-detection-conveyor-belt/requirements.txt +++ b/neural-networks/object-detection/barcode-detection-conveyor-belt/requirements.txt @@ -1,4 +1,4 @@ -depthai>=3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update numpy>=1.22 opencv-python-headless~=4.10.0 diff --git a/neural-networks/object-detection/human-machine-safety/requirements.txt 
b/neural-networks/object-detection/human-machine-safety/requirements.txt index c2a0c7bb7..1106ae10a 100644 --- a/neural-networks/object-detection/human-machine-safety/requirements.txt +++ b/neural-networks/object-detection/human-machine-safety/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git a/neural-networks/object-detection/social-distancing/requirements.txt b/neural-networks/object-detection/social-distancing/requirements.txt index 77fbe7cf7..0939d9a84 100644 --- a/neural-networks/object-detection/social-distancing/requirements.txt +++ b/neural-networks/object-detection/social-distancing/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 -depthai-nodes==0.4.0 +depthai==3.4.0 +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/object-detection/spatial-detections/requirements.txt b/neural-networks/object-detection/spatial-detections/requirements.txt index 6f5034885..3b7d19fe5 100644 --- a/neural-networks/object-detection/spatial-detections/requirements.txt +++ b/neural-networks/object-detection/spatial-detections/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 diff --git a/neural-networks/object-detection/text-blur/requirements.txt b/neural-networks/object-detection/text-blur/requirements.txt index c5f9dee1b..0939d9a84 100644 --- a/neural-networks/object-detection/text-blur/requirements.txt +++ b/neural-networks/object-detection/text-blur/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git 
a/neural-networks/object-detection/thermal-detection/requirements.txt b/neural-networks/object-detection/thermal-detection/requirements.txt index 218b6f03b..98ae4b842 100644 --- a/neural-networks/object-detection/thermal-detection/requirements.txt +++ b/neural-networks/object-detection/thermal-detection/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 \ No newline at end of file diff --git a/neural-networks/object-detection/yolo-host-decoding/requirements.txt b/neural-networks/object-detection/yolo-host-decoding/requirements.txt index ab5ac5c32..e0918ed1d 100644 --- a/neural-networks/object-detection/yolo-host-decoding/requirements.txt +++ b/neural-networks/object-detection/yolo-host-decoding/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/object-detection/yolo-p/requirements.txt b/neural-networks/object-detection/yolo-p/requirements.txt index c5f9dee1b..0939d9a84 100644 --- a/neural-networks/object-detection/yolo-p/requirements.txt +++ b/neural-networks/object-detection/yolo-p/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/object-detection/yolo-world/requirements.txt b/neural-networks/object-detection/yolo-world/requirements.txt index 3b156fb88..ce2d52904 100644 --- a/neural-networks/object-detection/yolo-world/requirements.txt +++ b/neural-networks/object-detection/yolo-world/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless~=4.10.0 numpy>=1.22 diff --git a/neural-networks/object-tracking/collision-avoidance/requirements.txt b/neural-networks/object-tracking/collision-avoidance/requirements.txt 
index c2a0c7bb7..1106ae10a 100644 --- a/neural-networks/object-tracking/collision-avoidance/requirements.txt +++ b/neural-networks/object-tracking/collision-avoidance/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git a/neural-networks/object-tracking/deepsort-tracking/requirements.txt b/neural-networks/object-tracking/deepsort-tracking/requirements.txt index 1886b4f90..77bf06e35 100644 --- a/neural-networks/object-tracking/deepsort-tracking/requirements.txt +++ b/neural-networks/object-tracking/deepsort-tracking/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update numpy>=1.22 opencv-python-headless~=4.10.0 diff --git a/neural-networks/object-tracking/kalman/requirements.txt b/neural-networks/object-tracking/kalman/requirements.txt index 218b6f03b..98ae4b842 100644 --- a/neural-networks/object-tracking/kalman/requirements.txt +++ b/neural-networks/object-tracking/kalman/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 \ No newline at end of file diff --git a/neural-networks/object-tracking/people-tracker/requirements.txt b/neural-networks/object-tracking/people-tracker/requirements.txt index 218b6f03b..98ae4b842 100644 --- a/neural-networks/object-tracking/people-tracker/requirements.txt +++ b/neural-networks/object-tracking/people-tracker/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 \ No newline at end of file diff --git a/neural-networks/ocr/general-ocr/requirements.txt b/neural-networks/ocr/general-ocr/requirements.txt index f72c0b1c8..65ae35051 100644 --- a/neural-networks/ocr/general-ocr/requirements.txt +++ b/neural-networks/ocr/general-ocr/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ 
git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update opencv-python-headless==4.10.0.84 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/ocr/license-plate-recognition/requirements.txt b/neural-networks/ocr/license-plate-recognition/requirements.txt index 77fbe7cf7..1d4d576f6 100644 --- a/neural-networks/ocr/license-plate-recognition/requirements.txt +++ b/neural-networks/ocr/license-plate-recognition/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/pose-estimation/animal-pose/requirements.txt b/neural-networks/pose-estimation/animal-pose/requirements.txt index c2a0c7bb7..1106ae10a 100644 --- a/neural-networks/pose-estimation/animal-pose/requirements.txt +++ b/neural-networks/pose-estimation/animal-pose/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file diff --git a/neural-networks/pose-estimation/hand-pose/requirements.txt b/neural-networks/pose-estimation/hand-pose/requirements.txt index dfe76c078..e42562133 100644 --- a/neural-networks/pose-estimation/hand-pose/requirements.txt +++ b/neural-networks/pose-estimation/hand-pose/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update diff --git a/neural-networks/pose-estimation/human-pose/requirements.txt b/neural-networks/pose-estimation/human-pose/requirements.txt index dfe76c078..e42562133 100644 --- a/neural-networks/pose-estimation/human-pose/requirements.txt +++ b/neural-networks/pose-estimation/human-pose/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update diff --git 
a/neural-networks/reidentification/human-reidentification/requirements.txt b/neural-networks/reidentification/human-reidentification/requirements.txt index 7368ad5e2..1780d3592 100644 --- a/neural-networks/reidentification/human-reidentification/requirements.txt +++ b/neural-networks/reidentification/human-reidentification/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update numpy>=1.22 diff --git a/neural-networks/segmentation/blur-background/requirements.txt b/neural-networks/segmentation/blur-background/requirements.txt index 77fbe7cf7..1d4d576f6 100644 --- a/neural-networks/segmentation/blur-background/requirements.txt +++ b/neural-networks/segmentation/blur-background/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 \ No newline at end of file diff --git a/neural-networks/segmentation/depth-crop/requirements.txt b/neural-networks/segmentation/depth-crop/requirements.txt index 218b6f03b..98ae4b842 100644 --- a/neural-networks/segmentation/depth-crop/requirements.txt +++ b/neural-networks/segmentation/depth-crop/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 \ No newline at end of file diff --git a/neural-networks/speech-recognition/whisper-tiny-en/requirements.txt b/neural-networks/speech-recognition/whisper-tiny-en/requirements.txt index 5121ab744..677a98fe7 100644 --- a/neural-networks/speech-recognition/whisper-tiny-en/requirements.txt +++ b/neural-networks/speech-recognition/whisper-tiny-en/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 numpy>=1.22 scipy diff --git a/streaming/mjpeg-streaming/requirements.txt b/streaming/mjpeg-streaming/requirements.txt index 657eada6e..09e49daa1 100644 --- a/streaming/mjpeg-streaming/requirements.txt +++ b/streaming/mjpeg-streaming/requirements.txt @@ -1,4 +1,4 
@@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 opencv-python-headless~=4.10.0 numpy>=1.22 diff --git a/streaming/on-device-encoding/requirements.txt b/streaming/on-device-encoding/requirements.txt index 65eb37eb3..b81f2b18e 100644 --- a/streaming/on-device-encoding/requirements.txt +++ b/streaming/on-device-encoding/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.3.0 +depthai==3.4.0 av==12.3.0 numpy>=1.22 \ No newline at end of file diff --git a/streaming/poe-mqtt/requirements.txt b/streaming/poe-mqtt/requirements.txt index 218b6f03b..98ae4b842 100644 --- a/streaming/poe-mqtt/requirements.txt +++ b/streaming/poe-mqtt/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 \ No newline at end of file diff --git a/streaming/poe-tcp-streaming/requirements.txt b/streaming/poe-tcp-streaming/requirements.txt index 8a3dcf77c..059031e53 100644 --- a/streaming/poe-tcp-streaming/requirements.txt +++ b/streaming/poe-tcp-streaming/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.3.0 +depthai==3.4.0 opencv-python~=4.10.0 numpy>=1.22 diff --git a/streaming/rtsp-streaming/requirements.txt b/streaming/rtsp-streaming/requirements.txt index b4455f046..124919f4e 100644 --- a/streaming/rtsp-streaming/requirements.txt +++ b/streaming/rtsp-streaming/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.3.0 +depthai==3.4.0 numpy>=1.22 PyGObject==3.46.0 \ No newline at end of file diff --git a/streaming/webrtc-streaming/requirements.txt b/streaming/webrtc-streaming/requirements.txt index 942d4ae2b..540d3573e 100644 --- a/streaming/webrtc-streaming/requirements.txt +++ b/streaming/webrtc-streaming/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update aiortc==1.9.0 aiohttp>=3.10.0,<4.0 diff --git a/tutorials/camera-demo/requirements.txt b/tutorials/camera-demo/requirements.txt index ab5ac5c32..e0918ed1d 100644 --- a/tutorials/camera-demo/requirements.txt +++ 
b/tutorials/camera-demo/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 numpy>=1.22 \ No newline at end of file diff --git a/tutorials/custom-models/generate_model/requirements.txt b/tutorials/custom-models/generate_model/requirements.txt index a990220e2..d3595f13e 100644 --- a/tutorials/custom-models/generate_model/requirements.txt +++ b/tutorials/custom-models/generate_model/requirements.txt @@ -1,5 +1,5 @@ / -depthai==3.3.0 +depthai==3.4.0 modelconv==0.3.3 numpy==1.23.0 onnx==1.17.0 diff --git a/tutorials/custom-models/requirements.txt b/tutorials/custom-models/requirements.txt index 8127b82d6..d9388de9e 100644 --- a/tutorials/custom-models/requirements.txt +++ b/tutorials/custom-models/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 numpy>=1.22 opencv-python-headless~=4.10.0 diff --git a/tutorials/display-detections/requirements.txt b/tutorials/display-detections/requirements.txt index 8127b82d6..d9388de9e 100644 --- a/tutorials/display-detections/requirements.txt +++ b/tutorials/display-detections/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 numpy>=1.22 opencv-python-headless~=4.10.0 diff --git a/tutorials/full-fov-nn/requirements.txt b/tutorials/full-fov-nn/requirements.txt index 6f5034885..3b7d19fe5 100644 --- a/tutorials/full-fov-nn/requirements.txt +++ b/tutorials/full-fov-nn/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 diff --git a/tutorials/multiple-devices/multi-cam-calibration/requirements.txt b/tutorials/multiple-devices/multi-cam-calibration/requirements.txt index ddd4850e4..73ab045e9 100644 --- a/tutorials/multiple-devices/multi-cam-calibration/requirements.txt +++ b/tutorials/multiple-devices/multi-cam-calibration/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 opencv-python==4.10.0.84 numpy>=1.22 diff --git 
a/tutorials/multiple-devices/multiple-device-stitch-nn/requirements.txt b/tutorials/multiple-devices/multiple-device-stitch-nn/requirements.txt index 5e01934ef..9af2d834b 100644 --- a/tutorials/multiple-devices/multiple-device-stitch-nn/requirements.txt +++ b/tutorials/multiple-devices/multiple-device-stitch-nn/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 imutils stitching==0.6.1 \ No newline at end of file diff --git a/tutorials/multiple-devices/multiple-devices-preview/requirements.txt b/tutorials/multiple-devices/multiple-devices-preview/requirements.txt index 218b6f03b..98ae4b842 100644 --- a/tutorials/multiple-devices/multiple-devices-preview/requirements.txt +++ b/tutorials/multiple-devices/multiple-devices-preview/requirements.txt @@ -1,2 +1,2 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 \ No newline at end of file diff --git a/tutorials/multiple-devices/spatial-detection-fusion/requirements.txt b/tutorials/multiple-devices/spatial-detection-fusion/requirements.txt index cd84aa195..ec46ed409 100644 --- a/tutorials/multiple-devices/spatial-detection-fusion/requirements.txt +++ b/tutorials/multiple-devices/spatial-detection-fusion/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 depthai-nodes==0.4.0 opencv-python-headless==4.10.0.84 numpy>=1.22 diff --git a/tutorials/play-encoded-stream/requirements.txt b/tutorials/play-encoded-stream/requirements.txt index 3e503ffb8..676c928b4 100644 --- a/tutorials/play-encoded-stream/requirements.txt +++ b/tutorials/play-encoded-stream/requirements.txt @@ -1,3 +1,3 @@ -depthai==3.3.0 +depthai==3.4.0 opencv-python-headless~=4.10.0 av==12.3.0 \ No newline at end of file diff --git a/tutorials/qr-with-tiling/requirements.txt b/tutorials/qr-with-tiling/requirements.txt index f3f6264f1..a713324d3 100644 --- a/tutorials/qr-with-tiling/requirements.txt +++ b/tutorials/qr-with-tiling/requirements.txt @@ -1,4 +1,4 @@ -depthai==3.3.0 +depthai==3.4.0 
depthai-nodes==0.4.0 numpy>=1.22 pyzbar==0.1.9 From 4cc9fb438a37438510058fff4f9839f83493b36d Mon Sep 17 00:00:00 2001 From: bblazeva Date: Sat, 14 Mar 2026 15:06:28 +0100 Subject: [PATCH 13/14] fix bbox display on collision-avoidance example --- neural-networks/counting/depth-people-counting/main.py | 4 +++- neural-networks/object-tracking/collision-avoidance/main.py | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/neural-networks/counting/depth-people-counting/main.py b/neural-networks/counting/depth-people-counting/main.py index cedf4c361..0dc198161 100644 --- a/neural-networks/counting/depth-people-counting/main.py +++ b/neural-networks/counting/depth-people-counting/main.py @@ -74,7 +74,9 @@ dai.TrackerIdAssignmentPolicy.SMALLEST_ID ) - color_transform_disparity = pipeline.create(ApplyDepthColormap).build(stereo.disparity) + color_transform_disparity = pipeline.create(ApplyDepthColormap).build( + stereo.disparity + ) color_transform_disparity.out.link(objectTracker.inputTrackerFrame) color_transform_disparity.out.link(objectTracker.inputDetectionFrame) detection_generator.out.link(objectTracker.inputDetections) diff --git a/neural-networks/object-tracking/collision-avoidance/main.py b/neural-networks/object-tracking/collision-avoidance/main.py index 9266392e8..95da73804 100644 --- a/neural-networks/object-tracking/collision-avoidance/main.py +++ b/neural-networks/object-tracking/collision-avoidance/main.py @@ -91,8 +91,8 @@ # visualization visualizer.addTopic("Video", nn.passthrough, "images") visualizer.addTopic("Tracklets", collision_avoidance.out, "images") - visualizer.addTopic("Direction", collision_avoidance.out_direction, "images") - visualizer.addTopic("Bird Frame", birds_eye_view.output, "images") + visualizer.addTopic("Direction", collision_avoidance.out_direction) + visualizer.addTopic("Bird Frame", birds_eye_view.output) print("Pipeline created.") pipeline.start() From 04550a92961f954ef3e3f6fc2e8c3175de37ab07 Mon Sep 17 
00:00:00 2001 From: petrnovota Date: Fri, 27 Mar 2026 18:32:32 +0100 Subject: [PATCH 14/14] make oak-examples work with latest depthai-nodes --- camera-controls/depth-driven-focus/main.py | 5 ++++- camera-controls/lossless-zooming/main.py | 8 +++++++- .../3d-measurement/box-measurement/main.py | 8 ++++++-- .../3D-detection/objectron/main.py | 9 +++------ .../objectron/utils/annotation_node.py | 19 +++++++++++++----- .../counting/people-counter/requirements.txt | 2 +- .../face-detection/age-gender/main.py | 6 +++--- .../age-gender/utils/annotation_node.py | 2 +- .../emotion-recognition/main.py | 6 +++--- .../utils/annotation_node.py | 2 +- .../face-detection/fatigue-detection/main.py | 6 +++--- .../utils/annotation_node.py | 7 ++++--- .../fatigue-detection/utils/face_landmarks.py | 6 +++--- .../face-detection/gaze-estimation/main.py | 6 +++--- .../gaze-estimation/utils/annotation_node.py | 2 +- .../head-posture-detection/main.py | 6 +++--- .../utils/annotation_node.py | 2 +- .../human-machine-safety/main.py | 20 +++++++++---------- .../social-distancing/main.py | 12 +++++------ .../thermal-detection/main.py | 12 +++++++---- .../object-tracking/deepsort-tracking/main.py | 6 +++--- .../utils/deepsort_tracking.py | 4 ++-- neural-networks/ocr/general-ocr/main.py | 6 +++--- .../ocr/general-ocr/utils/annotation_node.py | 2 +- .../pose-estimation/animal-pose/main.py | 6 +++--- .../animal-pose/utils/annotation_node.py | 5 ++--- .../pose-estimation/hand-pose/main.py | 6 +++--- .../hand-pose/utils/annotation_node.py | 9 ++++----- .../pose-estimation/human-pose/main.py | 8 ++++---- .../human-pose/utils/annotation_node.py | 7 +++---- .../human-reidentification/main.py | 6 +++--- .../utils/identification.py | 2 +- .../segmentation/depth-crop/main.py | 2 +- 33 files changed, 118 insertions(+), 97 deletions(-) diff --git a/camera-controls/depth-driven-focus/main.py b/camera-controls/depth-driven-focus/main.py index 7ef3d818e..40f8a22d3 100644 --- 
a/camera-controls/depth-driven-focus/main.py +++ b/camera-controls/depth-driven-focus/main.py @@ -1,9 +1,12 @@ +from pathlib import Path + import depthai as dai from depthai_nodes.node import ParsingNeuralNetwork, ApplyColormap, DepthMerger from utils.arguments import initialize_argparser from utils.depth_driven_focus import DepthDrivenFocus _, args = initialize_argparser() +MODEL_DIR = Path(__file__).resolve().parent / "depthai_models" visualizer = dai.RemoteConnection(httpPort=8082) device = dai.Device(dai.DeviceInfo(args.device)) if args.device else dai.Device() @@ -15,7 +18,7 @@ platform = device.getPlatform() model_description = dai.NNModelDescription.fromYamlFile( - f"yunet.{platform.name}.yaml" + str(MODEL_DIR / f"yunet.{platform.name}.yaml") ) nn_archive = dai.NNArchive(dai.getModelFromZoo(model_description)) diff --git a/camera-controls/lossless-zooming/main.py b/camera-controls/lossless-zooming/main.py index 9cbbaa4a1..da63a9cfa 100644 --- a/camera-controls/lossless-zooming/main.py +++ b/camera-controls/lossless-zooming/main.py @@ -25,7 +25,13 @@ else dai.ImgFrame.Type.BGR888p ) -model_description = dai.NNModelDescription.fromYamlFile(f"yunet.{platform.name}.yaml") +model_description = dai.NNModelDescription.fromYamlFile( + str( + Path(__file__).resolve().parent + / "depthai_models" + / f"yunet.{platform.name}.yaml" + ) +) nn_archive = dai.NNArchive(dai.getModelFromZoo(model_description)) model_width = nn_archive.getInputWidth() model_height = nn_archive.getInputHeight() diff --git a/depth-measurement/3d-measurement/box-measurement/main.py b/depth-measurement/3d-measurement/box-measurement/main.py index c4e4c6eef..6fddcdcf0 100644 --- a/depth-measurement/3d-measurement/box-measurement/main.py +++ b/depth-measurement/3d-measurement/box-measurement/main.py @@ -1,3 +1,5 @@ +from pathlib import Path + import depthai as dai from depthai_nodes.node import ParsingNeuralNetwork from utils.box_processing_node import BoxProcessingNode @@ -6,6 +8,8 @@ _, args = 
initialize_argparser() +EXAMPLE_DIR = Path(__file__).resolve().parent +MODEL_DIR = EXAMPLE_DIR / "depthai_models" NN_WIDTH, NN_HEIGHT = 512, 320 INPUT_SHAPE = (NN_WIDTH, NN_HEIGHT) @@ -21,7 +25,7 @@ platform = device.getPlatform() model_description = dai.NNModelDescription.fromYamlFile( - f"box_instance_segmentation.{platform.name}.yaml" + str(MODEL_DIR / f"box_instance_segmentation.{platform.name}.yaml") ) nn_archive = dai.NNArchive( dai.getModelFromZoo( @@ -76,7 +80,7 @@ color_output.link(manip.inputImage) - nn = p.create(ParsingNeuralNetwork).build(nn_source=nn_archive, input=manip.out) + nn = p.create(ParsingNeuralNetwork).build(nnSource=nn_archive, input=manip.out) if platform == dai.Platform.RVC2: nn.setNNArchive(nn_archive, numShaves=7) diff --git a/neural-networks/3D-detection/objectron/main.py b/neural-networks/3D-detection/objectron/main.py index c0c4c5c13..71da855d2 100644 --- a/neural-networks/3D-detection/objectron/main.py +++ b/neural-networks/3D-detection/objectron/main.py @@ -81,14 +81,11 @@ crop_node.out, pos_nn_archive ) - detections_filter = pipeline.create(ImgDetectionsFilter).build(det_nn.out) - detections_filter.keepLabels(VALID_LABELS) - # detections and position estimations sync gather_data = pipeline.create(GatherData).build( - camera_fps=args.fps_limit, - input_data=pos_nn.getOutput(0), - input_reference=detections_filter.out, + cameraFps=args.fps_limit, + inputData=pos_nn.getOutput(0), + inputReference=first_stage_filter.out, ) # annotation diff --git a/neural-networks/3D-detection/objectron/utils/annotation_node.py b/neural-networks/3D-detection/objectron/utils/annotation_node.py index 9d585c475..51b97c094 100644 --- a/neural-networks/3D-detection/objectron/utils/annotation_node.py +++ b/neural-networks/3D-detection/objectron/utils/annotation_node.py @@ -1,6 +1,5 @@ import depthai as dai from depthai_nodes import ( - Keypoints, GatheredData, PRIMARY_COLOR, SECONDARY_COLOR, @@ -43,15 +42,25 @@ def process(self, gathered_data: dai.Buffer) 
-> None: padding = self.padding for ix, detection in enumerate(detections_list): - keypoints_msg: Keypoints = gathered_data.gathered[ix] + keypoints_msg: dai.KeypointsList = gathered_data.items[ix] slope_x = (detection.xmax + padding) - (detection.xmin - padding) slope_y = (detection.ymax + padding) - (detection.ymin - padding) xs = [] ys = [] - for kp in keypoints_msg.keypoints: - x = min(max(detection.xmin - padding + slope_x * kp.x, 0.0), 1.0) - y = min(max(detection.ymin - padding + slope_y * kp.y, 0.0), 1.0) + for kp in keypoints_msg.getKeypoints(): + x = min( + max( + detection.xmin - padding + slope_x * kp.imageCoordinates.x, 0.0 + ), + 1.0, + ) + y = min( + max( + detection.ymin - padding + slope_y * kp.imageCoordinates.y, 0.0 + ), + 1.0, + ) xs.append(x) ys.append(y) diff --git a/neural-networks/counting/people-counter/requirements.txt b/neural-networks/counting/people-counter/requirements.txt index 1106ae10a..e42562133 100644 --- a/neural-networks/counting/people-counter/requirements.txt +++ b/neural-networks/counting/people-counter/requirements.txt @@ -1,2 +1,2 @@ depthai==3.4.0 -depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update \ No newline at end of file +depthai-nodes @ git+https://github.com/luxonis/depthai-nodes.git@changes_for_oak_examples_update diff --git a/neural-networks/face-detection/age-gender/main.py b/neural-networks/face-detection/age-gender/main.py index 2b07d944e..afa78eda9 100644 --- a/neural-networks/face-detection/age-gender/main.py +++ b/neural-networks/face-detection/age-gender/main.py @@ -92,9 +92,9 @@ # detections and recognitions sync gather_data_node = pipeline.create(GatherData).build( - camera_fps=args.fps_limit, - input_data=rec_nn.outputs, - input_reference=det_nn.out, + cameraFps=args.fps_limit, + inputData=rec_nn.outputs, + inputReference=det_nn.out, ) # annotation diff --git a/neural-networks/face-detection/age-gender/utils/annotation_node.py 
b/neural-networks/face-detection/age-gender/utils/annotation_node.py index 747d5a3cd..a462b4e3f 100644 --- a/neural-networks/face-detection/age-gender/utils/annotation_node.py +++ b/neural-networks/face-detection/age-gender/utils/annotation_node.py @@ -24,7 +24,7 @@ def process(self, gather_data_msg: dai.Buffer) -> None: img_detections_msg: dai.ImgDetections = gather_data_msg.reference_data assert isinstance(img_detections_msg, dai.ImgDetections) - age_gender_msg_group_list: List[dai.MessageGroup] = gather_data_msg.gathered + age_gender_msg_group_list: List[dai.MessageGroup] = gather_data_msg.items assert isinstance(age_gender_msg_group_list, list) assert all( isinstance(msg, dai.MessageGroup) for msg in age_gender_msg_group_list diff --git a/neural-networks/face-detection/emotion-recognition/main.py b/neural-networks/face-detection/emotion-recognition/main.py index e4d7d4504..73767f833 100755 --- a/neural-networks/face-detection/emotion-recognition/main.py +++ b/neural-networks/face-detection/emotion-recognition/main.py @@ -91,9 +91,9 @@ # detections and recognitions sync gather_data_node = pipeline.create(GatherData).build( - camera_fps=args.fps_limit, - input_data=rec_nn.out, - input_reference=det_nn.out, + cameraFps=args.fps_limit, + inputData=rec_nn.out, + inputReference=det_nn.out, ) # annotation diff --git a/neural-networks/face-detection/emotion-recognition/utils/annotation_node.py b/neural-networks/face-detection/emotion-recognition/utils/annotation_node.py index 2f01f3b2a..44753122a 100644 --- a/neural-networks/face-detection/emotion-recognition/utils/annotation_node.py +++ b/neural-networks/face-detection/emotion-recognition/utils/annotation_node.py @@ -21,7 +21,7 @@ def process(self, gather_data_msg: dai.Buffer) -> None: dets_msg: dai.ImgDetections = gather_data_msg.reference_data assert isinstance(dets_msg, dai.ImgDetections) - rec_msg_list: List[Classifications] = gather_data_msg.gathered + rec_msg_list: List[Classifications] = gather_data_msg.items 
assert isinstance(rec_msg_list, list) assert all(isinstance(rec_msg, Classifications) for rec_msg in rec_msg_list) assert len(dets_msg.detections) == len(rec_msg_list) diff --git a/neural-networks/face-detection/fatigue-detection/main.py b/neural-networks/face-detection/fatigue-detection/main.py index 45dd4048e..1425b0973 100644 --- a/neural-networks/face-detection/fatigue-detection/main.py +++ b/neural-networks/face-detection/fatigue-detection/main.py @@ -89,9 +89,9 @@ # detections and gaze estimations sync gather_data_node = pipeline.create(GatherData).build( - camera_fps=args.fps_limit, - input_data=landmark_nn.out, - input_reference=det_nn.out, + cameraFps=args.fps_limit, + inputData=landmark_nn.out, + inputReference=det_nn.out, ) # annotation diff --git a/neural-networks/face-detection/fatigue-detection/utils/annotation_node.py b/neural-networks/face-detection/fatigue-detection/utils/annotation_node.py index ce9d69032..a881161f2 100644 --- a/neural-networks/face-detection/fatigue-detection/utils/annotation_node.py +++ b/neural-networks/face-detection/fatigue-detection/utils/annotation_node.py @@ -2,7 +2,6 @@ from collections import deque import depthai as dai from depthai_nodes.utils import AnnotationHelper -from depthai_nodes import Keypoints from utils.face_landmarks import determine_fatigue @@ -22,9 +21,11 @@ def process(self, gather_data_msg) -> None: assert isinstance(detections_msg, dai.ImgDetections) src_w, src_h = detections_msg.getTransformation().getSize() - landmarks_msg_list: List[Keypoints] = gather_data_msg.gathered + landmarks_msg_list: List[dai.KeypointsList] = gather_data_msg.items assert isinstance(landmarks_msg_list, list) - assert all(isinstance(rec_msg, Keypoints) for rec_msg in landmarks_msg_list) + assert all( + isinstance(rec_msg, dai.KeypointsList) for rec_msg in landmarks_msg_list + ) assert len(landmarks_msg_list) == len(detections_msg.detections) annotations = AnnotationHelper() diff --git 
a/neural-networks/face-detection/fatigue-detection/utils/face_landmarks.py b/neural-networks/face-detection/fatigue-detection/utils/face_landmarks.py index 1a2c421c2..b2389558e 100644 --- a/neural-networks/face-detection/fatigue-detection/utils/face_landmarks.py +++ b/neural-networks/face-detection/fatigue-detection/utils/face_landmarks.py @@ -2,15 +2,15 @@ import cv2 import math import numpy as np -from depthai_nodes import Keypoints +import depthai as dai def determine_fatigue( - shape: Tuple[int, int], face_keypoints: Keypoints, pitch_angle: int = 20 + shape: Tuple[int, int], face_keypoints: dai.KeypointsList, pitch_angle: int = 20 ): h, w = shape # frame.shape[:2] face_points_2d = np.array( - [[int(kp.x * w), int(kp.y * h)] for kp in face_keypoints.keypoints] + [[int(kp.x * w), int(kp.y * h)] for kp in face_keypoints.getKeypoints()] ) left_eye_idx = [33, 160, 158, 133, 144, 153] diff --git a/neural-networks/face-detection/gaze-estimation/main.py b/neural-networks/face-detection/gaze-estimation/main.py index 26f18d3e4..e18113c4d 100644 --- a/neural-networks/face-detection/gaze-estimation/main.py +++ b/neural-networks/face-detection/gaze-estimation/main.py @@ -147,9 +147,9 @@ # detections and gaze estimations sync gather_data_node = pipeline.create(GatherData).build( - camera_fps=args.fps_limit, - input_data=gaze_estimation_node.out, - input_reference=det_nn.out, + cameraFps=args.fps_limit, + inputData=gaze_estimation_node.out, + inputReference=det_nn.out, ) # annotation diff --git a/neural-networks/face-detection/gaze-estimation/utils/annotation_node.py b/neural-networks/face-detection/gaze-estimation/utils/annotation_node.py index 3787f4dcb..300165558 100644 --- a/neural-networks/face-detection/gaze-estimation/utils/annotation_node.py +++ b/neural-networks/face-detection/gaze-estimation/utils/annotation_node.py @@ -17,7 +17,7 @@ def process(self, gather_data_msg) -> None: assert isinstance(detections_msg, dai.ImgDetections) src_w, src_h = 
detections_msg.getTransformation().getSize() - gaze_msg_list: List[dai.NNData] = gather_data_msg.gathered + gaze_msg_list: List[dai.NNData] = gather_data_msg.items assert isinstance(gaze_msg_list, list) assert all(isinstance(rec_msg, dai.NNData) for rec_msg in gaze_msg_list) assert len(gaze_msg_list) == len(detections_msg.detections) diff --git a/neural-networks/face-detection/head-posture-detection/main.py b/neural-networks/face-detection/head-posture-detection/main.py index 5553f7057..ffd16efca 100644 --- a/neural-networks/face-detection/head-posture-detection/main.py +++ b/neural-networks/face-detection/head-posture-detection/main.py @@ -90,9 +90,9 @@ # detections and recognitions sync gather_data_node = pipeline.create(GatherData).build( - camera_fps=args.fps_limit, - input_data=pose_nn.outputs, - input_reference=det_nn.out, + cameraFps=args.fps_limit, + inputData=pose_nn.outputs, + inputReference=det_nn.out, ) # annotation diff --git a/neural-networks/face-detection/head-posture-detection/utils/annotation_node.py b/neural-networks/face-detection/head-posture-detection/utils/annotation_node.py index 3b2c04b66..b49367ffa 100644 --- a/neural-networks/face-detection/head-posture-detection/utils/annotation_node.py +++ b/neural-networks/face-detection/head-posture-detection/utils/annotation_node.py @@ -22,7 +22,7 @@ def process(self, gather_data_msg: dai.Buffer) -> None: img_detections_msg: dai.ImgDetections = gather_data_msg.reference_data assert isinstance(img_detections_msg, dai.ImgDetections) - pose_msg_group_list: List[dai.MessageGroup] = gather_data_msg.gathered + pose_msg_group_list: List[dai.MessageGroup] = gather_data_msg.items assert isinstance(pose_msg_group_list, list) assert all(isinstance(msg, dai.MessageGroup) for msg in pose_msg_group_list) diff --git a/neural-networks/object-detection/human-machine-safety/main.py b/neural-networks/object-detection/human-machine-safety/main.py index bdb5040c0..fa83f3f4a 100644 --- 
a/neural-networks/object-detection/human-machine-safety/main.py +++ b/neural-networks/object-detection/human-machine-safety/main.py @@ -113,18 +113,18 @@ parser.setConfidenceThreshold(0.7) detection_depth_merger = pipeline.create(DepthMerger).build( - output_2d=obj_det_nn.out, - output_depth=stereo.depth, - calib_data=device.readCalibration2(), - depth_alignment_socket=dai.CameraBoardSocket.CAM_A, - shrinking_factor=0.1, + output2d=obj_det_nn.out, + outputDepth=stereo.depth, + calibData=device.readCalibration2(), + depthAlignmentSocket=dai.CameraBoardSocket.CAM_A, + shrinkingFactor=0.1, ) palm_depth_merger = pipeline.create(DepthMerger).build( - output_2d=palm_det_nn.out, - output_depth=stereo.depth, - calib_data=device.readCalibration2(), - depth_alignment_socket=dai.CameraBoardSocket.CAM_A, - shrinking_factor=0.1, + output2d=palm_det_nn.out, + outputDepth=stereo.depth, + calibData=device.readCalibration2(), + depthAlignmentSocket=dai.CameraBoardSocket.CAM_A, + shrinkingFactor=0.1, ) # merge both detections into one message diff --git a/neural-networks/object-detection/social-distancing/main.py b/neural-networks/object-detection/social-distancing/main.py index 0e5a32b94..a90a26449 100644 --- a/neural-networks/object-detection/social-distancing/main.py +++ b/neural-networks/object-detection/social-distancing/main.py @@ -75,16 +75,16 @@ nn_parser: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build( input=rgb, - nn_source=det_model_nn_archive, + nnSource=det_model_nn_archive, ) # produce spatial detections depth_merger = pipeline.create(DepthMerger).build( - output_2d=nn_parser.out, - output_depth=stereo.depth, - calib_data=device.readCalibration2(), - depth_alignment_socket=dai.CameraBoardSocket.CAM_A, - shrinking_factor=0.1, + output2d=nn_parser.out, + outputDepth=stereo.depth, + calibData=device.readCalibration2(), + depthAlignmentSocket=dai.CameraBoardSocket.CAM_A, + shrinkingFactor=0.1, ) # annotation diff --git 
a/neural-networks/object-detection/thermal-detection/main.py b/neural-networks/object-detection/thermal-detection/main.py index 092fd4e21..5abb7d19d 100644 --- a/neural-networks/object-detection/thermal-detection/main.py +++ b/neural-networks/object-detection/thermal-detection/main.py @@ -27,10 +27,14 @@ print("Creating pipeline...") # detection model - det_model_description = dai.NNModelDescription.fromYamlFile( - f"thermal_person_detection.{platform}.yaml" - ) - if det_model_description.model != args.model: + model_yaml = Path(f"depthai_models/thermal_person_detection.{platform}.yaml") + if model_yaml.exists(): + det_model_description = dai.NNModelDescription.fromYamlFile(str(model_yaml)) + if det_model_description.model != args.model: + det_model_description = dai.NNModelDescription( + args.model, platform=platform + ) + else: det_model_description = dai.NNModelDescription(args.model, platform=platform) det_model_nn_archive = dai.NNArchive(dai.getModelFromZoo(det_model_description)) det_model_w, det_model_h = det_model_nn_archive.getInputSize() diff --git a/neural-networks/object-tracking/deepsort-tracking/main.py b/neural-networks/object-tracking/deepsort-tracking/main.py index 292e845e3..00ca6ac98 100644 --- a/neural-networks/object-tracking/deepsort-tracking/main.py +++ b/neural-networks/object-tracking/deepsort-tracking/main.py @@ -95,9 +95,9 @@ # detections and embeddings sync gather_data = pipeline.create(GatherData).build( - camera_fps=args.fps_limit, - input_data=embeddings_nn.out, - input_reference=det_nn.out, + cameraFps=args.fps_limit, + inputData=embeddings_nn.out, + inputReference=det_nn.out, ) # tracking diff --git a/neural-networks/object-tracking/deepsort-tracking/utils/deepsort_tracking.py b/neural-networks/object-tracking/deepsort-tracking/utils/deepsort_tracking.py index b0b9f5824..10ba2c09d 100644 --- a/neural-networks/object-tracking/deepsort-tracking/utils/deepsort_tracking.py +++ 
b/neural-networks/object-tracking/deepsort-tracking/utils/deepsort_tracking.py @@ -2,7 +2,7 @@ from deep_sort_realtime.deepsort_tracker import DeepSort from typing import List -from depthai_nodes import GatheredData +from depthai_nodes.message import GatheredData from .visualized_tracklets import VisualizedTracklets @@ -41,7 +41,7 @@ def process(self, img_frame: dai.ImgFrame, gathered_data: dai.Buffer) -> None: detections: dai.ImgDetections = gathered_data.reference_data detections = detections.detections - recognitions: dai.NNData = gathered_data.gathered + recognitions: list[dai.NNData] = gathered_data.items tracklets = VisualizedTracklets() tracklets.setLabels(self._labels) diff --git a/neural-networks/ocr/general-ocr/main.py b/neural-networks/ocr/general-ocr/main.py index 730d149d8..5631141c6 100644 --- a/neural-networks/ocr/general-ocr/main.py +++ b/neural-networks/ocr/general-ocr/main.py @@ -92,9 +92,9 @@ # detections and recognitions sync gather_data_node = pipeline.create(GatherData).build( - camera_fps=args.fps_limit, - input_data=ocr_nn.out, - input_reference=crop_configs_creator.detections_output, + cameraFps=args.fps_limit, + inputData=ocr_nn.out, + inputReference=crop_configs_creator.detections_output, ) # annotation diff --git a/neural-networks/ocr/general-ocr/utils/annotation_node.py b/neural-networks/ocr/general-ocr/utils/annotation_node.py index e84f3ec6e..502ead1c9 100644 --- a/neural-networks/ocr/general-ocr/utils/annotation_node.py +++ b/neural-networks/ocr/general-ocr/utils/annotation_node.py @@ -18,7 +18,7 @@ def run(self): passthrough_frame = self.passthrough.get() detections_list = text_descriptions.reference_data.detections - recognitions_list = text_descriptions.gathered + recognitions_list = text_descriptions.items w, h = passthrough_frame.getWidth(), passthrough_frame.getHeight() diff --git a/neural-networks/pose-estimation/animal-pose/main.py b/neural-networks/pose-estimation/animal-pose/main.py index 40f23b429..25ff3a912 100644 --- 
a/neural-networks/pose-estimation/animal-pose/main.py +++ b/neural-networks/pose-estimation/animal-pose/main.py @@ -84,9 +84,9 @@ # detections and pose estimations sync gather_data = pipeline.create(GatherData).build( - camera_fps=args.fps_limit, - input_data=pose_nn.out, - input_reference=detections_filter.out, + cameraFps=args.fps_limit, + inputData=pose_nn.out, + inputReference=detections_filter.out, ) # annotation diff --git a/neural-networks/pose-estimation/animal-pose/utils/annotation_node.py b/neural-networks/pose-estimation/animal-pose/utils/annotation_node.py index 30e8cb082..2b8711851 100644 --- a/neural-networks/pose-estimation/animal-pose/utils/annotation_node.py +++ b/neural-networks/pose-estimation/animal-pose/utils/annotation_node.py @@ -1,6 +1,5 @@ import depthai as dai from depthai_nodes import ( - Keypoints, GatheredData, PRIMARY_COLOR, SECONDARY_COLOR, @@ -47,14 +46,14 @@ def process(self, gathered_data: dai.Buffer) -> None: for ix, detection in enumerate(detections_list): detection.labelName = "Animal" - keypoints_message: Keypoints = gathered_data.gathered[ix] + keypoints_message: dai.KeypointsList = gathered_data.items[ix] xmin, ymin, xmax, ymax = detection.getBoundingBox().getOuterRect() slope_x = (xmax + padding) - (xmin - padding) slope_y = (ymax + padding) - (ymin - padding) xs = [] ys = [] - for kp in keypoints_message.keypoints: + for kp in keypoints_message.getKeypoints(): x = min(max(xmin - padding + slope_x * kp.x, 0.0), 1.0) y = min(max(ymin - padding + slope_y * kp.y, 0.0), 1.0) xs.append(x) diff --git a/neural-networks/pose-estimation/hand-pose/main.py b/neural-networks/pose-estimation/hand-pose/main.py index 3e6e5059f..a0a192696 100644 --- a/neural-networks/pose-estimation/hand-pose/main.py +++ b/neural-networks/pose-estimation/hand-pose/main.py @@ -96,9 +96,9 @@ # detections and pose estimations sync gather_data = pipeline.create(GatherData).build( - camera_fps=args.fps_limit, - input_data=pose_nn.outputs, - 
input_reference=detection_nn.out, + cameraFps=args.fps_limit, + inputData=pose_nn.outputs, + inputReference=detection_nn.out, ) # annotation diff --git a/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py b/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py index 53e792518..09ff3cb14 100644 --- a/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py +++ b/neural-networks/pose-estimation/hand-pose/utils/annotation_node.py @@ -1,6 +1,5 @@ import depthai as dai from depthai_nodes import ( - Keypoints, Predictions, GatheredData, SECONDARY_COLOR, @@ -51,9 +50,9 @@ def process(self, gathered_data: dai.Buffer, video_message: dai.ImgFrame) -> Non det_list = [] for ix, detection in enumerate(detections_list): - keypoints_msg: Keypoints = gathered_data.gathered[ix]["0"] - confidence_msg: Predictions = gathered_data.gathered[ix]["1"] - handness_msg: Predictions = gathered_data.gathered[ix]["2"] + keypoints_msg: dai.KeypointsList = gathered_data.items[ix]["0"] + confidence_msg: Predictions = gathered_data.items[ix]["1"] + handness_msg: Predictions = gathered_data.items[ix]["2"] hand_confidence = confidence_msg.prediction handness = handness_msg.prediction @@ -94,7 +93,7 @@ def process(self, gathered_data: dai.Buffer, video_message: dai.ImgFrame) -> Non xs = [] ys = [] - for kp in keypoints_msg.keypoints: + for kp in keypoints_msg.getKeypoints(): x = min(max(xmin - padding + slope_x * kp.x, 0.0), 1.0) y = min(max(ymin - padding + slope_y * kp.y, 0.0), 1.0) xs.append(x) diff --git a/neural-networks/pose-estimation/human-pose/main.py b/neural-networks/pose-estimation/human-pose/main.py index 3cf2d3d25..0098d5054 100644 --- a/neural-networks/pose-estimation/human-pose/main.py +++ b/neural-networks/pose-estimation/human-pose/main.py @@ -3,11 +3,11 @@ import depthai as dai from depthai_nodes.node import ( ParsingNeuralNetwork, - HRNetParser, GatherData, ImgDetectionsFilter, FrameCropper, ) +from depthai_nodes.node.parsers import 
HRNetParser from utils.arguments import initialize_argparser from utils.annotation_node import AnnotationNode @@ -100,9 +100,9 @@ # detections and recognitions sync gather_data_node = pipeline.create(GatherData).build( - camera_fps=args.fps_limit, - input_data=rec_nn.out, - input_reference=detections_filter.out, + cameraFps=args.fps_limit, + inputData=rec_nn.out, + inputReference=detections_filter.out, ) # annotation diff --git a/neural-networks/pose-estimation/human-pose/utils/annotation_node.py b/neural-networks/pose-estimation/human-pose/utils/annotation_node.py index 42421ad99..2af03104a 100644 --- a/neural-networks/pose-estimation/human-pose/utils/annotation_node.py +++ b/neural-networks/pose-estimation/human-pose/utils/annotation_node.py @@ -1,6 +1,5 @@ from typing import List, Optional import depthai as dai -from depthai_nodes import Keypoints from depthai_nodes.utils import AnnotationHelper @@ -34,9 +33,9 @@ def process(self, gather_data_msg: dai.Buffer) -> None: img_detections_msg: dai.ImgDetections = gather_data_msg.reference_data assert isinstance(img_detections_msg, dai.ImgDetections) - keypoints_msg_list: List[Keypoints] = gather_data_msg.gathered + keypoints_msg_list: List[dai.KeypointsList] = gather_data_msg.items assert isinstance(keypoints_msg_list, list) - assert all(isinstance(msg, Keypoints) for msg in keypoints_msg_list) + assert all(isinstance(msg, dai.KeypointsList) for msg in keypoints_msg_list) annotations = AnnotationHelper() @@ -55,7 +54,7 @@ def process(self, gather_data_msg: dai.Buffer) -> None: xs = [] ys = [] confidences = [] - for keypoint_msg in keypoints_msg.keypoints: + for keypoint_msg in keypoints_msg.getKeypoints(): x = min( max(xmin - self.padding + slope_x * keypoint_msg.x, 0.0), 1.0, diff --git a/neural-networks/reidentification/human-reidentification/main.py b/neural-networks/reidentification/human-reidentification/main.py index 970dad4c4..ca2d9a4f9 100644 --- 
a/neural-networks/reidentification/human-reidentification/main.py +++ b/neural-networks/reidentification/human-reidentification/main.py @@ -110,9 +110,9 @@ # detections and recognitions sync gather_data_node = pipeline.create(GatherData).build( - camera_fps=args.fps_limit, - input_data=rec_nn.out, - input_reference=det_nn.out, + cameraFps=args.fps_limit, + inputData=rec_nn.out, + inputReference=det_nn.out, ) # idenfication diff --git a/neural-networks/reidentification/human-reidentification/utils/identification.py b/neural-networks/reidentification/human-reidentification/utils/identification.py index dbc622b34..5306ddaff 100644 --- a/neural-networks/reidentification/human-reidentification/utils/identification.py +++ b/neural-networks/reidentification/human-reidentification/utils/identification.py @@ -53,7 +53,7 @@ def process(self, gather_data_msg) -> None: dets_msg: dai.ImgDetections = gather_data_msg.reference_data assert isinstance(dets_msg, dai.ImgDetections) - rec_msg_list = gather_data_msg.gathered + rec_msg_list = gather_data_msg.items assert isinstance(rec_msg_list, list) assert all(isinstance(msg, dai.NNData) for msg in rec_msg_list) diff --git a/neural-networks/segmentation/depth-crop/main.py b/neural-networks/segmentation/depth-crop/main.py index 7bde9b40d..97251240b 100755 --- a/neural-networks/segmentation/depth-crop/main.py +++ b/neural-networks/segmentation/depth-crop/main.py @@ -62,7 +62,7 @@ color_output.link(manip.inputImage) nn = pipeline.create(ParsingNeuralNetwork).build( - nn_source=nn_archive, input=manip.out + nnSource=nn_archive, input=manip.out ) # annotation