From 1733b14fad3e7070f2d045191fc5bca09e4e567a Mon Sep 17 00:00:00 2001 From: chanbrown007 Date: Mon, 12 Dec 2016 17:44:09 -0800 Subject: [PATCH 01/28] ZED Camera Support Update --- ... INSTALL ONLY FOLLOW THIS IF USING ZED.txt | 113 +++ detection/CMakeLists.txt | 9 +- detection/CMakeLists.txt~ | 66 ++ .../ground_based_people_detector_node.cpp | 66 +- .../ground_based_people_detector_node.cpp~ | 568 ++++++++++++++ ...ground_based_people_detector_node_zed.cpp~ | 516 +++++++++++++ detection/apps/haardispada_node.cpp | 4 +- detection/apps/haardispada_node.cpp~ | 490 ++++++++++++ detection/apps/haardispada_node_sr.cpp | 4 +- detection/apps/haardispada_node_sr.cpp~ | 462 ++++++++++++ detection/apps/haardispada_nodelet.cpp~ | 396 ++++++++++ detection/conf/ground_.txt | 4 + .../ground_based_people_detector_zed.yaml | 52 ++ .../ground_based_people_detector_zed.yaml~ | 52 ++ detection/conf/ground_zed_tracked_frame.txt | 4 + .../ground_based_people_detection_app.h | 4 +- .../ground_based_people_detection_app.h~ | 427 +++++++++++ .../open_ptrack/detection/haardispada.h~ | 142 ++++ .../ground_based_people_detection_app.hpp | 53 +- .../ground_based_people_detection_app.hpp~ | 640 ++++++++++++++++ detection/launch/detector_depth.launch~ | 20 + detection/launch/detector_depth_zed.launch | 18 + detection/launch/detector_depth_zed.launch~ | 16 + detection/src/haardispada.cpp~ | 575 ++++++++++++++ detection_and_tracking_zed.launch | 15 + zed_ros_wrapper/CMakeLists.txt | 100 +++ zed_ros_wrapper/CMakeLists.txt~ | 101 +++ zed_ros_wrapper/LICENSE | 28 + zed_ros_wrapper/README.md | 105 +++ zed_ros_wrapper/cfg/Zed.cfg | 10 + zed_ros_wrapper/cfg/Zed.cfg~ | 10 + zed_ros_wrapper/launch/zed.launch | 43 ++ zed_ros_wrapper/launch/zed.launch~ | 43 ++ zed_ros_wrapper/launch/zed_tf.launch | 15 + zed_ros_wrapper/package.xml | 34 + zed_ros_wrapper/records/record_depth.sh | 2 + zed_ros_wrapper/records/record_stereo.sh | 2 + zed_ros_wrapper/src/zed_wrapper_node.cpp | 707 ++++++++++++++++++ zed_ros_wrapper/src/zed_wrapper_node.cpp~ | 707 ++++++++++++++++++ 39 files changed, 6605 insertions(+), 18 deletions(-) create mode 100644 README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt create mode 100644 detection/CMakeLists.txt~ create mode 100644 detection/apps/ground_based_people_detector_node.cpp~ create mode 100644 detection/apps/ground_based_people_detector_node_zed.cpp~ create mode 100644 detection/apps/haardispada_node.cpp~ create mode 100644 detection/apps/haardispada_node_sr.cpp~ create mode 100644 detection/apps/haardispada_nodelet.cpp~ create mode 100644 detection/conf/ground_.txt create mode 100644 detection/conf/ground_based_people_detector_zed.yaml create mode 100644 detection/conf/ground_based_people_detector_zed.yaml~ create mode 100644 detection/conf/ground_zed_tracked_frame.txt create mode 100644 detection/include/open_ptrack/detection/ground_based_people_detection_app.h~ create mode 100644 detection/include/open_ptrack/detection/haardispada.h~ create mode 100644 detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp~ create mode 100644 detection/launch/detector_depth.launch~ create mode 100644 detection/launch/detector_depth_zed.launch create mode 100644 detection/launch/detector_depth_zed.launch~ create mode 100644 detection/src/haardispada.cpp~ create mode 100644 detection_and_tracking_zed.launch create mode 100644 zed_ros_wrapper/CMakeLists.txt create mode 100644 zed_ros_wrapper/CMakeLists.txt~ create mode 100644 zed_ros_wrapper/LICENSE create mode 100644 
zed_ros_wrapper/README.md create mode 100644 zed_ros_wrapper/cfg/Zed.cfg create mode 100644 zed_ros_wrapper/cfg/Zed.cfg~ create mode 100644 zed_ros_wrapper/launch/zed.launch create mode 100644 zed_ros_wrapper/launch/zed.launch~ create mode 100644 zed_ros_wrapper/launch/zed_tf.launch create mode 100644 zed_ros_wrapper/package.xml create mode 100644 zed_ros_wrapper/records/record_depth.sh create mode 100644 zed_ros_wrapper/records/record_stereo.sh create mode 100644 zed_ros_wrapper/src/zed_wrapper_node.cpp create mode 100644 zed_ros_wrapper/src/zed_wrapper_node.cpp~ diff --git a/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt b/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt new file mode 100644 index 0000000..1a16c7c --- /dev/null +++ b/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt @@ -0,0 +1,113 @@ +Built for Ubuntu 14.04 with ROS Indigo + +---------------------------------INSTALLING OPT WITH ZED-------------------------------------------------------- + +First, install the CUDA 7.5 Toolkit: + +Using the link below: +https://developer.nvidia.com/cuda-75-downloads-archive + +For the options, select: +Linux +x86_64 +Ubuntu +14.04 +deb (local) + +Download the cuda-repo-ubuntu1404-7-5-local package. + +After downloading: +In a terminal, go to the folder containing the CUDA package +$ sudo dpkg -i cuda-repo-ubuntu1404-7-5-local_7.5-18_amd64.deb +$ sudo apt-get update +$ sudo apt-get install cuda-7-5 + +After the install, reboot the computer. +Verify the NVIDIA driver is installed: +$ nvidia-settings + +and make sure an NVIDIA driver version number is listed. + +Next, make sure the environment paths are set: +$ export CUDA_HOME=/usr/local/cuda-7.5 +$ export LD_LIBRARY_PATH=${CUDA_HOME}/lib64 + +$ PATH=${CUDA_HOME}/bin:${PATH} +$ export PATH + +Finally, verify the install: +$ cuda-install-samples-7.5.sh ~ +$ cd ~/NVIDIA_CUDA-7.5_Samples +$ cd 1_Utilities/deviceQuery +$ make +$ ./deviceQuery + +The terminal should then output the CUDA version info. + +Next, it is necessary to install ROS and OpenCV 2.4 (included in ROS). +From the directory where OPT is currently copied: +$ cd open_ptrack/scripts +$ chmod +x *.sh +$ ./ros_install.sh +$ source /opt/ros/indigo/setup.bash +$ ./ros_configure.sh + +OpenCV 3.1 is required for the ZED SDK to work; however, it must not conflict with OpenCV 2, so we will create a separate install path: +$ cd +$ sudo apt-get update + +$ sudo apt-get install libopencv-dev build-essential checkinstall cmake pkg-config yasm libtiff4-dev libjpeg-dev libjasper-dev libavcodec-dev libavformat-dev libswscale-dev libdc1394-22-dev libxine-dev libgstreamer0.10-dev libgstreamer-plugins-base0.10-dev libv4l-dev python-dev python-numpy libtbb-dev libqt4-dev libgtk2.0-dev libfaac-dev libmp3lame-dev libopencore-amrnb-dev libopencore-amrwb-dev libtheora-dev libvorbis-dev libxvidcore-dev x264 v4l-utils + +$ sudo add-apt-repository ppa:joyard-nicolas/ffmpeg +$ sudo apt-get update +$ sudo apt-get install ffmpeg +$ sudo apt-get install frei0r-plugins + +$ sudo mkdir /usr/local/opencv3 + +$ mkdir OpenCV +$ cd OpenCV +$ git clone https://github.com/Itseez/opencv.git + +$ cd opencv +$ mkdir release +$ cd release +$ cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local/opencv3 -D WITH_TBB=ON -D BUILD_NEW_PYTHON_SUPPORT=ON -D WITH_V4L=ON -D INSTALL_C_EXAMPLES=ON -D INSTALL_PYTHON_EXAMPLES=ON -D BUILD_EXAMPLES=ON -D WITH_QT=ON -D WITH_OPENGL=ON -D ENABLE_FAST_MATH=1 -D CUDA_FAST_MATH=1 -D WITH_CUBLAS=1 ..
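+ +The number of cores available for the build step below can be checked with the nproc utility (GNU coreutils): +$ nproc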
+ +Note: adjust the -j7 flag to the number of available cores on your computer +$ make -j7 +$ sudo make install +This process will take a long time. + +$ printf '# OpenCV\nPKG_CONFIG_PATH=$PKG_CONFIG_PATH:/usr/local/opencv3/lib/pkgconfig\nexport PKG_CONFIG_PATH\n' >> ~/.bashrc +$ source ~/.bashrc + +Now it is time to install the ZED camera drivers: +First, make sure the ZED camera is plugged into a POWERED USB 3.0 port +$ cd +$ cd open_ptrack/scripts +$ ./ZED_SDK_Linux_x86_64_v1.1.0.run +Press q to exit the license agreement and accept all defaults. + +Finally, it is time to install OPT: +$ cd +$ cd open_ptrack/scripts +$ ./openptrack_install.sh +Note: sometimes ./openptrack_install.sh must be run twice for the install to complete fully. +If a persistent error appears that seems linked to OpenCV, it is sometimes necessary to install OpenCV 2 in /usr/local (following the same procedure as before) and to add its pkg-config directory (/usr/local/lib/pkgconfig) to PKG_CONFIG_PATH. + +It is suggested to delete the open_ptrack folder in the home directory and then recreate it as a symlink: +$ ln -s ~/workspace/ros/catkin/src/open_ptrack ~/open_ptrack + +------------------RUNNING OPT WITH ZED----------------------------------------------------- +To run, make sure the ZED is plugged in: +$ roslaunch tracking detection_and_tracking_zed.launch + +In a second terminal, tracking can be fine-tuned using: +$ rosrun rqt_reconfigure rqt_reconfigure + +ZED camera settings can also be adjusted in open_ptrack/zed_ros_wrapper/launch/zed.launch: +Resolution parameter: 0 = 2K, 1 = 1080HD, 2 = 720HD, 3 = VGA +Quality parameter (disparity map quality): 0 = none, 1 = performance, 2 = medium, 3 = high +Frame rate: 2K has a max frame rate of 15 fps, 1080HD 30 fps, and 720HD 60 fps +--------------------------------------------------------------------------------------------- diff --git a/detection/CMakeLists.txt b/detection/CMakeLists.txt index b030e6a..5b70b79 100644 --- a/detection/CMakeLists.txt +++ b/detection/CMakeLists.txt @@ -11,11 +11,12 @@ if(VTK_FOUND) include (${VTK_USE_FILE}) endif() -find_package(OpenCV REQUIRED) +find_package(OpenCV 2.4 REQUIRED) include_directories(${OpenCV_INCLUDE_DIRS}) link_directories(${OpenCV_LIB_DIR}) add_definitions(${OpenCV_DEFINITIONS}) + find_package(Eigen REQUIRED) include_directories(${Eigen_INCLUDE_DIRS} include ${catkin_INCLUDE_DIRS}) @@ -51,9 +52,9 @@ add_executable(ground_based_people_detector apps/ground_based_people_detector_no SET_TARGET_PROPERTIES(ground_based_people_detector PROPERTIES LINK_FLAGS -L${PCL_LIBRARY_DIRS}) target_link_libraries(ground_based_people_detector ${PROJECT_NAME} ${catkin_LIBRARIES} ${PCL_LIBRARIES} vtkHybrid vtkRendering) -add_executable(ground_based_people_detector_sr apps/ground_based_people_detector_node_sr.cpp) -SET_TARGET_PROPERTIES(ground_based_people_detector_sr PROPERTIES LINK_FLAGS -L${PCL_LIBRARY_DIRS}) -target_link_libraries(ground_based_people_detector_sr ${PROJECT_NAME} ${catkin_LIBRARIES} ${PCL_LIBRARIES} vtkHybrid vtkRendering) +#add_executable(ground_based_people_detector_sr apps/ground_based_people_detector_node_sr.cpp) +#SET_TARGET_PROPERTIES(ground_based_people_detector_sr PROPERTIES LINK_FLAGS -L${PCL_LIBRARY_DIRS}) +#target_link_libraries(ground_based_people_detector_sr ${PROJECT_NAME} ${catkin_LIBRARIES} ${PCL_LIBRARIES} vtkHybrid vtkRendering) add_library(haar_disp_ada src/haardispada.cpp apps/haardispada_nodelet.cpp) target_link_libraries(haar_disp_ada ${catkin_LIBRARIES}) diff --git a/detection/CMakeLists.txt~ b/detection/CMakeLists.txt~ new file mode 100644 index 0000000..c5c43a7 --- /dev/null +++ 
b/detection/CMakeLists.txt~ @@ -0,0 +1,66 @@ +cmake_minimum_required(VERSION 2.8.3) +project(detection) +set(CMAKE_BUILD_TYPE RelWithDebInfo) +find_package(catkin REQUIRED COMPONENTS cmake_modules roscpp rospy pcl_ros pcl_conversions nodelet rosconsole image_transport opt_utils opt_msgs dynamic_reconfigure) + +find_package(VTK REQUIRED) +if(VTK_FOUND) + include_directories(${VTK_INCLUDE_DIRS}) + link_directories(${VTK_LIBRARY_DIRS}) + add_definitions(${VTK_DEFINITIONS}) + include (${VTK_USE_FILE}) +endif() + +find_package(OpenCV 2.4 REQUIRED) +include_directories(${OpenCV_INCLUDE_DIRS}) +link_directories(${OpenCV_LIB_DIR}) +add_definitions(${OpenCV_DEFINITIONS}) + + +find_package(Eigen REQUIRED) +include_directories(${Eigen_INCLUDE_DIRS} include ${catkin_INCLUDE_DIRS}) + +find_package(PCL 1.7 REQUIRED) +include_directories(BEFORE ${PCL_INCLUDE_DIRS}) +link_directories(${PCL_LIBRARY_DIRS}) +add_definitions(${PCL_DEFINITIONS}) +MESSAGE("PCL_INCLUDE_DIRS\n${PCL_INCLUDE_DIRS}\n") +MESSAGE("PCL_LIBRARY_DIRS\n${PCL_LIBRARY_DIRS}\n") +MESSAGE("PCL_DEFINITIONS\n${PCL_DEFINITIONS}\n") +MESSAGE("PCL_COMMON_LIBRARIES\n${PCL_COMMON_LIBRARIES}\n") +if (NOT PCL_FOUND) + MESSAGE(FATAL_ERROR "PCL not found.\n") +endif (NOT PCL_FOUND) + +# Dynamic reconfigure support +generate_dynamic_reconfigure_options(cfg/HaarDispAdaDetector.cfg cfg/GroundBasedPeopleDetector.cfg) + +include_directories(cfg/cpp) + +catkin_package( + INCLUDE_DIRS include + LIBRARIES ${PROJECT_NAME} + CATKIN_DEPENDS roscpp pcl_ros pcl_conversions opt_utils opt_msgs +) + + +add_library(${PROJECT_NAME} src/detection_source.cpp src/detection.cpp) +add_dependencies(${PROJECT_NAME} ${PROJECT_NAME}_gencfg) +target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES} ${OpenCV_LIBS}) + +add_executable(ground_based_people_detector apps/ground_based_people_detector_node.cpp) +SET_TARGET_PROPERTIES(ground_based_people_detector PROPERTIES LINK_FLAGS -L${PCL_LIBRARY_DIRS}) +target_link_libraries(ground_based_people_detector ${PROJECT_NAME} ${catkin_LIBRARIES} ${PCL_LIBRARIES} vtkHybrid vtkRendering) + +add_executable(ground_based_people_detector_sr apps/ground_based_people_detector_node_sr.cpp) +SET_TARGET_PROPERTIES(ground_based_people_detector_sr PROPERTIES LINK_FLAGS -L${PCL_LIBRARY_DIRS}) +target_link_libraries(ground_based_people_detector_sr ${PROJECT_NAME} ${catkin_LIBRARIES} ${PCL_LIBRARIES} vtkHybrid vtkRendering) + +add_library(haar_disp_ada src/haardispada.cpp apps/haardispada_nodelet.cpp) +target_link_libraries(haar_disp_ada ${catkin_LIBRARIES}) + +add_executable(HaarDispAda174 apps/haardispada_node.cpp) +target_link_libraries(HaarDispAda174 haar_disp_ada ${PROJECT_NAME} ${catkin_LIBRARIES} ${OpenCV_LIBS} boost_system boost_signals) + +add_executable(HaarDispAda174_sr apps/haardispada_node_sr.cpp) +target_link_libraries(HaarDispAda174_sr haar_disp_ada ${PROJECT_NAME} ${catkin_LIBRARIES} ${OpenCV_LIBS} boost_system boost_signals) diff --git a/detection/apps/ground_based_people_detector_node.cpp b/detection/apps/ground_based_people_detector_node.cpp index 3b8c58f..5c73cb0 100644 --- a/detection/apps/ground_based_people_detector_node.cpp +++ b/detection/apps/ground_based_people_detector_node.cpp @@ -74,6 +74,9 @@ #include #include +#include +#include + using namespace opt_msgs; using namespace sensor_msgs; @@ -115,6 +118,11 @@ bool background_subtraction; // Threshold on the ratio of valid points needed for ground estimation double valid_points_threshold; +//ros::Publisher extractedRGB_cloud; +//ros::Publisher beforeVoxelFilter_cloud; 
+//ros::Publisher preProcessed_cloud; +//ros::Publisher groundRemoval_cloud; + void cloud_cb (const PointCloudT::ConstPtr& callback_cloud) { @@ -143,6 +151,51 @@ updateBackgroundCallback (const std_msgs::String::ConstPtr & msg) } } +/* +//Publish Intermediate Point Cloud Calls +void +publishExtractRGBCloud(PointCloudT::Ptr& cloud) +{ + sensor_msgs::PointCloud2 output; + pcl::toROSMsg(*cloud, output); // Convert the point cloud to a ROS message + extractedRGB_cloud.publish(output); +} + +void +publishpreProcessedCloud(PointCloudT::Ptr& cloud) +{ + sensor_msgs::PointCloud2 output; + pcl::toROSMsg(*cloud, output); // Convert the point cloud to a ROS message + preProcessed_cloud.publish(output); +} + + +void +publishBeforeVoxelFilterCloud(PointCloudT::Ptr& cloud) +{ + sensor_msgs::PointCloud2 output; + pcl::toROSMsg(*cloud, output); // Convert the point cloud to a ROS message + beforeVoxelFilter_cloud.publish(output); +} + +void +publishBeforeVoxelFilterDummyCloud(PointCloudT::Ptr& cloud) +{ + //sensor_msgs::PointCloud2 output; + //pcl::toROSMsg(*cloud, output); // Convert the point cloud to a ROS message + //beforeVoxelFilter_cloud.publish(output); +} + + +void +publishgroundRemovalCloud(PointCloudT::Ptr& cloud) +{ + sensor_msgs::PointCloud2 output; + pcl::toROSMsg(*cloud, output); // Convert the point cloud to a ROS message + groundRemoval_cloud.publish(output); +} +*/ + void computeBackgroundCloud (int frames, float voxel_size, std::string frame_id, ros::Rate rate, PointCloudT::Ptr& background_cloud) { @@ -155,7 +208,7 @@ computeBackgroundCloud (int frames, float voxel_size, std::string frame_id, ros: { // Point cloud pre-processing (downsampling and filtering): PointCloudT::Ptr cloud_filtered(new PointCloudT); - cloud_filtered = people_detector.preprocessCloud (cloud); + cloud_filtered = people_detector.preprocessCloud (cloud);//, &publishBeforeVoxelFilterDummyCloud); *background_cloud += *cloud_filtered; ros::spinOnce(); @@ -316,6 +369,11 @@ main (int argc, char** argv) ros::Publisher detection_pub; detection_pub= nh.advertise(output_topic, 3); + //extractedRGB_cloud = nh.advertise (pointcloud_topic + "/extractedRGB", 3); + //preProcessed_cloud = nh.advertise (pointcloud_topic + "/preProcessed", 3); + //groundRemoval_cloud = nh.advertise (pointcloud_topic + "/groundRemoval", 3); + //beforeVoxelFilter_cloud = nh.advertise (pointcloud_topic + "/beforeVoxelFilter", 3); + Rois output_rois_; open_ptrack::opt_utils::Conversions converter; @@ -407,10 +465,8 @@ main (int argc, char** argv) if (new_cloud_available_flag) { new_cloud_available_flag = false; - // Convert PCL cloud header to ROS header: std_msgs::Header cloud_header = pcl_conversions::fromPCL(cloud->header); - // If requested, update background: if (update_background) { @@ -427,9 +483,11 @@ main (int argc, char** argv) // Perform people detection on the new cloud: std::vector > clusters; // vector containing persons clusters + + people_detector.setInputCloud(cloud); people_detector.setGround(ground_coeffs); // set floor coefficients - people_detector.compute(clusters); // perform people detection + people_detector.compute(clusters);//, &publishExtractRGBCloud, &publishpreProcessedCloud , &publishgroundRemovalCloud, &publishBeforeVoxelFilterCloud); // perform people detection // If not lock_ground, update ground coefficients: if (not lock_ground) diff --git a/detection/apps/ground_based_people_detector_node.cpp~ b/detection/apps/ground_based_people_detector_node.cpp~ new file mode 100644 index 0000000..e4fd3fe --- /dev/null +++ 
b/detection/apps/ground_based_people_detector_node.cpp~ @@ -0,0 +1,568 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2013-, Matteo Munaro [matteo.munaro@dei.unipd.it] + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * ground_based_people_detector_node.cpp + * Created on: Jul 07, 2013 + * Author: Matteo Munaro + * + * ROS node which performs people detection assuming that people stand/walk on a ground plane. + * As a first step, the ground is manually initialized, then people detection is performed with the GroundBasedPeopleDetectionApp class, + * which implements the people detection algorithm described here: + * M. Munaro, F. Basso and E. Menegatti, + * Tracking people within groups with RGB-D data, + * In Proceedings of the International Conference on Intelligent Robots and Systems (IROS) 2012, Vilamoura (Portugal), 2012. + */ + +// ROS includes: +#include +#include + +// PCL includes: +#include +#include +#include +#include +#include +#include +#include + +// Open PTrack includes: +#include +#include +#include + +//Publish Messages +#include +#include +#include +#include +#include +#include + +// Dynamic reconfigure: +#include +#include + +#include +#include + +using namespace opt_msgs; +using namespace sensor_msgs; + +typedef pcl::PointXYZRGB PointT; +typedef pcl::PointCloud PointCloudT; +typedef detection::GroundBasedPeopleDetectorConfig Config; +typedef dynamic_reconfigure::Server ReconfigureServer; + +bool new_cloud_available_flag = false; +PointCloudT::Ptr cloud(new PointCloudT); +bool intrinsics_already_set = false; +Eigen::Matrix3f intrinsics_matrix; +bool update_background = false; + +// Min confidence for people detection: +double min_confidence; +// People detection object +open_ptrack::detection::GroundBasedPeopleDetectionApp people_detector; +// Flag stating if classifiers based on RGB image should be used or not +bool use_rgb; +// Threshold on image luminance. 
If luminance is over this threshold, classifiers on RGB image are also used +int minimum_luminance; +// If true, sensor tilt angle wrt ground plane is compensated to improve people detection +bool sensor_tilt_compensation; +// Voxel size for downsampling the cloud +double voxel_size; +// If true, do not update the ground plane at every frame +bool lock_ground; +// Frames to use for updating the background +int max_background_frames; +// Main loop rate: +double rate_value; +// Voxel resolution of the octree used to represent the background +double background_octree_resolution; +// Background cloud +PointCloudT::Ptr background_cloud; +// If true, background subtraction is performed +bool background_subtraction; +// Threshold on the ratio of valid points needed for ground estimation +double valid_points_threshold; + +//ros::Publisher extractedRGB_cloud; +//ros::Publisher beforeVoxelFilter_cloud; +//ros::Publisher preProcessed_cloud; +//ros::Publisher groundRemoval_cloud; + +void +cloud_cb (const PointCloudT::ConstPtr& callback_cloud) +{ + *cloud = *callback_cloud; + new_cloud_available_flag = true; +} + +void +cameraInfoCallback (const sensor_msgs::CameraInfo::ConstPtr & msg) +{ + if (!intrinsics_already_set) + { + intrinsics_matrix << msg->K.elems[0], msg->K.elems[1], msg->K.elems[2], + msg->K.elems[3], msg->K.elems[4], msg->K.elems[5], + msg->K.elems[6], msg->K.elems[7], msg->K.elems[8]; + intrinsics_already_set = true; + } +} + +void +updateBackgroundCallback (const std_msgs::String::ConstPtr & msg) +{ + if (msg->data == "update") + { + update_background = true; + } +} + +/* +//Publish Intermediate Point Cloud Calls +void +publishExtractRGBCloud(PointCloudT::Ptr& cloud) +{ + sensor_msgs::PointCloud2 output; + pcl::toROSMsg(*cloud, output); // Convert the point cloud to a ROS message + extractedRGB_cloud.publish(output); +} + +void +publishpreProcessedCloud(PointCloudT::Ptr& cloud) +{ + sensor_msgs::PointCloud2 output; + pcl::toROSMsg(*cloud, output); // Convert the point cloud to a ROS message + preProcessed_cloud.publish(output); +} + + +void +publishBeforeVoxelFilterCloud(PointCloudT::Ptr& cloud) +{ + sensor_msgs::PointCloud2 output; + pcl::toROSMsg(*cloud, output); // Convert the point cloud to a ROS message + beforeVoxelFilter_cloud.publish(output); +} + +void +publishBeforeVoxelFilterDummyCloud(PointCloudT::Ptr& cloud) +{ + //sensor_msgs::PointCloud2 output; + //pcl::toROSMsg(*cloud, output); // Convert the point cloud to a ROS message + //beforeVoxelFilter_cloud.publish(output); +} + + +void +publishgroundRemovalCloud(PointCloudT::Ptr& cloud) +{ + sensor_msgs::PointCloud2 output; + pcl::toROSMsg(*cloud, output); // Convert the point cloud to a ROS message + groundRemoval_cloud.publish(output); +} +*/ + +void +computeBackgroundCloud (int frames, float voxel_size, std::string frame_id, ros::Rate rate, PointCloudT::Ptr& background_cloud) +{ + std::cout << "Background acquisition..." 
<< std::flush; + + // Create background cloud: + background_cloud->header = cloud->header; + background_cloud->points.clear(); + for (unsigned int i = 0; i < frames; i++) + { + // Point cloud pre-processing (downsampling and filtering): + PointCloudT::Ptr cloud_filtered(new PointCloudT); + cloud_filtered = people_detector.preprocessCloud (cloud, &publishBeforeVoxelFilterDummyCloud); + + *background_cloud += *cloud_filtered; + ros::spinOnce(); + rate.sleep(); + } + + // Voxel grid filtering: + PointCloudT::Ptr cloud_filtered(new PointCloudT); + pcl::VoxelGrid voxel_grid_filter_object; + voxel_grid_filter_object.setInputCloud(background_cloud); + voxel_grid_filter_object.setLeafSize (voxel_size, voxel_size, voxel_size); + voxel_grid_filter_object.filter (*cloud_filtered); + + background_cloud = cloud_filtered; + + // Background saving: + pcl::io::savePCDFileASCII ("/tmp/background_" + frame_id.substr(1, frame_id.length()-1) + ".pcd", *background_cloud); + + std::cout << "done." << std::endl << std::endl; +} + +void +configCb(Config &config, uint32_t level) +{ + valid_points_threshold = config.valid_points_threshold; + + min_confidence = config.ground_based_people_detection_min_confidence; + + people_detector.setHeightLimits (config.minimum_person_height, config.maximum_person_height); + + people_detector.setMaxDistance (config.max_distance); + + people_detector.setSamplingFactor (config.sampling_factor); + + use_rgb = config.use_rgb; + people_detector.setUseRGB (config.use_rgb); + + minimum_luminance = config.minimum_luminance; + + sensor_tilt_compensation = config.sensor_tilt_compensation; + people_detector.setSensorTiltCompensation (config.sensor_tilt_compensation); + + people_detector.setMinimumDistanceBetweenHeads (config.heads_minimum_distance); + + voxel_size = config.voxel_size; + people_detector.setVoxelSize (config.voxel_size); + + people_detector.setDenoisingParameters (config.apply_denoising, config.mean_k_denoising, config.std_dev_denoising); + + lock_ground = config.lock_ground; + + max_background_frames = int(config.background_seconds * rate_value); + + if (config.background_resolution != background_octree_resolution) + { + background_octree_resolution = config.background_resolution; + if (background_subtraction) + people_detector.setBackground(background_subtraction, background_octree_resolution, background_cloud); + } + + if (config.background_subtraction != background_subtraction) + { + if (config.background_subtraction) + { + update_background = true; + } + else + { + background_subtraction = false; + people_detector.setBackground(false, background_octree_resolution, background_cloud); + } + } +} + +bool +fileExists(const char *fileName) +{ + ifstream infile(fileName); + return infile.good(); +} + +int +main (int argc, char** argv) +{ + ros::init(argc, argv, "ground_based_people_detector"); + ros::NodeHandle nh("~"); + + // Dynamic reconfigure + boost::recursive_mutex config_mutex_; + boost::shared_ptr reconfigure_server_; + + // Read some parameters from launch file: + int ground_estimation_mode; + nh.param("ground_estimation_mode", ground_estimation_mode, 0); + std::string svm_filename; + + nh.param("classifier_file", svm_filename, std::string("./")); + nh.param("use_rgb", use_rgb, true); + nh.param("minimum_luminance", minimum_luminance, 20); + nh.param("ground_based_people_detection_min_confidence", min_confidence, -1.5); + double max_distance; + nh.param("max_distance", max_distance, 50.0); + double min_height; + nh.param("minimum_person_height", min_height, 1.3); + 
double max_height; + nh.param("maximum_person_height", max_height, 2.3); + // Point cloud sampling factor: + int sampling_factor; + nh.param("sampling_factor", sampling_factor, 1); + std::string pointcloud_topic; + nh.param("pointcloud_topic", pointcloud_topic, std::string("/camera/depth_registered/points")); + std::string output_topic; + nh.param("output_topic", output_topic, std::string("/ground_based_people_detector/detections")); + std::string camera_info_topic; + nh.param("camera_info_topic", camera_info_topic, std::string("/camera/rgb/camera_info")); + nh.param("rate", rate_value, 30.0); + // If true, exploit extrinsic calibration for estimating the ground plane equation: + bool ground_from_extrinsic_calibration; + nh.param("ground_from_extrinsic_calibration", ground_from_extrinsic_calibration, false); + nh.param("lock_ground", lock_ground, false); + nh.param("sensor_tilt_compensation", sensor_tilt_compensation, false); + nh.param("valid_points_threshold", valid_points_threshold, 0.2); + nh.param("background_subtraction", background_subtraction, false); + nh.param("background_resolution", background_octree_resolution, 0.3); + double background_seconds; // Number of seconds used to acquire the background + nh.param("background_seconds", background_seconds, 3.0); + std::string update_background_topic; // Topic where the background update message is published/read + nh.param("update_background_topic", update_background_topic, std::string("/background_update")); + double heads_minimum_distance; // Minimum distance between two persons' heads + nh.param("heads_minimum_distance", heads_minimum_distance, 0.3); + nh.param("voxel_size", voxel_size, 0.06); + bool read_ground_from_file; // Flag stating if the ground should be read from file, if present + nh.param("read_ground_from_file", read_ground_from_file, false); + // Denoising flag. If true, a statistical filter is applied to the point cloud to remove noise
+ bool apply_denoising; + nh.param("apply_denoising", apply_denoising, false); + // MeanK for denoising (the higher it is, the stronger is the filtering) + int mean_k_denoising; + nh.param("mean_k_denoising", mean_k_denoising, 5); + // Standard deviation for denoising (the lower it is, the stronger is the filtering) + double std_dev_denoising; + nh.param("std_dev_denoising", std_dev_denoising, 0.3); + + // Eigen::Matrix3f intrinsics_matrix; + intrinsics_matrix << 525, 0.0, 319.5, 0.0, 525, 239.5, 0.0, 0.0, 1.0; // Kinect RGB camera intrinsics + + // Initialize transforms to be used to correct sensor tilt to identity matrix: + Eigen::Affine3f transform, anti_transform; + transform = transform.Identity(); + anti_transform = transform.inverse(); + + // Subscribers: + ros::Subscriber sub = nh.subscribe(pointcloud_topic, 1, cloud_cb); + ros::Subscriber camera_info_sub = nh.subscribe(camera_info_topic, 1, cameraInfoCallback); + ros::Subscriber update_background_sub = nh.subscribe(update_background_topic, 1, updateBackgroundCallback); + + // Publishers: + ros::Publisher detection_pub; + detection_pub = nh.advertise(output_topic, 3); + + //extractedRGB_cloud = nh.advertise (pointcloud_topic + "/extractedRGB", 3); + //preProcessed_cloud = nh.advertise (pointcloud_topic + "/preProcessed", 3); + //groundRemoval_cloud = nh.advertise (pointcloud_topic + "/groundRemoval", 3); + //beforeVoxelFilter_cloud = nh.advertise (pointcloud_topic + "/beforeVoxelFilter", 3); + + Rois output_rois_; + open_ptrack::opt_utils::Conversions converter; + + ros::Rate rate(rate_value); + while(ros::ok() && !new_cloud_available_flag) + { + ros::spinOnce(); + rate.sleep(); + } + + // Create classifier for people detection: + open_ptrack::detection::PersonClassifier person_classifier; + person_classifier.loadSVMFromFile(svm_filename); // load trained SVM + + // People detection app initialization: + people_detector.setVoxelSize(voxel_size); // set the voxel size + people_detector.setMaxDistance(max_distance); // set maximum distance of people from the sensor + people_detector.setIntrinsics(intrinsics_matrix); // set RGB camera intrinsic parameters + people_detector.setClassifier(person_classifier); // set person classifier + people_detector.setHeightLimits(min_height, max_height); // set person height limits + people_detector.setSamplingFactor(sampling_factor); // set sampling factor + people_detector.setUseRGB(use_rgb); // set if RGB should be used or not + people_detector.setSensorTiltCompensation(sensor_tilt_compensation); // enable point cloud rotation correction + people_detector.setMinimumDistanceBetweenHeads (heads_minimum_distance); // set minimum distance between persons' heads + people_detector.setDenoisingParameters (apply_denoising, mean_k_denoising, std_dev_denoising); // set parameters for denoising the point cloud + + // Set up dynamic reconfiguration + ReconfigureServer::CallbackType f = boost::bind(&configCb, _1, _2); + reconfigure_server_.reset(new ReconfigureServer(config_mutex_, nh)); + reconfigure_server_->setCallback(f); + + // Loop until a valid point cloud is found + open_ptrack::detection::GroundplaneEstimation ground_estimator(ground_estimation_mode); + bool first_valid_frame = false; + int no_valid_frame_counter = 0; + while (!first_valid_frame) + { + if (!ground_estimator.tooManyNaN(cloud, 1 - valid_points_threshold)) + { // A point cloud is valid if the ratio #NaN / #valid points is lower than a threshold + first_valid_frame = 
true; + std::cout << "Valid frame found!" << std::endl; + } + else + { + if (++no_valid_frame_counter > 60) + { + std::cout << "No valid frame. Move the camera to a better position..." << std::endl; + no_valid_frame_counter = 0; + } + } + + // Execute callbacks: + ros::spinOnce(); + rate.sleep(); + } + std::cout << std::endl; + + // Initialization for background subtraction: + background_cloud = PointCloudT::Ptr (new PointCloudT); + std::string frame_id = cloud->header.frame_id; + max_background_frames = int(background_seconds * rate_value); + if (background_subtraction) + { + std::cout << "Background subtraction enabled." << std::endl; + + // Try to load the background from file: + if (pcl::io::loadPCDFile ("/tmp/background_" + frame_id.substr(1, frame_id.length()-1) + ".pcd", *background_cloud) == -1) + { + // File not found, then background acquisition: + computeBackgroundCloud (max_background_frames, voxel_size, frame_id, rate, background_cloud); + } + else + { + std::cout << "Background read from file." << std::endl << std::endl; + } + + people_detector.setBackground(background_subtraction, background_octree_resolution, background_cloud); + } + + // Ground estimation: + std::cout << "Ground plane initialization starting..." << std::endl; + ground_estimator.setInputCloud(cloud); + Eigen::VectorXf ground_coeffs = ground_estimator.computeMulticamera(ground_from_extrinsic_calibration, read_ground_from_file, + pointcloud_topic, sampling_factor, voxel_size); + + // Main loop: + while(ros::ok()) + { + if (new_cloud_available_flag) + { + new_cloud_available_flag = false; + // Convert PCL cloud header to ROS header: + std_msgs::Header cloud_header = pcl_conversions::fromPCL(cloud->header); + // If requested, update background: + if (update_background) + { + if (not background_subtraction) + { + std::cout << "Background subtraction enabled." 
<< std::endl; + background_subtraction = true; + } + computeBackgroundCloud (max_background_frames, voxel_size, frame_id, rate, background_cloud); + people_detector.setBackground (background_subtraction, background_octree_resolution, background_cloud); + + update_background = false; + } + + // Perform people detection on the new cloud: + std::vector > clusters; // vector containing persons clusters + + + people_detector.setInputCloud(cloud); + people_detector.setGround(ground_coeffs); // set floor coefficients + people_detector.compute(clusters);//, &publishExtractRGBCloud, &publishpreProcessedCloud , &publishgroundRemovalCloud, &publishBeforeVoxelFilterCloud); // perform people detection + + // If not lock_ground, update ground coefficients: + if (not lock_ground) + ground_coeffs = people_detector.getGround(); // get updated floor coefficients + + if (sensor_tilt_compensation) + people_detector.getTiltCompensationTransforms(transform, anti_transform); + + /// Write detection message: + DetectionArray::Ptr detection_array_msg(new DetectionArray); + // Set camera-specific fields: + detection_array_msg->header = cloud_header; + for(int i = 0; i < 3; i++) + for(int j = 0; j < 3; j++) + detection_array_msg->intrinsic_matrix.push_back(intrinsics_matrix(i, j)); + detection_array_msg->confidence_type = std::string("hog+svm"); + detection_array_msg->image_type = std::string("rgb"); + + // Add all valid detections: + for(std::vector >::iterator it = clusters.begin(); it != clusters.end(); ++it) + { + if((!use_rgb) | (people_detector.getMeanLuminance() < minimum_luminance) | // if RGB is not used or luminance is too low + ((people_detector.getMeanLuminance() >= minimum_luminance) & (it->getPersonConfidence() > min_confidence))) // if RGB is used, keep only people with confidence above a threshold + { + // Create detection message: + Detection detection_msg; + converter.Vector3fToVector3(anti_transform * it->getMin(), detection_msg.box_3D.p1); + converter.Vector3fToVector3(anti_transform * it->getMax(), detection_msg.box_3D.p2); + + float head_centroid_compensation = 0.05; + + // theoretical person centroid: + Eigen::Vector3f centroid3d = anti_transform * it->getTCenter(); + Eigen::Vector3f centroid2d = converter.world2cam(centroid3d, intrinsics_matrix); + // theoretical person top point: + Eigen::Vector3f top3d = anti_transform * it->getTTop(); + Eigen::Vector3f top2d = converter.world2cam(top3d, intrinsics_matrix); + // theoretical person bottom point: + Eigen::Vector3f bottom3d = anti_transform * it->getTBottom(); + Eigen::Vector3f bottom2d = converter.world2cam(bottom3d, intrinsics_matrix); + float enlarge_factor = 1.1; + float pixel_xc = centroid2d(0); + float pixel_yc = centroid2d(1); + float pixel_height = (bottom2d(1) - top2d(1)) * enlarge_factor; + float pixel_width = pixel_height / 2; + detection_msg.box_2D.x = int(centroid2d(0) - pixel_width/2.0); + detection_msg.box_2D.y = int(centroid2d(1) - pixel_height/2.0); + detection_msg.box_2D.width = int(pixel_width); + detection_msg.box_2D.height = int(pixel_height); + detection_msg.height = it->getHeight(); + detection_msg.confidence = it->getPersonConfidence(); + detection_msg.distance = it->getDistance(); + converter.Vector3fToVector3((1+head_centroid_compensation/centroid3d.norm())*centroid3d, detection_msg.centroid); + converter.Vector3fToVector3((1+head_centroid_compensation/top3d.norm())*top3d, detection_msg.top); + converter.Vector3fToVector3((1+head_centroid_compensation/bottom3d.norm())*bottom3d, detection_msg.bottom); + + // Add 
message: + detection_array_msg->detections.push_back(detection_msg); + } + } + detection_pub.publish(detection_array_msg); // publish message + } + + // Execute callbacks: + ros::spinOnce(); + rate.sleep(); + } + + // Delete background file from disk: + std::string filename = "/tmp/background_" + frame_id.substr(1, frame_id.length()-1) + ".pcd"; + if (fileExists (filename.c_str())) + { + remove( filename.c_str() ); + } + + return 0; +} + diff --git a/detection/apps/ground_based_people_detector_node_zed.cpp~ b/detection/apps/ground_based_people_detector_node_zed.cpp~ new file mode 100644 index 0000000..02b4b5e --- /dev/null +++ b/detection/apps/ground_based_people_detector_node_zed.cpp~ @@ -0,0 +1,516 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2013-, Matteo Munaro [matteo.munaro@dei.unipd.it] + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * ground_based_people_detector_node.cpp + * Created on: Jul 07, 2013 + * Author: Matteo Munaro + * + * ROS node which performs people detection assuming that people stand/walk on a ground plane. + * As a first step, the ground is manually initialized, then people detection is performed with the GroundBasedPeopleDetectionApp class, + * which implements the people detection algorithm described here: + * M. Munaro, F. Basso and E. Menegatti, + * Tracking people within groups with RGB-D data, + * In Proceedings of the International Conference on Intelligent Robots and Systems (IROS) 2012, Vilamoura (Portugal), 2012. 
+ */ + +// ROS includes: +#include +#include + +// PCL includes: +#include +#include +#include +#include +#include +#include +#include + +// Open PTrack includes: +#include +#include +#include + +//Publish Messages +#include +#include +#include +#include +#include +#include + +// Dynamic reconfigure: +#include +#include + +#include +#include + +using namespace opt_msgs; +using namespace sensor_msgs; + +typedef pcl::PointXYZRGB PointT; +typedef pcl::PointCloud PointCloudT; +typedef detection::GroundBasedPeopleDetectorConfig Config; +typedef dynamic_reconfigure::Server ReconfigureServer; + +bool new_cloud_available_flag = false; +PointCloudT::Ptr cloud(new PointCloudT); +bool intrinsics_already_set = false; +Eigen::Matrix3f intrinsics_matrix; +bool update_background = false; + +// Min confidence for people detection: +double min_confidence; +// People detection object +open_ptrack::detection::GroundBasedPeopleDetectionApp people_detector; +// Flag stating if classifiers based on RGB image should be used or not +bool use_rgb; +// Threshold on image luminance. If luminance is over this threshold, classifiers on RGB image are also used +int minimum_luminance; +// If true, sensor tilt angle wrt ground plane is compensated to improve people detection +bool sensor_tilt_compensation; +// Voxel size for downsampling the cloud +double voxel_size; +// If true, do not update the ground plane at every frame +bool lock_ground; +// Frames to use for updating the background +int max_background_frames; +// Main loop rate: +double rate_value; +// Voxel resolution of the octree used to represent the background +double background_octree_resolution; +// Background cloud +PointCloudT::Ptr background_cloud; +// If true, background subtraction is performed +bool background_subtraction; +// Threshold on the ratio of valid points needed for ground estimation +double valid_points_threshold; + +bool camera_info_available_flag = false; + +void +cloud_cb (const PointCloudT::ConstPtr& callback_cloud) +{ + *cloud = *callback_cloud; + new_cloud_available_flag = true; +} + +void +cameraInfoCallback (const sensor_msgs::CameraInfo::ConstPtr & msg) +{ + if (!intrinsics_already_set) + { + intrinsics_matrix << msg->K[0], 0, msg->K[2], 0, msg->K[4], msg->K[5], 0, 0, 1; + people_detector.setIntrinsics(intrinsics_matrix); + camera_info_available_flag = true; + intrinsics_already_set = true; + } +} + +void +updateBackgroundCallback (const std_msgs::String::ConstPtr & msg) +{ + if (msg->data == "update") + { + update_background = true; + } +} + +void +computeBackgroundCloud (int frames, float voxel_size, std::string frame_id, ros::Rate rate, PointCloudT::Ptr& background_cloud) +{ + std::cout << "Background acquisition..." 
<< std::flush; + + // Create background cloud: + background_cloud->header = cloud->header; + background_cloud->points.clear(); + for (unsigned int i = 0; i < frames; i++) + { + // Point cloud pre-processing (downsampling and filtering): + PointCloudT::Ptr cloud_filtered(new PointCloudT); + cloud_filtered = people_detector.preprocessCloud (cloud); + + *background_cloud += *cloud_filtered; + ros::spinOnce(); + rate.sleep(); + } + + // Voxel grid filtering: + PointCloudT::Ptr cloud_filtered(new PointCloudT); + pcl::VoxelGrid voxel_grid_filter_object; + voxel_grid_filter_object.setInputCloud(background_cloud); + voxel_grid_filter_object.setLeafSize (voxel_size, voxel_size, voxel_size); + voxel_grid_filter_object.filter (*cloud_filtered); + + background_cloud = cloud_filtered; + + // Background saving: + pcl::io::savePCDFileASCII ("/tmp/background_" + frame_id.substr(1, frame_id.length()-1) + ".pcd", *background_cloud); + + std::cout << "done." << std::endl << std::endl; +} + +void +configCb(Config &config, uint32_t level) +{ + valid_points_threshold = config.valid_points_threshold; + + min_confidence = config.ground_based_people_detection_min_confidence; + + people_detector.setHeightLimits (config.minimum_person_height, config.maximum_person_height); + + people_detector.setMaxDistance (config.max_distance); + + people_detector.setSamplingFactor (config.sampling_factor); + + use_rgb = config.use_rgb; + people_detector.setUseRGB (config.use_rgb); + + minimum_luminance = config.minimum_luminance; + + sensor_tilt_compensation = config.sensor_tilt_compensation; + people_detector.setSensorTiltCompensation (config.sensor_tilt_compensation); + + people_detector.setMinimumDistanceBetweenHeads (config.heads_minimum_distance); + + voxel_size = config.voxel_size; + people_detector.setVoxelSize (config.voxel_size); + + people_detector.setDenoisingParameters (config.apply_denoising, config.mean_k_denoising, config.std_dev_denoising); + + lock_ground = config.lock_ground; + + max_background_frames = int(config.background_seconds * rate_value); + + if (config.background_resolution != background_octree_resolution) + { + background_octree_resolution = config.background_resolution; + if (background_subtraction) + people_detector.setBackground(background_subtraction, background_octree_resolution, background_cloud); + } + + if (config.background_subtraction != background_subtraction) + { + if (config.background_subtraction) + { + update_background = true; + } + else + { + background_subtraction = false; + people_detector.setBackground(false, background_octree_resolution, background_cloud); + } + } +} + +bool +fileExists(const char *fileName) +{ + ifstream infile(fileName); + return infile.good(); +} + +int +main (int argc, char** argv) +{ + ros::init(argc, argv, "ground_based_people_detector"); + ros::NodeHandle nh("~"); + + // Dynamic reconfigure + boost::recursive_mutex config_mutex_; + boost::shared_ptr reconfigure_server_; + + // Read some parameters from launch file: + int ground_estimation_mode; + nh.param("ground_estimation_mode", ground_estimation_mode, 0); + std::string svm_filename; + + nh.param("classifier_file", svm_filename, std::string("./")); + nh.param("use_rgb", use_rgb, true); + nh.param("minimum_luminance", minimum_luminance, 20); + nh.param("ground_based_people_detection_min_confidence", min_confidence, -1.5); + double max_distance; + nh.param("max_distance", max_distance, 50.0); + double min_height; + nh.param("minimum_person_height", min_height, 1.3); + double max_height; + 
nh.param("maximum_person_height", max_height, 2.3); + // Point cloud sampling factor: + int sampling_factor; + nh.param("sampling_factor", sampling_factor, 1); + std::string pointcloud_topic; + nh.param("pointcloud_topic", pointcloud_topic, std::string("/camera/depth_registered/points")); + std::string output_topic; + nh.param("output_topic", output_topic, std::string("/ground_based_people_detector/detections")); + std::string camera_info_topic; + nh.param("camera_info_topic", camera_info_topic, std::string("/camera/rgb/camera_info")); + nh.param("rate", rate_value, 30.0); + // If true, exploit extrinsic calibration for estimatin the ground plane equation: + bool ground_from_extrinsic_calibration; + nh.param("ground_from_extrinsic_calibration", ground_from_extrinsic_calibration, false); + nh.param("lock_ground", lock_ground, false); + nh.param("sensor_tilt_compensation", sensor_tilt_compensation, false); + nh.param("valid_points_threshold", valid_points_threshold, 0.2); + nh.param("background_subtraction", background_subtraction, false); + nh.param("background_resolution", background_octree_resolution, 0.3); + double background_seconds; // Number of seconds used to acquire the background + nh.param("background_seconds", background_seconds, 3.0); + std::string update_background_topic; // Topic where the background update message is published/read + nh.param("update_background_topic", update_background_topic, std::string("/background_update")); + double heads_minimum_distance; // Minimum distance between two persons' head + nh.param("heads_minimum_distance", heads_minimum_distance, 0.3); + nh.param("voxel_size", voxel_size, 0.06); + bool read_ground_from_file; // Flag stating if the ground should be read from file, if present + nh.param("read_ground_from_file", read_ground_from_file, false); + // Denoising flag. 
+ bool apply_denoising; + nh.param("apply_denoising", apply_denoising, false); + // MeanK for denoising (the higher it is, the stronger is the filtering) + int mean_k_denoising; + nh.param("mean_k_denoising", mean_k_denoising, 5); + // Standard deviation for denoising (the lower it is, the stronger is the filtering) + double std_dev_denoising; + nh.param("std_dev_denoising", std_dev_denoising, 0.3); + + // Eigen::Matrix3f intrinsics_matrix; + intrinsics_matrix << 525, 0.0, 319.5, 0.0, 525, 239.5, 0.0, 0.0, 1.0; // Kinect RGB camera intrinsics + + // Initialize transforms to be used to correct sensor tilt to identity matrix: + Eigen::Affine3f transform, anti_transform; + transform = transform.Identity(); + anti_transform = transform.inverse(); + + // Subscribers: + ros::Subscriber sub = nh.subscribe(pointcloud_topic, 1, cloud_cb); + ros::Subscriber camera_info_sub = nh.subscribe(camera_info_topic, 1, cameraInfoCallback); + ros::Subscriber update_background_sub = nh.subscribe(update_background_topic, 1, updateBackgroundCallback); + + // Publishers: + ros::Publisher detection_pub; + detection_pub = nh.advertise(output_topic, 3); + + Rois output_rois_; + open_ptrack::opt_utils::Conversions converter; + + ros::Rate rate(rate_value); + while(ros::ok() && !new_cloud_available_flag) + { + ros::spinOnce(); + rate.sleep(); + } + + // Create classifier for people detection: + open_ptrack::detection::PersonClassifier person_classifier; + person_classifier.loadSVMFromFile(svm_filename); // load trained SVM + + // People detection app initialization: + people_detector.setVoxelSize(voxel_size); // set the voxel size + people_detector.setMaxDistance(max_distance); // set maximum distance of people from the sensor + people_detector.setIntrinsics(intrinsics_matrix); // set RGB camera intrinsic parameters + people_detector.setClassifier(person_classifier); // set person classifier + people_detector.setHeightLimits(min_height, max_height); // set person height limits + people_detector.setSamplingFactor(sampling_factor); // set sampling factor + people_detector.setUseRGB(use_rgb); // set if RGB should be used or not + people_detector.setSensorTiltCompensation(sensor_tilt_compensation); // enable point cloud rotation correction + people_detector.setMinimumDistanceBetweenHeads (heads_minimum_distance); // set minimum distance between persons' heads + people_detector.setDenoisingParameters (apply_denoising, mean_k_denoising, std_dev_denoising); // set parameters for denoising the point cloud + + // Set up dynamic reconfiguration + ReconfigureServer::CallbackType f = boost::bind(&configCb, _1, _2); + reconfigure_server_.reset(new ReconfigureServer(config_mutex_, nh)); + reconfigure_server_->setCallback(f); + + // Loop until a valid point cloud is found + open_ptrack::detection::GroundplaneEstimation ground_estimator(ground_estimation_mode); + bool first_valid_frame = false; + int no_valid_frame_counter = 0; + while (!first_valid_frame) + { + if (!ground_estimator.tooManyNaN(cloud, 1 - valid_points_threshold)) + { // A point cloud is valid if the ratio #NaN / #valid points is lower than a threshold + first_valid_frame = true; + std::cout << "Valid frame found!" << std::endl; + } + else + { + if (++no_valid_frame_counter > 60) + { + std::cout << "No valid frame. Move the camera to a better position..." 
<< std::endl; + no_valid_frame_counter = 0; + } + } + + // Execute callbacks: + ros::spinOnce(); + rate.sleep(); + } + std::cout << std::endl; + + // Initialization for background subtraction: + background_cloud = PointCloudT::Ptr (new PointCloudT); + std::string frame_id = cloud->header.frame_id; + max_background_frames = int(background_seconds * rate_value); + if (background_subtraction) + { + std::cout << "Background subtraction enabled." << std::endl; + + // Try to load the background from file: + if (pcl::io::loadPCDFile ("/tmp/background_" + frame_id.substr(1, frame_id.length()-1) + ".pcd", *background_cloud) == -1) + { + // File not found, then background acquisition: + computeBackgroundCloud (max_background_frames, voxel_size, frame_id, rate, background_cloud); + } + else + { + std::cout << "Background read from file." << std::endl << std::endl; + } + + people_detector.setBackground(background_subtraction, background_octree_resolution, background_cloud); + } + + // Ground estimation: + std::cout << "Ground plane initialization starting..." << std::endl; + ground_estimator.setInputCloud(cloud); + Eigen::VectorXf ground_coeffs = ground_estimator.computeMulticamera(ground_from_extrinsic_calibration, read_ground_from_file, + pointcloud_topic, sampling_factor, voxel_size); + + // Main loop: + while(ros::ok()) + { + if (new_cloud_available_flag) + { + new_cloud_available_flag = false; + // Convert PCL cloud header to ROS header: + std_msgs::Header cloud_header = pcl_conversions::fromPCL(cloud->header); + // If requested, update background: + if (update_background) + { + if (not background_subtraction) + { + std::cout << "Background subtraction enabled." << std::endl; + background_subtraction = true; + } + computeBackgroundCloud (max_background_frames, voxel_size, frame_id, rate, background_cloud); + people_detector.setBackground (background_subtraction, background_octree_resolution, background_cloud); + + update_background = false; + } + + // Perform people detection on the new cloud: + std::vector > clusters; // vector containing persons clusters + + + people_detector.setInputCloud(cloud); + people_detector.setGround(ground_coeffs); // set floor coefficients + people_detector.compute(clusters); // perform people detection + + // If not lock_ground, update ground coefficients: + if (not lock_ground) + ground_coeffs = people_detector.getGround(); // get updated floor coefficients + + if (sensor_tilt_compensation) + people_detector.getTiltCompensationTransforms(transform, anti_transform); + + /// Write detection message: + DetectionArray::Ptr detection_array_msg(new DetectionArray); + // Set camera-specific fields: + detection_array_msg->header = cloud_header; + for(int i = 0; i < 3; i++) + for(int j = 0; j < 3; j++) + detection_array_msg->intrinsic_matrix.push_back(intrinsics_matrix(i, j)); + detection_array_msg->confidence_type = std::string("hog+svm"); + detection_array_msg->image_type = std::string("rgb"); + + // Add all valid detections: + for(std::vector >::iterator it = clusters.begin(); it != clusters.end(); ++it) + { + if((!use_rgb) | (people_detector.getMeanLuminance() < minimum_luminance) | // if RGB is not used or luminance is too low + ((people_detector.getMeanLuminance() >= minimum_luminance) & (it->getPersonConfidence() > min_confidence))) // if RGB is used, keep only people with confidence above a threshold + { + // Create detection message: + Detection detection_msg; + converter.Vector3fToVector3(anti_transform * it->getMin(), detection_msg.box_3D.p1); + 
converter.Vector3fToVector3(anti_transform * it->getMax(), detection_msg.box_3D.p2); + + float head_centroid_compensation = 0.05; + + // theoretical person centroid: + Eigen::Vector3f centroid3d = anti_transform * it->getTCenter(); + Eigen::Vector3f centroid2d = converter.world2cam(centroid3d, intrinsics_matrix); + // theoretical person top point: + Eigen::Vector3f top3d = anti_transform * it->getTTop(); + Eigen::Vector3f top2d = converter.world2cam(top3d, intrinsics_matrix); + // theoretical person bottom point: + Eigen::Vector3f bottom3d = anti_transform * it->getTBottom(); + Eigen::Vector3f bottom2d = converter.world2cam(bottom3d, intrinsics_matrix); + float enlarge_factor = 1.1; + float pixel_xc = centroid2d(0); + float pixel_yc = centroid2d(1); + float pixel_height = (bottom2d(1) - top2d(1)) * enlarge_factor; + float pixel_width = pixel_height / 2; + detection_msg.box_2D.x = int(centroid2d(0) - pixel_width/2.0); + detection_msg.box_2D.y = int(centroid2d(1) - pixel_height/2.0); + detection_msg.box_2D.width = int(pixel_width); + detection_msg.box_2D.height = int(pixel_height); + detection_msg.height = it->getHeight(); + detection_msg.confidence = it->getPersonConfidence(); + detection_msg.distance = it->getDistance(); + converter.Vector3fToVector3((1+head_centroid_compensation/centroid3d.norm())*centroid3d, detection_msg.centroid); + converter.Vector3fToVector3((1+head_centroid_compensation/top3d.norm())*top3d, detection_msg.top); + converter.Vector3fToVector3((1+head_centroid_compensation/bottom3d.norm())*bottom3d, detection_msg.bottom); + + // Add message: + detection_array_msg->detections.push_back(detection_msg); + } + } + detection_pub.publish(detection_array_msg); // publish message + } + + // Execute callbacks: + ros::spinOnce(); + rate.sleep(); + } + + // Delete background file from disk: + std::string filename = "/tmp/background_" + frame_id.substr(1, frame_id.length()-1) + ".pcd"; + if (fileExists (filename.c_str())) + { + remove( filename.c_str() ); + } + + return 0; +} + diff --git a/detection/apps/haardispada_node.cpp b/detection/apps/haardispada_node.cpp index 071ff1a..54f0036 100644 --- a/detection/apps/haardispada_node.cpp +++ b/detection/apps/haardispada_node.cpp @@ -60,8 +60,8 @@ POSSIBILITY OF SUCH DAMAGE. #include // Used to display OPENCV images -#include -#include +#include +#include // Dynamic reconfigure: #include diff --git a/detection/apps/haardispada_node.cpp~ b/detection/apps/haardispada_node.cpp~ new file mode 100644 index 0000000..071ff1a --- /dev/null +++ b/detection/apps/haardispada_node.cpp~ @@ -0,0 +1,490 @@ +/* +Software License Agreement (BSD License) + +Copyright (c) 2013, Southwest Research Institute +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Southwest Research Institute, nor the names + of its contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + */ + +#include "open_ptrack/detection/haardispada.h" +#include "open_ptrack/opt_utils/conversions.h" + +#include "ros/ros.h" +#include + +//Publish Messages +#include "opt_msgs/RoiRect.h" +#include "opt_msgs/Rois.h" +#include "opt_msgs/DetectionArray.h" +#include "std_msgs/String.h" + +//Time Synchronizer +// NOTE: Time Synchronizer conflicts with QT includes may need to investigate +#include +#include +#include +#include + +//Subscribe Messages +#include +#include +#include +#include +#include + +// Image Transport +#include +#include + +// Used to display OPENCV images +#include +#include + +// Dynamic reconfigure: +#include +#include + +using namespace stereo_msgs; +using namespace message_filters::sync_policies; +using namespace opt_msgs; +using namespace sensor_msgs; +using namespace sensor_msgs::image_encodings; +using sensor_msgs::Image; +using cv_bridge::CvImagePtr; +using std::vector; +using std::string; +using cv::Rect; +using cv::Mat; + +class HaarDispAdaNode +{ + + typedef detection::HaarDispAdaDetectorConfig Config; + typedef dynamic_reconfigure::Server ReconfigureServer; + + private: + // Define Node + ros::NodeHandle node_; + + // Subscribe to Messages + message_filters::Subscriber sub_disparity_; + message_filters::Subscriber sub_image_; + message_filters::Subscriber sub_detections_; + + // Define the Synchronizer + typedef ApproximateTime ApproximatePolicy; + typedef message_filters::Synchronizer ApproximateSync; + boost::shared_ptr approximate_sync_; + + // Messages to Publish + ros::Publisher pub_rois_; + ros::Publisher pub_Color_Image_; + ros::Publisher pub_Disparity_Image_; + ros::Publisher pub_detections_; + + Rois output_rois_; + + enum {ACCUMULATE=0, TRAIN, DETECT, EVALUATE, LOAD}; + + //Define the Classifier Object + open_ptrack::detection::HaarDispAdaClassifier HDAC_; + + int num_class1; + int num_class0; + int num_TP_class1; + int num_FP_class1; + int num_TP_class0; + int num_FP_class0; + + // Flag stating if classifiers based on disparity image should be used or not: + bool use_disparity; + + // Minimum classifier confidence for people detection: + double min_confidence; + + // Object of class Conversions: + open_ptrack::opt_utils::Conversions converter; + + // Output detections message: + DetectionArray::Ptr output_detection_msg_; + + // Dynamic reconfigure + boost::recursive_mutex config_mutex_; + boost::shared_ptr reconfigure_server_; + + public: + + explicit HaarDispAdaNode(const ros::NodeHandle& nh): + node_(nh) + { + num_class1 = 0; + num_class0 = 0; + num_TP_class1 = 0; + num_FP_class1 = 0; + num_TP_class0 = 0; + num_FP_class0 = 0; + + string nn = ros::this_node::getName(); + int qs; + if(!node_.getParam("Q_Size",qs)){ + qs=3; + } + + if(!node_.getParam("use_disparity", use_disparity)){ + 
use_disparity = true; + } + + if(!node_.getParam("haar_disp_ada_min_confidence", min_confidence)){ + min_confidence = 3.0; + } + HDAC_.setMinConfidence(min_confidence); + + int NS; + if(!node_.getParam("num_Training_Samples",NS)){ + NS = 350; // default sets asside very little for training + node_.setParam("num_Training_Samples",NS); + } + HDAC_.setMaxSamples(NS); + + output_detection_msg_ = DetectionArray::Ptr(new DetectionArray); + + // Published Messages + pub_rois_ = node_.advertise("/HaarDispAdaOutputRois",qs); + pub_Color_Image_ = node_.advertise("/HaarDispAdaColorImage",qs); + pub_Disparity_Image_= node_.advertise("/HaarDispAdaDisparityImage",qs); + pub_detections_ = node_.advertise("/detector/detections",3); + + // Subscribe to Messages + sub_image_.subscribe(node_,"/Color_Image",qs); + sub_disparity_.subscribe(node_, "/Disparity_Image",qs); + sub_detections_.subscribe(node_,"/input_detections",qs); + + // Sync the Synchronizer + approximate_sync_.reset(new ApproximateSync(ApproximatePolicy(qs), + sub_image_, + sub_disparity_, + sub_detections_)); + + approximate_sync_->registerCallback(boost::bind(&HaarDispAdaNode::imageCb, + this, + _1, + _2, + _3)); + + // Set up dynamic reconfiguration + ReconfigureServer::CallbackType f = boost::bind(&HaarDispAdaNode::configCb, this, _1, _2); + reconfigure_server_.reset(new ReconfigureServer(config_mutex_, node_)); + reconfigure_server_->setCallback(f); + } + + int + get_mode() + { + + int callback_mode; + std::string mode=""; + node_.param("mode", mode, std::string("none")); + if(mode.compare("detect") == 0) + { + callback_mode = DETECT; + } + else if(mode.compare("train")==0) + { + callback_mode = TRAIN; + } + else if(mode.compare("load")==0) + { + callback_mode = LOAD; + } + else if(mode.compare("evaluate")==0) + { + callback_mode = EVALUATE; + } + else if(mode.compare("accumulate")==0) + { + callback_mode = ACCUMULATE; + } + else // default mode is accumulate + { + callback_mode = ACCUMULATE; + } + return(callback_mode); + } + + void + createOutputDetectionsMessage(const DetectionArray::ConstPtr& input_msg, vector confidences, DetectionArray::Ptr& output_msg) + { + // Set camera-specific fields: + output_msg->detections.clear(); + output_msg->header = input_msg->header; + output_msg->intrinsic_matrix = input_msg->intrinsic_matrix; + output_msg->confidence_type = std::string("haar+ada"); + output_msg->image_type = std::string("disparity"); + + // Add all valid detections: + int k = 0; + for(unsigned int i = 0; i < input_msg->detections.size(); i++) + { + if((!use_disparity) | (confidences[i] > min_confidence)) // keep only people with confidence above a threshold if use_disparity == true or all detections if use_disparity == false + { + output_msg->detections.push_back(input_msg->detections[i]); + if (use_disparity) + output_msg->detections[k].confidence = confidences[i]; + + k++; + } + } + } + + void + imageCb(const ImageConstPtr& image_msg, + const DisparityImageConstPtr& disparity_msg, + const opt_msgs::DetectionArray::ConstPtr& detection_msg) + { + // Callback for people detection: + bool label_all; + vector L_in; + vector L_out; + vector C_out; + vector R_in; + vector R_out; + string param_name; + string nn = ros::this_node::getName(); + string cfnm; + int numSamples; + double temp=0.0; + + // check encoding and create an intensity image from disparity image + assert(disparity_msg->image.encoding == image_encodings::TYPE_32FC1); + cv::Mat_ dmatrix(disparity_msg->image.height, + disparity_msg->image.width, + (float*) 
&disparity_msg->image.data[0], + disparity_msg->image.step); + + if(!node_.getParam("UseMissingDataMask",HDAC_.useMissingDataMask_)){ + HDAC_.useMissingDataMask_ = false; + } + + + // **********************************************************************// + // between these comments lies a hack that accounts for the difference // + // between the focal lengths of the kinect's color camera and ir cameras // + // TODO, parameterize to disable this hack for the stereo data // + bool kinect_disparity_fix; + if(!node_.getParam("Kinect_Disparity_Fix",kinect_disparity_fix)){ + kinect_disparity_fix = false; + } + + if(kinect_disparity_fix){ + int nrows = 434; + int ncols = 579; + int row_offset = (dmatrix.rows - nrows)/2; + int col_offset = (dmatrix.cols - ncols)/2; + cv::Mat Scaled_Disparity(nrows,ncols,CV_32FC1); + resize(dmatrix,Scaled_Disparity,cv::Size(ncols,nrows),0,0, CV_INTER_NN ); + for(int i=0;i(i,j) = 0.0; + } + } + for(int i=0;i(i+row_offset,j+col_offset) = Scaled_Disparity.at(i,j); + } + } + } + // **********************************************************************// + + // take the detection message and create vectors of ROIs and labels + R_in.clear(); + L_in.clear(); + + // Read camera intrinsic parameters: + Eigen::Matrix3f intrinsic_matrix; + for(int i = 0; i < 3; i++) + for(int j = 0; j < 3; j++) + intrinsic_matrix(i, j) = detection_msg->intrinsic_matrix[i * 3 + j]; + + // Read detections: + for(unsigned int i=0;idetections.size();i++) + { + // theoretical person centroid: + Eigen::Vector3f centroid3d(detection_msg->detections[i].centroid.x, detection_msg->detections[i].centroid.y, detection_msg->detections[i].centroid.z); + Eigen::Vector3f centroid2d = converter.world2cam(centroid3d, intrinsic_matrix); + + // theoretical person top point: + Eigen::Vector3f top3d(detection_msg->detections[i].top.x, detection_msg->detections[i].top.y, detection_msg->detections[i].top.z); + Eigen::Vector3f top2d = converter.world2cam(top3d, intrinsic_matrix); + + // Define Rect and make sure it is not out of the image: + int h = centroid2d(1) - top2d(1); + int w = h * 2 / 3.0; + int x = std::max(0, int(centroid2d(0) - w / 2.0)); + int y = std::max(0, int(top2d(1))); + h = std::min(int(disparity_msg->image.height - y), int(h)); + w = std::min(int(disparity_msg->image.width - x), int(w)); + + Rect R(x,y,w,h); + R_in.push_back(R); + L_in.push_back(1); + } + + // do the work of the node + switch(get_mode()){ + case DETECT: + // Perform people detection within the input rois: + label_all = true; + HDAC_.detect(R_in,L_in,dmatrix,R_out,L_out,C_out,label_all); + + // Build output detections message: + createOutputDetectionsMessage(detection_msg, C_out, output_detection_msg_); + + output_rois_.rois.clear(); + output_rois_.header.stamp = image_msg->header.stamp; + output_rois_.header.frame_id = image_msg->header.frame_id; +// ROS_INFO("HaarDispAda found %d objects",(int)L_out.size()); + for(unsigned int i=0; i= NS){ + param_name = "mode"; + node_.setParam(param_name, std::string("train")); + ROS_ERROR("DONE Accumulating, switching to train mode"); + } + } + break; + case TRAIN: + param_name = "classifier_file"; + node_.param(param_name,cfnm,std::string("/home/clewis/classifiers/test.xml")); + param_name = "HaarDispAdaPrior"; + node_.getParam(param_name,temp); + HDAC_.HaarDispAdaPrior_ = temp; + ROS_ERROR("Submitting %d Samples to Train ouput= %s",HDAC_.numSamples_,cfnm.c_str()); + HDAC_.train(cfnm); + param_name = "mode"; + node_.setParam("mode", std::string("evaluate")); + ROS_ERROR("DONE TRAINING, 
switching to evaluate mode"); + break; + case EVALUATE: + { + if(!HDAC_.loaded){ + param_name = "classifier_file"; + node_.param(param_name,cfnm,std::string("test.xml")); + std::cout << "HaarDispAda LOADING " << cfnm.c_str() << std::endl; + HDAC_.load(cfnm); + } + label_all = false; + HDAC_.detect(R_in,L_in,dmatrix,R_out,L_out,label_all); + + int total0_in_list=0; + int total1_in_list=0; + int fp_in_list=0; + int tp_in_list=0; + + // count the input labels + for(unsigned int i=0; i // Used to display OPENCV images -#include -#include +#include +#include using namespace stereo_msgs; using namespace message_filters::sync_policies; diff --git a/detection/apps/haardispada_node_sr.cpp~ b/detection/apps/haardispada_node_sr.cpp~ new file mode 100644 index 0000000..97194e2 --- /dev/null +++ b/detection/apps/haardispada_node_sr.cpp~ @@ -0,0 +1,462 @@ +/* +Software License Agreement (BSD License) + +Copyright (c) 2013, Southwest Research Institute +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Southwest Research Institute, nor the names + of its contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "open_ptrack/detection/haardispada.h" +#include "open_ptrack/opt_utils/conversions.h" + +#include "ros/ros.h" +#include + +//Publish Messages +#include "opt_msgs/RoiRect.h" +#include "opt_msgs/Rois.h" +#include "opt_msgs/DetectionArray.h" +#include "std_msgs/String.h" + +//Time Synchronizer +// NOTE: Time Synchronizer conflicts with QT includes may need to investigate +#include +#include +#include +#include + +//Subscribe Messages +#include +#include +#include +#include +#include + +// Image Transport +#include +#include + +// Used to display OPENCV images +#include +#include + +using namespace stereo_msgs; +using namespace message_filters::sync_policies; +using namespace opt_msgs; +using namespace sensor_msgs; +using namespace sensor_msgs::image_encodings; +using sensor_msgs::Image; +using cv_bridge::CvImagePtr; +using std::vector; +using std::string; +using cv::Rect; +using cv::Mat; + +class HaarDispAdaNode +{ + private: + // Define Node + ros::NodeHandle node_; + + // Subscribe to Messages + message_filters::Subscriber sub_disparity_; + message_filters::Subscriber sub_image_; + message_filters::Subscriber sub_detections_; + + // Define the Synchronizer + typedef ApproximateTime ApproximatePolicy; + typedef message_filters::Synchronizer ApproximateSync; + boost::shared_ptr approximate_sync_; + + // Messages to Publish + ros::Publisher pub_rois_; + ros::Publisher pub_Color_Image_; + ros::Publisher pub_Disparity_Image_; + ros::Publisher pub_detections_; + + Rois output_rois_; + + enum {ACCUMULATE=0, TRAIN, DETECT, EVALUATE, LOAD}; + + //Define the Classifier Object + open_ptrack::detection::HaarDispAdaClassifier HDAC_; + + int num_class1; + int num_class0; + int num_TP_class1; + int num_FP_class1; + int num_TP_class0; + int num_FP_class0; + + // Flag stating if classifiers based on disparity image should be used or not: + bool use_disparity; + + // Minimum classifier confidence for people detection: + double min_confidence; + + // Object of class Conversions: + open_ptrack::opt_utils::Conversions converter; + + // Output detections message: + DetectionArray::Ptr output_detection_msg_; + + public: + + explicit HaarDispAdaNode(const ros::NodeHandle& nh): + node_(nh) + { + num_class1 = 0; + num_class0 = 0; + num_TP_class1 = 0; + num_FP_class1 = 0; + num_TP_class0 = 0; + num_FP_class0 = 0; + + string nn = ros::this_node::getName(); + int qs; + if(!node_.getParam(nn + "/Q_Size",qs)){ + qs=3; + } + + if(!node_.getParam(nn + "/use_disparity", use_disparity)){ + use_disparity = true; + } + + if(!node_.getParam(nn + "/haar_disp_ada_min_confidence", min_confidence)){ + min_confidence = 3.0; + } + HDAC_.setMinConfidence(min_confidence); + + int NS; + if(!node_.getParam(nn + "/num_Training_Samples",NS)){ + NS = 350; // default sets asside very little for training + node_.setParam(nn + "/num_Training_Samples",NS); + } + HDAC_.setMaxSamples(NS); + + output_detection_msg_ = DetectionArray::Ptr(new DetectionArray); + + // Published Messages + pub_rois_ = node_.advertise("HaarDispAdaOutputRois",qs); + pub_Color_Image_ = node_.advertise("HaarDispAdaColorImage",qs); + pub_Disparity_Image_= node_.advertise("HaarDispAdaDisparityImage",qs); + pub_detections_ = node_.advertise("/detector/detections",3); + + // Subscribe to Messages + sub_image_.subscribe(node_,"Color_Image",qs); + sub_disparity_.subscribe(node_, "Disparity_Image",qs); + sub_detections_.subscribe(node_,"input_detections",qs); + + // Sync the Synchronizer + approximate_sync_.reset(new ApproximateSync(ApproximatePolicy(qs), + 
sub_image_, + sub_disparity_, + sub_detections_)); + + approximate_sync_->registerCallback(boost::bind(&HaarDispAdaNode::imageCb, + this, + _1, + _2, + _3)); + } + + int + get_mode() + { + + int callback_mode; + std::string mode=""; + node_.param(ros::this_node::getName() + "/mode", mode, std::string("none")); + if(mode.compare("detect") == 0) + { + callback_mode = DETECT; + } + else if(mode.compare("train")==0) + { + callback_mode = TRAIN; + } + else if(mode.compare("load")==0) + { + callback_mode = LOAD; + } + else if(mode.compare("evaluate")==0) + { + callback_mode = EVALUATE; + } + else if(mode.compare("accumulate")==0) + { + callback_mode = ACCUMULATE; + } + else // default mode is accumulate + { + callback_mode = ACCUMULATE; + } + return(callback_mode); + } + + void + createOutputDetectionsMessage(const DetectionArray::ConstPtr& input_msg, vector confidences, DetectionArray::Ptr& output_msg) + { + // Set camera-specific fields: + output_msg->detections.clear(); + output_msg->header = input_msg->header; + output_msg->intrinsic_matrix = input_msg->intrinsic_matrix; + output_msg->confidence_type = std::string("haar+ada"); + output_msg->image_type = std::string("disparity"); + + // Add all valid detections: + int k = 0; + for(unsigned int i = 0; i < input_msg->detections.size(); i++) + { + if((!use_disparity) | (confidences[i] > min_confidence)) // keep only people with confidence above a threshold + { + output_msg->detections.push_back(input_msg->detections[i]); + output_msg->detections[k].confidence = confidences[i]; + k++; + } + } + } + + void + imageCb(const ImageConstPtr& image_msg, + const ImageConstPtr& disparity_msg, + const opt_msgs::DetectionArray::ConstPtr& detection_msg) + { + // Callback for people detection: + bool label_all; + vector L_in; + vector L_out; + vector C_out; + vector R_in; + vector R_out; + string param_name; + string nn = ros::this_node::getName(); + string cfnm; + int numSamples; + double temp=0.0; + + // check encoding and create an intensity image from disparity image + assert(disparity_msg->encoding == image_encodings::TYPE_32FC1); + cv::Mat_ dmatrix(disparity_msg->height, + disparity_msg->width, + (float*) &disparity_msg->data[0], + disparity_msg->step); + + if(!node_.getParam(nn + "/UseMissingDataMask",HDAC_.useMissingDataMask_)){ + HDAC_.useMissingDataMask_ = false; + } + + cv_bridge::CvImagePtr cv_ptr = cv_bridge::toCvCopy(disparity_msg, image_encodings::MONO8); +// cv::Mat distance_image = cv_ptr->image; +// cv::imwrite("/home/matteo/distance.png", distance_image); + + // **********************************************************************// + // between these comments lies a hack that accounts for the difference // + // between the focal lengths of the kinect's color camera and ir cameras // + // TODO, parameterize to disable this hack for the stereo data // + bool kinect_disparity_fix; + if(!node_.getParam(nn + "/Kinect_Disparity_Fix",kinect_disparity_fix)){ + kinect_disparity_fix = false; + } + + if(kinect_disparity_fix){ + int nrows = 434; + int ncols = 579; + int row_offset = (dmatrix.rows - nrows)/2; + int col_offset = (dmatrix.cols - ncols)/2; + cv::Mat Scaled_Disparity(nrows,ncols,CV_32FC1); + resize(dmatrix,Scaled_Disparity,cv::Size(ncols,nrows),0,0, CV_INTER_NN ); + for(int i=0;i(i,j) = 0.0; + } + } + for(int i=0;i(i+row_offset,j+col_offset) = Scaled_Disparity.at(i,j); + } + } + } + // **********************************************************************// + + // take the detection message and create vectors of ROIs and labels + 
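// Each incoming detection is converted into an image ROI: the vertical pixel distance between the projected centroid and top points gives the ROI height, the ROI width is fixed at 2/3 of the height, and x/y/width/height are clamped so the rectangle stays inside the disparity image.
+ // Every ROI is pushed with label 1 (person candidate).
+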
R_in.clear(); + L_in.clear(); + + // Read camera intrinsic parameters: + Eigen::Matrix3f intrinsic_matrix; + for(int i = 0; i < 3; i++) + for(int j = 0; j < 3; j++) + intrinsic_matrix(i, j) = detection_msg->intrinsic_matrix[i * 3 + j]; + + // Read detections: + for(unsigned int i=0;idetections.size();i++) + { + // theoretical person centroid: + Eigen::Vector3f centroid3d(detection_msg->detections[i].centroid.x, detection_msg->detections[i].centroid.y, detection_msg->detections[i].centroid.z); + Eigen::Vector3f centroid2d = converter.world2cam(centroid3d, intrinsic_matrix); + + // theoretical person top point: + Eigen::Vector3f top3d(detection_msg->detections[i].top.x, detection_msg->detections[i].top.y, detection_msg->detections[i].top.z); + Eigen::Vector3f top2d = converter.world2cam(top3d, intrinsic_matrix); + + // Define Rect and make sure it is not out of the image: + int h = centroid2d(1) - top2d(1); + int w = h * 2 / 3.0; + int x = std::max(0, int(centroid2d(0) - w / 2.0)); + int y = std::max(0, int(top2d(1))); + h = std::min(int(disparity_msg->height - y), int(h)); + w = std::min(int(disparity_msg->width - x), int(w)); + + Rect R(x,y,w,h); + R_in.push_back(R); + L_in.push_back(1); + } + + // do the work of the node + switch(get_mode()){ + case DETECT: + // Perform people detection within the input rois: + label_all = true; + HDAC_.detect(R_in,L_in,dmatrix,R_out,L_out,C_out,label_all); + + // Build output detections message: + createOutputDetectionsMessage(detection_msg, C_out, output_detection_msg_); + + output_rois_.rois.clear(); + output_rois_.header.stamp = image_msg->header.stamp; + output_rois_.header.frame_id = image_msg->header.frame_id; +// ROS_INFO("HaarDispAda found %d objects",(int)L_out.size()); + for(unsigned int i=0; i= NS){ + param_name = nn + "/mode"; + node_.setParam(param_name, std::string("train")); + ROS_ERROR("DONE Accumulating, switching to train mode"); + } + } + break; + case TRAIN: + param_name = nn + "/classifier_file"; + node_.param(param_name,cfnm,std::string("/home/clewis/classifiers/test.xml")); + param_name = nn + "/HaarDispAdaPrior"; + node_.getParam(param_name,temp); + HDAC_.HaarDispAdaPrior_ = temp; + ROS_ERROR("Submitting %d Samples to Train ouput= %s",HDAC_.numSamples_,cfnm.c_str()); + HDAC_.train(cfnm); + param_name = nn + "/mode"; + node_.setParam(nn + "/mode", std::string("evaluate")); + ROS_ERROR("DONE TRAINING, switching to evaluate mode"); + break; + case EVALUATE: + { + if(!HDAC_.loaded){ + param_name = nn + "/classifier_file"; + node_.param(param_name,cfnm,std::string("test.xml")); + std::cout << "HaarDispAda LOADING " << cfnm.c_str() << std::endl; + HDAC_.load(cfnm); + } + label_all = false; + HDAC_.detect(R_in,L_in,dmatrix,R_out,L_out,label_all); + + int total0_in_list=0; + int total1_in_list=0; + int fp_in_list=0; + int tp_in_list=0; + + // count the input labels + for(unsigned int i=0; i + +//Publish Messages +#include "opt_msgs/RoiRect.h" +#include "opt_msgs/Rois.h" +#include "std_msgs/String.h" + +//Time Synchronizer +// NOTE: Time Synchronizer conflicts with QT includes may need to investigate +#include +#include +#include +#include + +//Subscribe Messages +#include +#include +#include +#include +#include + +// Image Transport +#include +#include + +// Used to display OPENCV images +#include +#include + +using namespace stereo_msgs; +using namespace message_filters::sync_policies; +using namespace opt_msgs; +using namespace sensor_msgs; +using namespace sensor_msgs::image_encodings; +using sensor_msgs::Image; +using 
cv_bridge::CvImagePtr; +using std::vector; +using std::string; +using cv::Rect; +using cv::Mat; + +namespace open_ptrack +{ + namespace detection + { + class HaarDispAdaNodelet: public nodelet::Nodelet + { + private: + // Define Node + ros::NodeHandle node_; + ros::NodeHandle private_node_; + + // Subscribe to Messages + message_filters::Subscriber sub_disparity_; + message_filters::Subscriber sub_image_; + message_filters::Subscriber sub_rois_; + + // Define the Synchronizer + // typedef ApproximateTime ApproximatePolicy; + typedef ExactTime ApproximatePolicy; + typedef message_filters::Synchronizer ApproximateSync; + boost::shared_ptr approximate_sync_; + + // Messages to Publish + ros::Publisher pub_rois_; + ros::Publisher pub_Color_Image_; + ros::Publisher pub_Disparity_Image_; + + Rois output_rois_; + + enum {ACCUMULATE=0, TRAIN, DETECT, EVALUATE, LOAD}; + + //Define the Classifier Object + open_ptrack::detection::HaarDispAdaClassifier HDAC_; + + int num_class1; + int num_class0; + int num_TP_class1; + int num_FP_class1; + int num_TP_class0; + int num_FP_class0; + + public: + virtual void + onInit() + { + node_ = getNodeHandle(); + private_node_ = getPrivateNodeHandle(); + + num_class1 = 0; + num_class0 = 0; + num_TP_class1 = 0; + num_FP_class1 = 0; + num_TP_class0 = 0; + num_FP_class0 = 0; + + int qs; + if(!private_node_.getParam("Q_Size",qs)){ + ROS_ERROR("could not find Q_Size parameter"); + qs=3; + } + + int NS; + if(!private_node_.getParam("num_Training_Samples",NS)){ + NS = 350; // default sets asside very little for training + private_node_.setParam("num_Training_Samples",NS); + } + HDAC_.setMaxSamples(NS); + + // Published Messages + pub_rois_ = node_.advertise("HaarDispAdaOutputRois",qs); + pub_Color_Image_ = node_.advertise("HaarDispAdaColorImage",qs); + pub_Disparity_Image_= node_.advertise("HaarDispAdaDisparityImage",qs); + + // Subscribe to Messages + sub_image_.subscribe(node_,"Color_Image",qs); + sub_disparity_.subscribe(node_, "Disparity_Image",qs); + sub_rois_.subscribe(node_,"input_rois",qs); + + // Sync the Synchronizer + approximate_sync_.reset(new ApproximateSync(ApproximatePolicy(qs), + sub_image_, + sub_disparity_, + sub_rois_)); + + approximate_sync_->registerCallback(boost::bind(&HaarDispAdaNodelet::imageCb, + this, + _1, + _2, + _3)); + } + int + get_mode() + { + + int callback_mode; + std::string mode; + if(!private_node_.getParam("mode", mode)){ + ROS_ERROR("no mode set in HaarDispAda_Nodelet"); + mode = "load"; + } + if(mode.compare("detect") == 0) + { + callback_mode = DETECT; + } + else if(mode.compare("train")==0) + { + callback_mode = TRAIN; + } + else if(mode.compare("load")==0) + { + callback_mode = LOAD; + } + else if(mode.compare("evaluate")==0) + { + callback_mode = EVALUATE; + } + else if(mode.compare("accumulate")==0) + { + callback_mode = ACCUMULATE; + } + else // default mode is accumulate + { + callback_mode = ACCUMULATE; + } + return(callback_mode); + } + void + imageCb(const ImageConstPtr& image_msg, + const DisparityImageConstPtr& disparity_msg, + const RoisConstPtr& rois_msg){ + + bool label_all; + vector L_in; + vector L_out; + vector R_in; + vector R_out; + string param_name; + string cfnm; + int numSamples; + double temp=0.0; + + // check encoding and create an intensity image from disparity image + assert(disparity_msg->image.encoding == image_encodings::TYPE_32FC1); + cv::Mat_ dmatrix(disparity_msg->image.height, + disparity_msg->image.width, + (float*) &disparity_msg->image.data[0], + disparity_msg->image.step); + + 
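// Note: dmatrix wraps the incoming 32-bit float disparity buffer in place (data pointer plus row step), so no pixel data is copied; the matrix is only valid while disparity_msg is alive.
+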
if(!private_node_.getParam("UseMissingDataMask",HDAC_.useMissingDataMask_)){ + HDAC_.useMissingDataMask_ = false; + } + + // **********************************************************************// + // between these comments lies a hack that accounts for the difference // + // between the focal lengths of the kinect's color camera and ir cameras // + // TODO, parameterize to disable this hack for the stereo data // + bool kinect_disparity_fix; + if(!private_node_.getParam("Kinect_Disparity_Fix",kinect_disparity_fix)){ + ROS_ERROR("could not find Kinect_Disparity_Fix parameter"); + kinect_disparity_fix = false; + } + + if(kinect_disparity_fix){ + int nrows = 434; + int ncols = 579; + int row_offset = (dmatrix.rows - nrows)/2; + int col_offset = (dmatrix.cols - ncols)/2; + cv::Mat Scaled_Disparity(nrows,ncols,CV_32FC1); + resize(dmatrix,Scaled_Disparity,cv::Size(ncols,nrows),0,0, CV_INTER_NN ); + for(int i=0;i(i,j) = 0.0; + } + } + for(int i=0;i(i+row_offset,j+col_offset) = Scaled_Disparity.at(i,j); + } + } + } + // **********************************************************************// + + // take the region of interest message and create vectors of ROIs and labels + R_in.clear(); + L_in.clear(); + for(unsigned int i=0;irois.size();i++){ + int x = rois_msg->rois[i].x; + int y = rois_msg->rois[i].y; + int w = rois_msg->rois[i].width; + int h = rois_msg->rois[i].height; + int l = rois_msg->rois[i].label; + Rect R(x,y,w,h); + R_in.push_back(R); + L_in.push_back(l); + } + + // do the work of the node + switch(get_mode()){ + case DETECT: + label_all = false; + HDAC_.detect(R_in,L_in,dmatrix,R_out,L_out,label_all); + output_rois_.rois.clear(); + output_rois_.header.stamp = image_msg->header.stamp; + output_rois_.header.frame_id = image_msg->header.frame_id; + ROS_INFO("HaarDispAda found %d objects",(int)L_out.size()); + for(unsigned int i=0; i= NS){ + private_node_.setParam("mode", std::string("train")); + ROS_ERROR("DONE Accumulating, switching to train mode"); + } + } + else{ + ROS_ERROR("could not find num_Training_Samples"); + } + break; + case TRAIN: + if(!private_node_.getParam("classifier_file",cfnm)){ + ROS_ERROR("could not find classifier_file param"); + } + if(!private_node_.getParam("HaarDispAdaPrior",temp)){ + ROS_ERROR("could not find HaarDispAdaPrior param"); + } + HDAC_.HaarDispAdaPrior_ = temp; + ROS_ERROR("Submitting %d Samples to Train ouput= %s",HDAC_.numSamples_,cfnm.c_str()); + HDAC_.train(cfnm); + private_node_.setParam("mode", std::string("evaluate")); + ROS_ERROR("DONE TRAINING, switching to evaluate mode"); + break; + case EVALUATE: + { + if(!HDAC_.loaded){ + if(!private_node_.getParam("classifier_file",cfnm)){ + ROS_ERROR("could not find classifier_file param"); + } + ROS_ERROR("HaarDispAda LOADING %s",cfnm.c_str()); + HDAC_.load(cfnm); + } + label_all = false; + HDAC_.detect(R_in,L_in,dmatrix,R_out,L_out,label_all); + + int total0_in_list=0; + int total1_in_list=0; + int fp_in_list=0; + int tp_in_list=0; + + // count the input labels + for(unsigned int i=0; i +// PLUGINLIB_DECLARE_CLASS(pkg,class_name,class_type,base_class_type) +PLUGINLIB_DECLARE_CLASS(HaarDispAda,haardispada_nodelet, open_ptrack::detection::HaarDispAdaNodelet, nodelet::Nodelet) diff --git a/detection/conf/ground_.txt b/detection/conf/ground_.txt new file mode 100644 index 0000000..07e4b47 --- /dev/null +++ b/detection/conf/ground_.txt @@ -0,0 +1,4 @@ +1.59075e-37 +0 +1.4013e-45 +1.45 \ No newline at end of file diff --git a/detection/conf/ground_based_people_detector_zed.yaml 
b/detection/conf/ground_based_people_detector_zed.yaml new file mode 100644 index 0000000..2690f18 --- /dev/null +++ b/detection/conf/ground_based_people_detector_zed.yaml @@ -0,0 +1,52 @@
+#######################
+## Ground estimation ##
+#######################
+# Ground estimation mode - 0:manual, 1:semi-auto, 2:auto with user validation, 3:fully automatic (default 0)
+ground_estimation_mode: 3
+# Flag stating if the ground should be read from file, if present:
+read_ground_from_file: false
+# Flag that locks the ground plane update:
+lock_ground: false
+# Threshold on the ratio of valid points needed for ground estimation:
+valid_points_threshold: 0.1
+
+############################
+## Background subtraction ##
+############################
+# Flag enabling/disabling background subtraction:
+background_subtraction: false
+# Resolution of the octree representing the background:
+background_resolution: 0.3
+# Seconds to use to learn the background:
+background_seconds: 3.0
+
+##############################################
+## Ground based people detection parameters ##
+##############################################
+# Minimum detection confidence:
+ground_based_people_detection_min_confidence: -5
+# Minimum person height:
+minimum_person_height: 0.5
+# Maximum person height:
+maximum_person_height: 2.3
+# Kinect's best range is 5.0 m; greater distances are possible with larger error
+max_distance: 10.0
+# Point cloud downsampling factor:
+sampling_factor: 4
+# Flag stating if classifiers based on RGB image should be used or not:
+use_rgb: true
+# Threshold on image luminance. If luminance is over this threshold, classifiers on RGB image are also used:
+minimum_luminance: 120
+# If true, sensor tilt angle wrt ground plane is compensated to improve people detection:
+sensor_tilt_compensation: false
+# Minimum distance between two persons:
+heads_minimum_distance: 0.3
+# Voxel size used to downsample the point cloud (lower: detection slower but more precise; higher: detection faster but less precise):
+voxel_size: .06
+# Denoising flag.
If true, a statistical filter is applied to the point cloud to remove noise:
+apply_denoising: false
+# MeanK for denoising (the higher it is, the stronger is the filtering):
+mean_k_denoising: 5
+# Standard deviation for denoising (the lower it is, the stronger is the filtering):
+std_dev_denoising: 0.3
+
diff --git a/detection/conf/ground_based_people_detector_zed.yaml~ b/detection/conf/ground_based_people_detector_zed.yaml~ new file mode 100644 index 0000000..550747f --- /dev/null +++ b/detection/conf/ground_based_people_detector_zed.yaml~ @@ -0,0 +1,52 @@
+#######################
+## Ground estimation ##
+#######################
+# Ground estimation mode - 0:manual, 1:semi-auto, 2:auto with user validation, 3:fully automatic (default 0)
+ground_estimation_mode: 0
+# Flag stating if the ground should be read from file, if present:
+read_ground_from_file: false
+# Flag that locks the ground plane update:
+lock_ground: false
+# Threshold on the ratio of valid points needed for ground estimation:
+valid_points_threshold: 0.1
+
+############################
+## Background subtraction ##
+############################
+# Flag enabling/disabling background subtraction:
+background_subtraction: false
+# Resolution of the octree representing the background:
+background_resolution: 0.3
+# Seconds to use to learn the background:
+background_seconds: 3.0
+
+##############################################
+## Ground based people detection parameters ##
+##############################################
+# Minimum detection confidence:
+ground_based_people_detection_min_confidence: -5
+# Minimum person height:
+minimum_person_height: 0.5
+# Maximum person height:
+maximum_person_height: 2.3
+# Kinect's best range is 5.0 m; greater distances are possible with larger error
+max_distance: 10.0
+# Point cloud downsampling factor:
+sampling_factor: 4
+# Flag stating if classifiers based on RGB image should be used or not:
+use_rgb: true
+# Threshold on image luminance. If luminance is over this threshold, classifiers on RGB image are also used:
+minimum_luminance: 120
+# If true, sensor tilt angle wrt ground plane is compensated to improve people detection:
+sensor_tilt_compensation: false
+# Minimum distance between two persons:
+heads_minimum_distance: 0.3
+# Voxel size used to downsample the point cloud (lower: detection slower but more precise; higher: detection faster but less precise):
+voxel_size: .06
+# Denoising flag. If true, a statistical filter is applied to the point cloud to remove noise:
+apply_denoising: false
+# MeanK for denoising (the higher it is, the stronger is the filtering):
+mean_k_denoising: 5
+# Standard deviation for denoising (the lower it is, the stronger is the filtering):
+std_dev_denoising: 0.3
+
diff --git a/detection/conf/ground_zed_tracked_frame.txt b/detection/conf/ground_zed_tracked_frame.txt new file mode 100644 index 0000000..ccd6555 --- /dev/null +++ b/detection/conf/ground_zed_tracked_frame.txt @@ -0,0 +1,4 @@
+2.94239e-07
+4.57398e-41
+1.49029e-38
+0
\ No newline at end of file
diff --git a/detection/include/open_ptrack/detection/ground_based_people_detection_app.h b/detection/include/open_ptrack/detection/ground_based_people_detection_app.h index f91c1b0..63c5620 100644 --- a/detection/include/open_ptrack/detection/ground_based_people_detection_app.h +++ b/detection/include/open_ptrack/detection/ground_based_people_detection_app.h @@ -317,7 +317,7 @@ namespace open_ptrack
 * \return The cloud after pre-processing.
*/ PointCloudPtr
- preprocessCloud (PointCloudPtr& input_cloud);
+ preprocessCloud (PointCloudPtr& input_cloud);//, void (*publishBeforeVoxelFilterCloud)(PointCloudPtr&));
 /** * \brief Perform people detection on the input data and return people clusters information. @@ -327,7 +327,7 @@ namespace open_ptrack * \return true if the compute operation is successful, false otherwise. */ bool
- compute (std::vector<open_ptrack::detection::PersonCluster<PointT> >& clusters);
+ compute (std::vector<open_ptrack::detection::PersonCluster<PointT> >& clusters);//, void (*publishExtractRGB)(PointCloudPtr&), void (*publishPreProcessed)(PointCloudPtr&), void (*publishGroundRemoval)(PointCloudPtr&), void (*publishBeforeVoxelFilterCloud)(PointCloudPtr&));
 protected: /** \brief sampling factor used to downsample the point cloud */
diff --git a/detection/include/open_ptrack/detection/ground_based_people_detection_app.h~ b/detection/include/open_ptrack/detection/ground_based_people_detection_app.h~ new file mode 100644 index 0000000..66c2ed1 --- /dev/null +++ b/detection/include/open_ptrack/detection/ground_based_people_detection_app.h~ @@ -0,0 +1,427 @@
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Copyright (c) 2013-, Open Perception, Inc.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * ground_based_people_detection_app.h
+ * Created on: Nov 30, 2012
+ * Author: Matteo Munaro
+ */
+
+#ifndef OPEN_PTRACK_DETECTION_GROUND_BASED_PEOPLE_DETECTION_APP_H_
+#define OPEN_PTRACK_DETECTION_GROUND_BASED_PEOPLE_DETECTION_APP_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+namespace open_ptrack
+{
+ namespace detection
+ {
+ /** \brief GroundBasedPeopleDetectionApp performs people detection on RGB-D data having as input the ground plane coefficients.
+ * It implements the people detection algorithm described here:
+ * M. Munaro, F. Basso and E.
Menegatti, + * Tracking people within groups with RGB-D data, + * In Proceedings of the International Conference on Intelligent Robots and Systems (IROS) 2012, Vilamoura (Portugal), 2012. + * + * \author Matteo Munaro + */ + template class GroundBasedPeopleDetectionApp; + + template + class GroundBasedPeopleDetectionApp + { + public: + + typedef pcl::PointCloud PointCloud; + typedef boost::shared_ptr PointCloudPtr; + typedef boost::shared_ptr PointCloudConstPtr; + + /** \brief Constructor. */ + GroundBasedPeopleDetectionApp (); + + /** \brief Destructor. */ + virtual ~GroundBasedPeopleDetectionApp (); + + /** + * \brief Set the pointer to the input cloud. + * + * \param[in] cloud A pointer to the input cloud. + */ + void + setInputCloud (PointCloudPtr& cloud); + + /** + * \brief Set the ground coefficients. + * + * \param[in] ground_coeffs Vector containing the four plane coefficients. + */ + void + setGround (Eigen::VectorXf& ground_coeffs); + + /** + * \brief Set sampling factor. + * + * \param[in] sampling_factor Value of the downsampling factor (in each dimension) which is applied to the raw point cloud (default = 1.). + */ + void + setSamplingFactor (int sampling_factor); + + /** + * \brief Set voxel size. + * + * \param[in] voxel_size Value of the voxel dimension (default = 0.06m.). + */ + void + setVoxelSize (float voxel_size); + + /** + * \brief Set denoising parameters. + * + * \param[in] apply_denoising Denoising flag. If true, a statistical filter is applied to the point cloud to remove noise + * \param[in] mean_k_denoising MeanK for denoising (the higher it is, the stronger is the filtering) + * \param[in] std_dev_denoising Standard deviation for denoising (the lower it is, the stronger is the filtering) + */ + void + setDenoisingParameters (bool apply_denoising, int mean_k_denoising, float std_dev_denoising); + + /** + * \brief Set points maximum distance from the sensor. + * + * \param[in] max_distance Set points maximum distance from the sensor (default = 50m.). + */ + void + setMaxDistance (float max_distance); + + /** + * \brief Set intrinsic parameters of the RGB camera. + * + * \param[in] intrinsics_matrix RGB camera intrinsic parameters matrix. + */ + void + setIntrinsics (Eigen::Matrix3f intrinsics_matrix); + + /** + * \brief Set SVM-based person classifier. + * + * \param[in] person_classifier Needed for people detection on RGB data. + */ + void + setClassifier (open_ptrack::detection::PersonClassifier person_classifier); + + /** + * \brief Set sensor orientation (vertical = true means portrait mode, vertical = false means landscape mode). + * + * \param[in] vertical Set landscape/portait camera orientation (default = false). + */ + void + setSensorPortraitOrientation (bool vertical); + + /** + * \brief Set head_centroid_ to true (person centroid is in the head) or false (person centroid is the whole body centroid). + * + * \param[in] head_centroid Set the location of the person centroid (head or body center) (default = true). + */ + void + setHeadCentroid (bool head_centroid); + + /** + * \brief Set minimum and maximum allowed height for a person cluster. + * + * \param[in] min_height Minimum allowed height for a person cluster (default = 1.3). + * \param[in] max_height Maximum allowed height for a person cluster (default = 2.3). + */ + void + setHeightLimits (float min_height, float max_height); + + /** + * \brief Set minimum and maximum allowed number of points for a person cluster. 
+ * + * \param[in] min_points Minimum allowed number of points for a person cluster. + * \param[in] max_points Maximum allowed number of points for a person cluster. + */ + void + setDimensionLimits (int min_points, int max_points); + + /** + * \brief Set minimum distance between persons' heads. + * + * \param[in] heads_minimum_distance Minimum allowed distance between persons' heads (default = 0.3). + */ + void + setMinimumDistanceBetweenHeads (float heads_minimum_distance); + + /** + * \brief Set if RGB should be used or not for people detection. + * + * \param[in] use_rgb True: RGB is used, false: RGB is not used. + */ + void + setUseRGB (bool use_rgb); + + /** + * \brief Set if sensor tilt angle wrt ground plane should be compensated to improve people detection + * + * \param[in] sensor_tilt_compensation True: sensor tilt is compensated, false: sensor tilt is not compensated. + */ + void + setSensorTiltCompensation ( bool sensor_tilt_compensation); + + /** + * \brief Set background subtraction parameters + * + * \param[in] background_subtraction True: background subtraction is performed, false: background subtraction is not performed. + * \param[in] background_octree_resolution Resolution of the octree. + * \param[in] background_cloud Point cloud containing the background. + */ + void + setBackground ( bool background_subtraction, float background_octree_resolution, PointCloudPtr& background_cloud); + + /** + * \brief Get minimum and maximum allowed height for a person cluster. + * + * \param[out] min_height Minimum allowed height for a person cluster. + * \param[out] max_height Maximum allowed height for a person cluster. + */ + void + getHeightLimits (float& min_height, float& max_height); + + /** + * \brief Get minimum and maximum allowed number of points for a person cluster. + * + * \param[out] min_points Minimum allowed number of points for a person cluster. + * \param[out] max_points Maximum allowed number of points for a person cluster. + */ + void + getDimensionLimits (int& min_points, int& max_points); + + /** + * \brief Get minimum distance between persons' heads. + */ + float + getMinimumDistanceBetweenHeads (); + + /** + * \brief Get floor coefficients. + */ + Eigen::VectorXf + getGround (); + + /** + * \brief Get pointcloud after voxel grid filtering and ground removal. + */ + PointCloudPtr + getNoGroundCloud (); + + /** + * \brief Get mean luminance of the RGB data. + */ + float + getMeanLuminance (); + + /** + * \brief Get the transforms to be used to compensate sensor tilt. + * + * \param[in/out] transform Direct transform to compensate sensor tilt. + * \param[in/out] anti_transform Inverse transform wrt transform. + */ + void + getTiltCompensationTransforms (Eigen::Affine3f& transform, Eigen::Affine3f& anti_transform); + + /** + * \brief Extract RGB information from a point cloud and output the corresponding RGB point cloud. + * + * \param[in] input_cloud A pointer to a point cloud containing also RGB information. + * \param[out] output_cloud A pointer to a RGB point cloud. + */ + void + extractRGBFromPointCloud (PointCloudPtr input_cloud, pcl::PointCloud::Ptr& output_cloud); + + /** + * \brief Swap rows/cols dimensions of a RGB point cloud (90 degrees counterclockwise rotation). + * + * \param[in,out] cloud A pointer to a RGB point cloud. + */ + void + swapDimensions (pcl::PointCloud::Ptr& cloud); + + /** + * \brief Rotate the input cloud according to transform + * + * \param[in] cloud Pointer to the input point cloud. 
+ * \param[in] transform Transform to be applied to the input cloud. + * + * \return A pointer to the rotated cloud. + */ + PointCloudPtr + rotateCloud (PointCloudPtr cloud, Eigen::Affine3f transform); + + /** + * \brief Rotate input plane coefficients according to transform + * + * \param[in] ground_coeffs Plane coefficients. + * \param[in] transform Transform to be applied to ground_coeffs. + * + * \return Rotated plane coefficients. + */ + Eigen::VectorXf + rotateGround (Eigen::VectorXf ground_coeffs, Eigen::Affine3f transform); + + /** + * \brief Perform pre-processing operations on the input cloud (downsampling, filtering). + * + * \param[in] input_cloud Input cloud. + * + * \return The cloud after pre-processing. + */ + PointCloudPtr + preprocessCloud (PointCloudPtr& input_cloud, void (*publishBeforeVoxelFilterCloud)(PointCloudPtr&)); + + /** + * \brief Perform people detection on the input data and return people clusters information. + * + * \param[out] clusters Vector of PersonCluster. + * + * \return true if the compute operation is successful, false otherwise. + */ + bool + compute (std::vector >& clusters, void (*publishExtractRGB)(PointCloudPtr&), void (*publishPreProcessed)(PointCloudPtr&), void (*publishGroundRemoval)(PointCloudPtr&), void (*publishBeforeVoxelFilterCloud)(PointCloudPtr&)); + + protected: + /** \brief sampling factor used to downsample the point cloud */ + int sampling_factor_; + + /** \brief voxel size */ + float voxel_size_; + + /** \brief max distance from the sensor */ + float max_distance_; + + /** \brief ground plane coefficients */ + Eigen::VectorXf ground_coeffs_; + + /** \brief ground plane normalization factor */ + float sqrt_ground_coeffs_; + + /** \brief pointer to the input cloud */ + PointCloudPtr cloud_; + + /** \brief pointer to the cloud after voxel grid filtering and ground removal */ + PointCloudPtr no_ground_cloud_; + + /** \brief pointer to a RGB cloud corresponding to cloud_ */ + pcl::PointCloud::Ptr rgb_image_; + + /** \brief person clusters maximum height from the ground plane */ + float max_height_; + + /** \brief person clusters minimum height from the ground plane */ + float min_height_; + + /** \brief if true, the sensor is considered to be vertically placed (portrait mode) */ + bool vertical_; + + /** \brief if true, the person centroid is computed as the centroid of the cluster points belonging to the head; + * if false, the person centroid is computed as the centroid of the whole cluster points (default = true) */ + bool head_centroid_; + + /** \brief maximum number of points for a person cluster */ + int max_points_; + + /** \brief minimum number of points for a person cluster */ + int min_points_; + + /** \brief true if min_points and max_points have been set by the user, false otherwise */ + bool dimension_limits_set_; + + /** \brief minimum distance between persons' heads */ + float heads_minimum_distance_; + + /** \brief intrinsic parameters matrix of the RGB camera */ + Eigen::Matrix3f intrinsics_matrix_; + + /** \brief SVM-based person classifier */ + open_ptrack::detection::PersonClassifier person_classifier_; + + /** \brief flag stating if the classifier has been set or not */ + bool person_classifier_set_flag_; + + /** \brief flag stating if RGB information should be used or not for people detection */ + bool use_rgb_; + + /** \brief Mean luminance of the RGB data */ + float mean_luminance_; + + /** \brief flag stating if the sensor tilt with respect to the ground plane should be compensated */ + bool 
sensor_tilt_compensation_; + + /** \brief transforms used for compensating sensor tilt with respect to the ground plane */ + Eigen::Affine3f transform_, anti_transform_; + + /** \brief Flag stating if background subtraction should be applied or not */ + bool background_subtraction_; + + /** \brief Octree representing the background */ + pcl::octree::OctreePointCloud *background_octree_; + + /** \brief Frame counter */ + unsigned int frame_counter_; + + /** \brief Denoising flag. If true, a statistical filter is applied to the point cloud to remove noise: */ + bool apply_denoising_; + + /** \brief MeanK for denoising (the higher it is, the stronger is the filtering): */ + int mean_k_denoising_; + + /** \brief Standard deviation for denoising (the lower it is, the stronger is the filtering): */ + float std_dev_denoising_; + +// pcl::visualization::PCLVisualizer::Ptr denoising_viewer_; + + }; + } /* namespace detection */ +} /* namespace open_ptrack */ +#include +#endif /* OPEN_PTRACK_DETECTION_GROUND_BASED_PEOPLE_DETECTION_APP_H_ */ diff --git a/detection/include/open_ptrack/detection/haardispada.h~ b/detection/include/open_ptrack/detection/haardispada.h~ new file mode 100644 index 0000000..696270a --- /dev/null +++ b/detection/include/open_ptrack/detection/haardispada.h~ @@ -0,0 +1,142 @@ +/* +Software License Agreement (BSD License) + +Copyright (c) 2013, Southwest Research Institute +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Southwest Research Institute, nor the names + of its contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef OPEN_PTRACK_DETECTION_HAARDISPADA_H +#define OPEN_PTRACK_DETECTION_HAARDISPADA_H + +/***************************************************************************** + ** Includes + *****************************************************************************/ +#include +#include +#include +#include +#include +#include "/usr/include/opencv2/core/core.hpp" +#include "/usr/include/opencv/cv.h" +#include "/usr/include/opencv/ml.h" +#include +#include + +/***************************************************************************** + ** Namespaces + *****************************************************************************/ + +namespace open_ptrack +{ + namespace detection + { + using std::string; + using std::vector; + using cv::Mat; + using cv::Rect; + using cv::Range; + using cv::Size; + using cv::INTER_AREA; + + /***************************************************************************** + ** Class + *****************************************************************************/ + + class HaarDispAdaClassifier{ + + public: + HaarDispAdaClassifier(); + HaarDispAdaClassifier(string file_name); + void init(); + bool useMissingDataMask_; + bool loaded; + void load(string filename); + int addToTraining(vector &R_in, vector &Labels_in, Mat &Disparity_in); + void train(string filename); + void detect(vector &R_in, + vector &L_in, + Mat &D_in, + vector &R_out, + vector &L_out, + bool label_all); + + void detect(vector &R_in, + vector &L_in, + Mat &D_in, + vector &R_out, + vector &L_out, + vector &C_out, + bool label_all); + + int test(); + int numSamples_; + float HaarDispAdaPrior_; + void setMaxSamples(int n); + int getMaxSamples(){ return(maxSamples_);}; + float getMinConfidence(); + void setMinConfidence(float ); + + private: + CvBoost HDAC_; + string classifier_filename_; // for loading and saving + int maxSamples_; + int num_filters_; + Mat integralImage_; + Mat trainingSamples_; + Mat trainingLabels_; + Mat trainingMissingMask_; + Mat AvePosTrainingImg_; + + // images set by alpah_map() + Mat map; // set by alpha_map() in haar_response() + + // images set by setDImageRoi() + Mat Image4Haar; + Mat haar16x16; + Mat haar8x8; + Mat haar4x4; + + // Minimum classifier confidence for people detection: + float min_confidence_; + + // private functions + void setDImageRoi(Rect &R_in, Mat &I_in); + void setDImageROI_fast(Rect & R_in, Mat &I_in); + void alpha_map(int idx); + void print_map(int k);// a debugging tool + void print_scaled_map(cv::Mat &A);// a debugging tool + void print_Image4Haar();// a debugging tool + int haar_features(Mat & HF, Mat & MH); + int haar_features_fast(Mat & HF); + void mask_scale(Mat & input, Mat & output); + float find_central_disparity(int x, int y, int height, int width, Mat& D_in); + }; + } // namespace detection +} // namespace open_ptrack + +#endif /* OPEN_PTRACK_DETECTION_HAARDISPADA_H */ + diff --git a/detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp b/detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp index 25b2f7b..8e31e86 100644 --- a/detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp +++ b/detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp @@ -319,7 +319,7 @@ open_ptrack::detection::GroundBasedPeopleDetectionApp::rotateGround(Eige } template typename open_ptrack::detection::GroundBasedPeopleDetectionApp::PointCloudPtr -open_ptrack::detection::GroundBasedPeopleDetectionApp::preprocessCloud (PointCloudPtr& input_cloud) 
+open_ptrack::detection::GroundBasedPeopleDetectionApp<PointT>::preprocessCloud (PointCloudPtr& input_cloud)//, void (*publishBeforeVoxelFilterCloud)(PointCloudPtr&))
 {
 // Downsample of sampling_factor in every dimension:
 PointCloudPtr cloud_downsampled(new PointCloud);
@@ -370,24 +370,38 @@ open_ptrack::detection::GroundBasedPeopleDetectionApp<PointT>::preprocessCloud (
 PointCloudPtr cloud_filtered(new PointCloud);
 pcl::VoxelGrid<PointT> voxel_grid_filter_object;
 if (apply_denoising_)
+ {
+ //std::cout << "APPLY DENOISING" << std::endl;
 voxel_grid_filter_object.setInputCloud(cloud_denoised);
+ }
 else
 {
 if (sampling_factor_ != 1)
+ {
+ //std::cout << "APPLY DownSampled" << std::endl;
 voxel_grid_filter_object.setInputCloud(cloud_downsampled);
+ }
 else
+ {
+ //std::cout << "Use Input Cloud" << std::endl;
 voxel_grid_filter_object.setInputCloud(input_cloud);
+ }
 }
+
+ //std::cout << "Voxel Size " << voxel_size_ << " Max Distance " << max_distance_ << std::endl;
+
 voxel_grid_filter_object.setLeafSize (voxel_size_, voxel_size_, voxel_size_);
 voxel_grid_filter_object.setFilterFieldName("z");
 voxel_grid_filter_object.setFilterLimits(0.0, max_distance_);
 voxel_grid_filter_object.filter (*cloud_filtered);
+
+ //(*publishBeforeVoxelFilterCloud)(cloud_filtered);
 return cloud_filtered;
 }
 template <typename PointT> bool
-open_ptrack::detection::GroundBasedPeopleDetectionApp<PointT>::compute (std::vector<open_ptrack::detection::PersonCluster<PointT> >& clusters)
+open_ptrack::detection::GroundBasedPeopleDetectionApp<PointT>::compute (std::vector<open_ptrack::detection::PersonCluster<PointT> >& clusters)//, void (*publishExtractRGB)(PointCloudPtr&), void (*publishPreProcessed)(PointCloudPtr&), void (*publishGroundRemoval)(PointCloudPtr&), void (*publishBeforeVoxelFilterCloud)(PointCloudPtr&))
 {
 frame_counter_++;
@@ -434,10 +448,16 @@ open_ptrack::detection::GroundBasedPeopleDetectionApp<PointT>::compute (std::vec
 // Fill rgb image:
 rgb_image_->points.clear(); // clear RGB pointcloud
 extractRGBFromPointCloud(cloud_, rgb_image_); // fill RGB pointcloud
+
+ //TODO publish here extractedRGB
+ //(*publishExtractRGB)(cloud_);
 // Point cloud pre-processing (downsampling and filtering):
 PointCloudPtr cloud_filtered(new PointCloud);
- cloud_filtered = preprocessCloud (cloud_);
+ cloud_filtered = preprocessCloud (cloud_);//, publishBeforeVoxelFilterCloud);
+
+ //TODO publish here preProcessed
+ //(*publishPreProcessed)(cloud_filtered);
 if (use_rgb_)
 {
@@ -476,6 +496,9 @@ open_ptrack::detection::GroundBasedPeopleDetectionApp<PointT>::compute (std::vec
 PCL_INFO ("No groundplane update!\n");
 }
 }
+
+ //TODO publish here groundRemoval
+ //(*publishGroundRemoval)(no_ground_cloud_);
 // Background Subtraction (optional):
 if (background_subtraction_)
@@ -490,12 +513,34 @@ open_ptrack::detection::GroundBasedPeopleDetectionApp<PointT>::compute (std::vec
 }
 no_ground_cloud_ = foreground_cloud;
 }
+
+
+ std::vector<int> myvector;
+
+ int count = 0;
+ for(typename pcl::PointCloud<PointT>::iterator it = no_ground_cloud_->begin(); it != no_ground_cloud_->end(); it++)
+ {
+ if (!std::isfinite (it->x) || !std::isfinite (it->y) || !std::isfinite (it->z))
+ {
+ myvector.push_back(count);
+ }
+ count++;
+ }
+
+ count = 0;
+ for (unsigned i=0; i<myvector.size(); i++)
+ {
+ no_ground_cloud_->erase(no_ground_cloud_->begin() + myvector[i] - count);
+ count++;
+ }
+
 if (no_ground_cloud_->points.size() > 0)
 {
 // Euclidean Clustering:
 std::vector<pcl::PointIndices> cluster_indices;
 typename pcl::search::KdTree<PointT>::Ptr tree (new pcl::search::KdTree<PointT>);
+ tree->setInputCloud(no_ground_cloud_);
 pcl::EuclideanClusterExtraction<PointT> ec;
 ec.setClusterTolerance(2 * 0.06);
@@ -504,7 +549,7 @@ open_ptrack::detection::GroundBasedPeopleDetectionApp<PointT>::compute (std::vec
 ec.setSearchMethod(tree);
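+ // Rationale for the non-finite point removal added above: ZED point clouds contain NaN points wherever stereo matching fails, and the KdTree used for Euclidean clustering cannot index non-finite points, so they are erased before setInputCloud()/extract() run.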
ec.setInputCloud(no_ground_cloud_); ec.extract(cluster_indices); - + // Sensor tilt compensation to improve people detection: PointCloudPtr no_ground_cloud_rotated(new PointCloud); Eigen::VectorXf ground_coeffs_new; diff --git a/detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp~ b/detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp~ new file mode 100644 index 0000000..2be5728 --- /dev/null +++ b/detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp~ @@ -0,0 +1,640 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + * ground_based_people_detection_app.hpp + * Created on: Nov 30, 2012 + * Author: Matteo Munaro + */ + +#ifndef OPEN_PTRACK_DETECTION_GROUND_BASED_PEOPLE_DETECTION_APP_HPP_ +#define OPEN_PTRACK_DETECTION_GROUND_BASED_PEOPLE_DETECTION_APP_HPP_ + +#include + +template +open_ptrack::detection::GroundBasedPeopleDetectionApp::GroundBasedPeopleDetectionApp () +{ +// denoising_viewer_ = pcl::visualization::PCLVisualizer::Ptr (new pcl::visualization::PCLVisualizer("filtering_viewer")); + + rgb_image_ = pcl::PointCloud::Ptr(new pcl::PointCloud); + + // set default values for optional parameters: + sampling_factor_ = 1; + voxel_size_ = 0.06; + max_distance_ = 50.0; + vertical_ = false; + head_centroid_ = true; + min_height_ = 1.3; + max_height_ = 2.3; + min_points_ = 30; // this value is adapted to the voxel size in method "compute" + max_points_ = 5000; // this value is adapted to the voxel size in method "compute" + dimension_limits_set_ = false; + heads_minimum_distance_ = 0.3; + use_rgb_ = true; + mean_luminance_ = 0.0; + sensor_tilt_compensation_ = false; + background_subtraction_ = false; + + // set flag values for mandatory parameters: + sqrt_ground_coeffs_ = std::numeric_limits::quiet_NaN(); + person_classifier_set_flag_ = false; + frame_counter_ = 0; +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::setInputCloud (PointCloudPtr& cloud) +{ + cloud_ = cloud; +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::setGround (Eigen::VectorXf& ground_coeffs) +{ + ground_coeffs_ = ground_coeffs; + sqrt_ground_coeffs_ = (ground_coeffs - Eigen::Vector4f(0.0f, 0.0f, 0.0f, ground_coeffs(3))).norm(); +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::setSamplingFactor (int sampling_factor) +{ + sampling_factor_ = sampling_factor; +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::setVoxelSize (float voxel_size) +{ + voxel_size_ = voxel_size; +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::setDenoisingParameters (bool apply_denoising, int mean_k_denoising, float std_dev_denoising) +{ + apply_denoising_ = apply_denoising; + mean_k_denoising_ = mean_k_denoising; + std_dev_denoising_ = std_dev_denoising; +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::setMaxDistance (float max_distance) +{ + max_distance_ = max_distance; +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::setIntrinsics (Eigen::Matrix3f intrinsics_matrix) +{ + intrinsics_matrix_ = intrinsics_matrix; +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::setClassifier (open_ptrack::detection::PersonClassifier person_classifier) +{ + person_classifier_ = person_classifier; + person_classifier_set_flag_ = true; +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::setSensorPortraitOrientation (bool vertical) +{ + vertical_ = vertical; +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::setHeightLimits (float min_height, float max_height) +{ + min_height_ = min_height; + max_height_ = max_height; +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::setDimensionLimits (int min_points, int max_points) +{ + min_points_ = min_points; + max_points_ = max_points; + dimension_limits_set_ = true; +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::setMinimumDistanceBetweenHeads (float heads_minimum_distance) +{ + heads_minimum_distance_= heads_minimum_distance; 
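A configuration sketch for orientation, not part of the patch: the defaults above are typically overridden through the setters that follow. PointT, the variable name, and the values shown are assumptions based on the defaults listed above:

    // Hypothetical setup mirroring the listed defaults (values are illustrative):
    open_ptrack::detection::GroundBasedPeopleDetectionApp<pcl::PointXYZRGB> people_detector;
    people_detector.setVoxelSize(0.06f);            // voxel leaf size in meters
    people_detector.setMaxDistance(50.0f);          // clip points beyond 50 m
    people_detector.setHeightLimits(1.3f, 2.3f);    // accepted person heights
    people_detector.setSamplingFactor(1);           // no extra image-space downsampling
    people_detector.setSensorPortraitOrientation(false);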
+} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::setHeadCentroid (bool head_centroid) +{ + head_centroid_ = head_centroid; +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::setSensorTiltCompensation (bool sensor_tilt_compensation) +{ + sensor_tilt_compensation_ = sensor_tilt_compensation; +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::setUseRGB (bool use_rgb) +{ + use_rgb_ = use_rgb; +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::setBackground (bool background_subtraction, float background_octree_resolution, PointCloudPtr& background_cloud) +{ + background_subtraction_ = background_subtraction; + + background_octree_ = new pcl::octree::OctreePointCloud(background_octree_resolution); + background_octree_->defineBoundingBox(-max_distance_/2, -max_distance_/2, 0.0, max_distance_/2, max_distance_/2, max_distance_); + background_octree_->setInputCloud (background_cloud); + background_octree_->addPointsFromInputCloud (); +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::getHeightLimits (float& min_height, float& max_height) +{ + min_height = min_height_; + max_height = max_height_; +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::getDimensionLimits (int& min_points, int& max_points) +{ + min_points = min_points_; + max_points = max_points_; +} + +template float +open_ptrack::detection::GroundBasedPeopleDetectionApp::getMinimumDistanceBetweenHeads () +{ + return (heads_minimum_distance_); +} + +template Eigen::VectorXf +open_ptrack::detection::GroundBasedPeopleDetectionApp::getGround () +{ + if (sqrt_ground_coeffs_ != sqrt_ground_coeffs_) + { + PCL_ERROR ("[open_ptrack::detection::GroundBasedPeopleDetectionApp::getGround] Floor parameters have not been set or they are not valid!\n"); + } + return (ground_coeffs_); +} + +template typename open_ptrack::detection::GroundBasedPeopleDetectionApp::PointCloudPtr +open_ptrack::detection::GroundBasedPeopleDetectionApp::getNoGroundCloud () +{ + return (no_ground_cloud_); +} + +template float +open_ptrack::detection::GroundBasedPeopleDetectionApp::getMeanLuminance () +{ + return (mean_luminance_); +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::getTiltCompensationTransforms (Eigen::Affine3f& transform, Eigen::Affine3f& anti_transform) +{ + transform = transform_; + anti_transform = anti_transform_; +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::extractRGBFromPointCloud (PointCloudPtr input_cloud, pcl::PointCloud::Ptr& output_cloud) +{ + // Extract RGB information from a point cloud and output the corresponding RGB point cloud + output_cloud->points.resize(input_cloud->height*input_cloud->width); + output_cloud->width = input_cloud->width; + output_cloud->height = input_cloud->height; + + pcl::RGB rgb_point; + for (int j = 0; j < input_cloud->width; j++) + { + for (int i = 0; i < input_cloud->height; i++) + { + rgb_point.r = (*input_cloud)(j,i).r; + rgb_point.g = (*input_cloud)(j,i).g; + rgb_point.b = (*input_cloud)(j,i).b; + (*output_cloud)(j,i) = rgb_point; + } + } +} + +template void +open_ptrack::detection::GroundBasedPeopleDetectionApp::swapDimensions (pcl::PointCloud::Ptr& cloud) +{ + pcl::PointCloud::Ptr output_cloud(new pcl::PointCloud); + output_cloud->points.resize(cloud->height*cloud->width); + output_cloud->width = cloud->height; + output_cloud->height = cloud->width; + for (int i = 0; i < cloud->width; i++) + { + for (int j = 0; 
j < cloud->height; j++) + { + (*output_cloud)(j,i) = (*cloud)(cloud->width - i - 1, j); + } + } + cloud = output_cloud; +} + +template typename open_ptrack::detection::GroundBasedPeopleDetectionApp::PointCloudPtr +open_ptrack::detection::GroundBasedPeopleDetectionApp::rotateCloud(PointCloudPtr cloud, Eigen::Affine3f transform ) +{ + PointCloudPtr rotated_cloud (new PointCloud); + pcl::transformPointCloud(*cloud, *rotated_cloud, transform); + rotated_cloud->header.frame_id = cloud->header.frame_id; + + return rotated_cloud; + +} + +template Eigen::VectorXf +open_ptrack::detection::GroundBasedPeopleDetectionApp::rotateGround(Eigen::VectorXf ground_coeffs, Eigen::Affine3f transform){ + + Eigen::VectorXf ground_coeffs_new; + + // Create a cloud with three points on the input ground plane: + pcl::PointCloud::Ptr dummy (new pcl::PointCloud); + + pcl::PointXYZRGB first = pcl::PointXYZRGB(0.0,0.0,0.0); + first.x = 1.0; + pcl::PointXYZRGB second = pcl::PointXYZRGB(0.0,0.0,0.0); + second.y = 1.0; + pcl::PointXYZRGB third = pcl::PointXYZRGB(0.0,0.0,0.0); + third.x = 1.0; + third.y = 1.0; + + dummy->points.push_back( first ); + dummy->points.push_back( second ); + dummy->points.push_back( third ); + + for(uint8_t i = 0; i < dummy->points.size(); i++ ) + { // Find z given x and y: + dummy->points[i].z = (double) ( -ground_coeffs_(3) -(ground_coeffs_(0) * dummy->points[i].x) - (ground_coeffs_(1) * dummy->points[i].y) ) / ground_coeffs_(2); + } + + // Rotate them: + dummy = rotateCloud(dummy, transform); + + // Compute new ground coeffs: + std::vector indices; + for(unsigned int i = 0; i < dummy->points.size(); i++) + { + indices.push_back(i); + } + pcl::SampleConsensusModelPlane model_plane(dummy); + model_plane.computeModelCoefficients(indices, ground_coeffs_new); + + return ground_coeffs_new; +} + +template typename open_ptrack::detection::GroundBasedPeopleDetectionApp::PointCloudPtr +open_ptrack::detection::GroundBasedPeopleDetectionApp::preprocessCloud (PointCloudPtr& input_cloud)//, void (*publishBeforeVoxelFilterCloud)(PointCloudPtr&)) +{ + // Downsample of sampling_factor in every dimension: + PointCloudPtr cloud_downsampled(new PointCloud); + PointCloudPtr cloud_denoised(new PointCloud); + if (sampling_factor_ != 1) + { + cloud_downsampled->width = (input_cloud->width)/sampling_factor_; + cloud_downsampled->height = (input_cloud->height)/sampling_factor_; + cloud_downsampled->points.resize(cloud_downsampled->height*cloud_downsampled->width); + cloud_downsampled->is_dense = input_cloud->is_dense; + cloud_downsampled->header = input_cloud->header; + for (int j = 0; j < cloud_downsampled->width; j++) + { + for (int i = 0; i < cloud_downsampled->height; i++) + { + (*cloud_downsampled)(j,i) = (*input_cloud)(sampling_factor_*j,sampling_factor_*i); + } + } + } + + if (apply_denoising_) + { + // Denoising with statistical filtering: + pcl::StatisticalOutlierRemoval sor; + if (sampling_factor_ != 1) + sor.setInputCloud (cloud_downsampled); + else + sor.setInputCloud (input_cloud); + sor.setMeanK (mean_k_denoising_); + sor.setStddevMulThresh (std_dev_denoising_); + sor.filter (*cloud_denoised); + } + + // // Denoising viewer + // int v1(0); + // int v2(0); + // denoising_viewer_->removeAllPointClouds(v1); + // denoising_viewer_->removeAllPointClouds(v2); + // denoising_viewer_->createViewPort (0.0, 0.0, 0.5, 1.0, v1); + // pcl::visualization::PointCloudColorHandlerRGBField rgb(input_cloud); + // denoising_viewer_->addPointCloud (input_cloud, rgb, "original", v1); + // denoising_viewer_->createViewPort 
(0.5, 0.0, 1.0, 1.0, v2);
+  //  pcl::visualization::PointCloudColorHandlerRGBField rgb2(cloud_denoised);
+  //  denoising_viewer_->addPointCloud (cloud_denoised, rgb2, "denoised", v2);
+  //  denoising_viewer_->spinOnce();
+
+  // Voxel grid filtering:
+  PointCloudPtr cloud_filtered(new PointCloud);
+  pcl::VoxelGrid<PointT> voxel_grid_filter_object;
+  if (apply_denoising_)
+  {
+    //std::cout << "APPLY DENOISING" << std::endl;
+    voxel_grid_filter_object.setInputCloud(cloud_denoised);
+  }
+  else
+  {
+    if (sampling_factor_ != 1)
+    {
+      //std::cout << "APPLY DownSampled" << std::endl;
+      voxel_grid_filter_object.setInputCloud(cloud_downsampled);
+    }
+    else
+    {
+      //std::cout << "Use Input Cloud" << std::endl;
+      voxel_grid_filter_object.setInputCloud(input_cloud);
+    }
+  }
+
+  //std::cout << "Voxel Size " << voxel_size_ << " Max Distance " << max_distance_ << std::endl;
+
+  voxel_grid_filter_object.setLeafSize (voxel_size_, voxel_size_, voxel_size_);
+  voxel_grid_filter_object.setFilterFieldName("z");
+  voxel_grid_filter_object.setFilterLimits(0.0, max_distance_);
+  voxel_grid_filter_object.filter (*cloud_filtered);
+
+  //(*publishBeforeVoxelFilterCloud)(cloud_filtered);
+
+  return cloud_filtered;
+}
+
+template <typename PointT> bool
+open_ptrack::detection::GroundBasedPeopleDetectionApp<PointT>::compute (std::vector<pcl::people::PersonCluster<PointT> >& clusters)//, void (*publishExtractRGB)(PointCloudPtr&), void (*publishPreProcessed)(PointCloudPtr&), void (*publishGroundRemoval)(PointCloudPtr&), void (*publishBeforeVoxelFilterCloud)(PointCloudPtr&))
+{
+  frame_counter_++;
+
+  // Define if debug info should be written or not for this frame:
+  bool debug_flag = false;
+  if ((frame_counter_ % 60) == 0)
+  {
+    debug_flag = true;
+  }
+
+  // Check if all mandatory variables have been set:
+  if (debug_flag)
+  {
+    if (sqrt_ground_coeffs_ != sqrt_ground_coeffs_)
+    {
+      PCL_ERROR ("[open_ptrack::detection::GroundBasedPeopleDetectionApp::compute] Floor parameters have not been set or they are not valid!\n");
+      return (false);
+    }
+    if (cloud_ == NULL)
+    {
+      PCL_ERROR ("[open_ptrack::detection::GroundBasedPeopleDetectionApp::compute] Input cloud has not been set!\n");
+      return (false);
+    }
+    if (intrinsics_matrix_(0) == 0)
+    {
+      PCL_ERROR ("[open_ptrack::detection::GroundBasedPeopleDetectionApp::compute] Camera intrinsic parameters have not been set!\n");
+      return (false);
+    }
+    if (!person_classifier_set_flag_)
+    {
+      PCL_ERROR ("[open_ptrack::detection::GroundBasedPeopleDetectionApp::compute] Person classifier has not been set!\n");
+      return (false);
+    }
+  }
+
+  if (!dimension_limits_set_)    // if dimension limits have not been set by the user
+  {
+    // Adapt thresholds for clusters points number to the voxel size:
+    max_points_ = int(float(max_points_) * std::pow(0.06/voxel_size_, 2));
+    if (voxel_size_ > 0.06)
+      min_points_ = int(float(min_points_) * std::pow(0.06/voxel_size_, 2));
+  }
+
+  // Fill rgb image:
+  rgb_image_->points.clear();                            // clear RGB pointcloud
+  extractRGBFromPointCloud(cloud_, rgb_image_);          // fill RGB pointcloud
+
+  //TODO publish here extractedRGB
+  //(*publishExtractRGB)(cloud_);
+
+  // Point cloud pre-processing (downsampling and filtering):
+  PointCloudPtr cloud_filtered(new PointCloud);
+  cloud_filtered = preprocessCloud (cloud_);//, publishBeforeVoxelFilterCloud);
+
+  //TODO publish here preProcessed
+  //(*publishPreProcessed)(cloud_filtered);
+
+  if (use_rgb_)
+  {
+    // Compute mean luminance:
+    int n_points = cloud_filtered->points.size();
+    double sumR = 0.0, sumG = 0.0, sumB = 0.0;
+    for (int j = 0; j < cloud_filtered->width; j++)
+    {
+      for (int i = 0; i < cloud_filtered->height; i++)
+      {
+        sumR += (*cloud_filtered)(j,i).r;
+        sumG += (*cloud_filtered)(j,i).g;
+        sumB += (*cloud_filtered)(j,i).b;
+      }
+    }
+    mean_luminance_ = 0.3 * sumR/n_points + 0.59 * sumG/n_points + 0.11 * sumB/n_points;
+    // mean_luminance_ = 0.2126 * sumR/n_points + 0.7152 * sumG/n_points + 0.0722 * sumB/n_points;
+  }
+
+  // Ground removal and update:
+  pcl::IndicesPtr inliers(new std::vector<int>);
+  boost::shared_ptr<pcl::SampleConsensusModelPlane<PointT> > ground_model(new pcl::SampleConsensusModelPlane<PointT>(cloud_filtered));
+  ground_model->selectWithinDistance(ground_coeffs_, voxel_size_, *inliers);
+  no_ground_cloud_ = PointCloudPtr (new PointCloud);
+  pcl::ExtractIndices<PointT> extract;
+  extract.setInputCloud(cloud_filtered);
+  extract.setIndices(inliers);
+  extract.setNegative(true);
+  extract.filter(*no_ground_cloud_);
+  if (inliers->size () >= (300 * 0.06 / voxel_size_ / std::pow (static_cast<double> (sampling_factor_), 2)))
+    ground_model->optimizeModelCoefficients (*inliers, ground_coeffs_, ground_coeffs_);
+  else
+  {
+    if (debug_flag)
+    {
+      PCL_INFO ("No groundplane update!\n");
+    }
+  }
+
+  //TODO publish here groundRemoval
+  //(*publishGroundRemoval)(no_ground_cloud_);
+
+  // Background Subtraction (optional):
+  if (background_subtraction_)
+  {
+    PointCloudPtr foreground_cloud(new PointCloud);
+    for (unsigned int i = 0; i < no_ground_cloud_->points.size(); i++)
+    {
+      if (not (background_octree_->isVoxelOccupiedAtPoint(no_ground_cloud_->points[i].x, no_ground_cloud_->points[i].y, no_ground_cloud_->points[i].z)))
+      {
+        foreground_cloud->points.push_back(no_ground_cloud_->points[i]);
+      }
+    }
+    no_ground_cloud_ = foreground_cloud;
+  }
+
+
+  std::vector<int> myvector;
+
+  int count = 0;
+  for(typename pcl::PointCloud<PointT>::iterator it = no_ground_cloud_->begin(); it != no_ground_cloud_->end(); it++)
+  {
+    if (!std::isfinite (it->x) || !std::isfinite (it->y) || !std::isfinite (it->z))
+    {
+      myvector.push_back(count);
+    }
+    count++;
+  }
+
+  count = 0;
+  for (unsigned i=0; i<myvector.size(); i++)
+  {
+    no_ground_cloud_->erase(no_ground_cloud_->begin() + myvector[i] - count);
+    count++;
+  }
+
+
+  if (no_ground_cloud_->points.size() > 0)
+  {
+    // Euclidean Clustering:
+    std::vector<pcl::PointIndices> cluster_indices;
+    typename pcl::search::KdTree<PointT>::Ptr tree (new pcl::search::KdTree<PointT>);
+
+    tree->setInputCloud(no_ground_cloud_);
+    pcl::EuclideanClusterExtraction<PointT> ec;
+    ec.setClusterTolerance(2 * 0.06);
+    ec.setMinClusterSize(min_points_);
+    ec.setMaxClusterSize(max_points_);
+    ec.setSearchMethod(tree);
+    ec.setInputCloud(no_ground_cloud_);
+    ec.extract(cluster_indices);
+
+    // Sensor tilt compensation to improve people detection:
+    PointCloudPtr no_ground_cloud_rotated(new PointCloud);
+    Eigen::VectorXf ground_coeffs_new;
+    if(sensor_tilt_compensation_)
+    {
+      // We want to rotate the point cloud so that the ground plane is parallel to the xOz plane of the sensor:
+      Eigen::Vector3f input_plane, output_plane;
+      input_plane << ground_coeffs_(0), ground_coeffs_(1), ground_coeffs_(2);
+      output_plane << 0.0, -1.0, 0.0;
+
+      Eigen::Vector3f axis = input_plane.cross(output_plane);
+      float angle = acos( input_plane.dot(output_plane) / ( input_plane.norm() * output_plane.norm() ) );
+      transform_ = Eigen::AngleAxisf(angle, axis);
+
+      // Setting also anti_transform for later
+      anti_transform_ = transform_.inverse();
+      no_ground_cloud_rotated = rotateCloud(no_ground_cloud_, transform_);
+      ground_coeffs_new.resize(4);
+      ground_coeffs_new = rotateGround(ground_coeffs_, transform_);
+    }
+    else
+    {
+      transform_ = transform_.Identity();
+      anti_transform_ = transform_.inverse();
+      no_ground_cloud_rotated = no_ground_cloud_;
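As an aside, the axis-angle construction in the tilt-compensation branch above (cross product for the axis, acos of the normalized dot product for the angle) can also be expressed with an Eigen helper that handles normalization itself; a sketch under the same conventions, with the member name borrowed from the class:

    // Rotation taking the ground normal onto the sensor's "down" axis (0, -1, 0);
    // Eigen::Quaternionf::FromTwoVectors normalizes internally, so no explicit
    // axis/angle bookkeeping is needed.
    Eigen::Vector3f normal = ground_coeffs_.head<3>();
    Eigen::Affine3f tilt(Eigen::Quaternionf::FromTwoVectors(
        normal, Eigen::Vector3f(0.0f, -1.0f, 0.0f)));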
ground_coeffs_new = ground_coeffs_; + } + + // To avoid PCL warning: + if (cluster_indices.size() == 0) + cluster_indices.push_back(pcl::PointIndices()); + + // Head based sub-clustering // + pcl::people::HeadBasedSubclustering subclustering; + subclustering.setInputCloud(no_ground_cloud_rotated); + subclustering.setGround(ground_coeffs_new); + subclustering.setInitialClusters(cluster_indices); + subclustering.setHeightLimits(min_height_, max_height_); + subclustering.setMinimumDistanceBetweenHeads(heads_minimum_distance_); + subclustering.setSensorPortraitOrientation(vertical_); + subclustering.subcluster(clusters); + +// for (unsigned int i = 0; i < rgb_image_->points.size(); i++) +// { +// if ((rgb_image_->points[i].r < 0) | (rgb_image_->points[i].r > 255) | isnan(rgb_image_->points[i].r)) +// { +// std::cout << "ERROR in RGB data!" << std::endl; +// } +// } + + if (use_rgb_) // if RGB information can be used + { + // Person confidence evaluation with HOG+SVM: + if (vertical_) // Rotate the image if the camera is vertical + { + swapDimensions(rgb_image_); + } + for(typename std::vector >::iterator it = clusters.begin(); it != clusters.end(); ++it) + { + //Evaluate confidence for the current PersonCluster: + Eigen::Vector3f centroid = intrinsics_matrix_ * (anti_transform_ * it->getTCenter()); + centroid /= centroid(2); + Eigen::Vector3f top = intrinsics_matrix_ * (anti_transform_ * it->getTTop()); + top /= top(2); + Eigen::Vector3f bottom = intrinsics_matrix_ * (anti_transform_ * it->getTBottom()); + bottom /= bottom(2); + + it->setPersonConfidence(person_classifier_.evaluate(rgb_image_, bottom, top, centroid, vertical_)); + } + } + else + { + for(typename std::vector >::iterator it = clusters.begin(); it != clusters.end(); ++it) + { + it->setPersonConfidence(-100.0); + } + } + } + + return (true); +} + +template +open_ptrack::detection::GroundBasedPeopleDetectionApp::~GroundBasedPeopleDetectionApp () +{ + // TODO Auto-generated destructor stub +} +#endif /* OPEN_PTRACK_DETECTION_GROUND_BASED_PEOPLE_DETECTION_APP_HPP_ */ diff --git a/detection/launch/detector_depth.launch~ b/detection/launch/detector_depth.launch~ new file mode 100644 index 0000000..84b9e1c --- /dev/null +++ b/detection/launch/detector_depth.launch~ @@ -0,0 +1,20 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/detection/launch/detector_depth_zed.launch b/detection/launch/detector_depth_zed.launch new file mode 100644 index 0000000..fd5fe84 --- /dev/null +++ b/detection/launch/detector_depth_zed.launch @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + + + + diff --git a/detection/launch/detector_depth_zed.launch~ b/detection/launch/detector_depth_zed.launch~ new file mode 100644 index 0000000..6782260 --- /dev/null +++ b/detection/launch/detector_depth_zed.launch~ @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff --git a/detection/src/haardispada.cpp~ b/detection/src/haardispada.cpp~ new file mode 100644 index 0000000..5f66e05 --- /dev/null +++ b/detection/src/haardispada.cpp~ @@ -0,0 +1,575 @@ +/* +Software License Agreement (BSD License) + +Copyright (c) 2013, Southwest Research Institute +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the Southwest Research Institute, nor the names
+ of its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "open_ptrack/detection/haardispada.h"
+#include
+#include
+#include
+
+/*****************************************************************************
+ ** Namespaces
+ *****************************************************************************/
+namespace open_ptrack
+{
+  namespace detection
+  {
+    /*****************************************************************************
+     ** Implementation
+     *****************************************************************************/
+    HaarDispAdaClassifier::HaarDispAdaClassifier()
+    {
+      classifier_filename_ = "UnNamedHaarDispAda.xml";
+      HaarDispAdaPrior_ = exp(2); // default prior
+      loaded = false;
+      init();
+    }
+
+    HaarDispAdaClassifier::HaarDispAdaClassifier(string filename)
+    {
+      classifier_filename_ = filename;
+      loaded = false;
+      init();
+    }
+
+    void
+    HaarDispAdaClassifier::detect(vector<Rect> &R_in,
+        vector<int> &L_in,
+        Mat &D_in,
+        vector<Rect> &R_out,
+        vector<int> &L_out,
+        bool label_all)
+    {
+      int count = 0;
+      Mat HF(1,num_filters_,CV_32F);
+      Mat MH(1,num_filters_,CV_8UC1);
+
+      R_out.clear();
+      L_out.clear();
+      if(!loaded) return;
+      for(unsigned int i=0;i<R_in.size();i++){
+        float result = 0.0;
+        if(R_in[i].width > 2 && R_in[i].height > 2){
+          setDImageROI_fast(R_in[i],D_in); // copy region of interest from disparity
+          int rtn = haar_features_fast(HF); // compute haar features
+          if(rtn == 1){
+            // Compute classifier score (raw boost sum, as in the confidence overload below):
+            //result = HDAC_.predict(HF);
+            result = HDAC_.predict(HF, cv::Mat(), cv::Range::all(), false, true);
+          }
+          else{
+            ROS_ERROR("WHY O WHY");
+            result = 0;
+          }
+        }
+        if(result>0 || label_all == true){
+          // Insert in output detections:
+          R_out.push_back(R_in[i]);
+          if(label_all) L_out.push_back(result); // apply the label
+          if(!label_all) L_out.push_back(L_in[i]); // give the same label it came in with to allow eval
+          if(result>0) count++;
+        }
+      }
+    }
+
+    void
+    HaarDispAdaClassifier::detect(vector<Rect> &R_in,
+        vector<int> &L_in,
+        Mat &D_in,
+        vector<Rect> &R_out,
+        vector<int> &L_out,
+        vector<float> &C_out,
+        bool label_all)
+    {
+      int count = 0;
+      Mat HF(1,num_filters_,CV_32F);
+      Mat MH(1,num_filters_,CV_8UC1);
+
+      R_out.clear();
+      L_out.clear();
+      if(!loaded) return;
+      for(unsigned int i=0;i<R_in.size();i++){
+        float result = 0.0;
+        if(R_in[i].width > 2 && R_in[i].height > 2){
+          setDImageROI_fast(R_in[i],D_in); // copy region of interest from disparity
+          int rtn = haar_features_fast(HF); // compute haar features
+          if(rtn == 1){
+            // Compute classifier score:
+            result = HDAC_.predict(HF, cv::Mat(), cv::Range::all(), false, true);
+          }
+          else{
+            ROS_ERROR("WHY O WHY");
+            result = 0;
+          }
+        }
+
+        if(result>min_confidence_ || label_all == true){
+          // Insert in output detections:
+          R_out.push_back(R_in[i]);
+          C_out.push_back(result); // write classifier confidence
+          if(label_all)
+          {
+            if (result > min_confidence_)
+              L_out.push_back(1); // apply the label
+            else
+              L_out.push_back(0);
+          }
+          if(!label_all)
+            L_out.push_back(L_in[i]); // give the same label it came in with to allow eval
+          if(result>min_confidence_) count++;
+        }
+      }
+    }
+
+    int
+    HaarDispAdaClassifier::addToTraining(vector<Rect> &R_in, vector<int> &L_in, Mat &D_in)
+    {
+      Mat HF(1,num_filters_,CV_32F);
+      Mat MH(1,num_filters_,CV_8UC1);
+      for(unsigned int i = 0; i 0.0)){
+          for(int j=0;j<num_filters_;j++){
+            trainingSamples_.at<float>(numSamples_,j) = HF.at<float>(0,j);
+          }
+          if(L_in[i] <= 0 ){
+            trainingLabels_.at<int>(numSamples_,0) = 0; //classes: 0,1 not -1,1
+          }
+          else{
+            trainingLabels_.at<int>(numSamples_,0) = L_in[i];
+          }
+          numSamples_++;
+        }// end if successful compute feature
+      }// end of if not too many samples already
+      }// end each roi
+      return(numSamples_);
+    }// end addToTraining
+
+    void
+    HaarDispAdaClassifier::setDImageRoi(Rect &R_in, Mat &I_in)
+    {
+      int xmin = R_in.x;
+      int xmax = xmin+R_in.width;
+      int ymin = R_in.y;
+      int ymax = ymin+R_in.height;
+      Image4Haar = I_in(Range(ymin,ymax),Range(xmin,xmax));
+    }
+
+    void
+    HaarDispAdaClassifier::train(string filename)
+    {
+      CvBoostParams bparams = CvBoostParams();
+      float priorFloat[] = { 1.0, HaarDispAdaPrior_ }; // preliminary priors based on ROC
+      bparams.priors = &priorFloat[0];
+      bparams.use_surrogates = false;
+      bparams.weak_count = 100;
+
+      // copy sub matrix for training
+      Mat VarIdx;
+      Mat Features(numSamples_,num_filters_,CV_32F);
+      for(int i=0;i<numSamples_;i++){
+        for(int j=0;j<num_filters_;j++){
+          Features.at<float>(i,j) = trainingSamples_.at<float>(i,j);
+        }
+      }
+      Mat Responses(numSamples_,1,CV_32S);
+      for(int i=0; i<numSamples_; i++){
+        if(trainingLabels_.at<int>(i,0) == -1) trainingLabels_.at<int>(i,0) = 0; //classes: 0,1
+        Responses.at<int>(i,0) = trainingLabels_.at<int>(i,0);
+      }
+      Mat vIdx=Mat::ones(Features.cols,1,CV_8UC1); // variables of interest
+      Mat sIdx=Mat::ones(Responses.rows,1,CV_8UC1); // samples of interest
+      Mat vtyp=Mat(Features.cols,1,CV_8UC1,CV_VAR_ORDERED); // could be VAR_CATEGORICAL (discrete)
+      Mat MDM; // no missing mask
+      HDAC_.train(Features, CV_ROW_SAMPLE, Responses,vIdx,sIdx,vtyp,MDM,bparams,false);
+      ROS_ERROR("saving trained classifier to %s",filename.c_str());
+      loaded = true;
+      HDAC_.save(filename.c_str());
+      // Determine Recall Statistics
+      int num_TP = 0;
+      int num_FP = 0;
+      int num_people = 0;
+      int num_neg = 0;
+      int num_TN = 0;
+      int num_FN = 0;
+      for(int i=0;i<numSamples_;i++){
+        float result = HDAC_.predict(Features.row(i));
+        if(Responses.at<int>(i,0) == 1) num_people++;
+        if(Responses.at<int>(i,0) != 1) num_neg++;
+        if(result==1 && Responses.at<int>(i,0) == 1) num_TP++; // true pos
+        else if(result==1 && Responses.at<int>(i,0) == 0) num_FP++; // false pos
+        else if(result==0 && Responses.at<int>(i,0) == 0) num_TN++; // true neg
+        else if(result==0 && Responses.at<int>(i,0) == 1) num_FN++; // false neg
+      }
+      float percent = (float)num_TP/(float)num_people*100.0;
+      ROS_ERROR("Recall = %6.2f%c",percent,'%');
+      percent = (float)num_FP/(float)num_neg*100.0;
+      ROS_ERROR("False Positives = %6.2f%c",percent,'%');
+    }
+
+    void
+    HaarDispAdaClassifier::setMaxSamples(int n)
+    {
+      maxSamples_ = n;
+      numSamples_ = 0;
+      trainingSamples_.create(maxSamples_,num_filters_,CV_32FC1);
+      trainingLabels_.create(maxSamples_,1,CV_32SC1);
+
+    }
+
+    void
+    HaarDispAdaClassifier::setMinConfidence(float min_confidence)
+    {
+      min_confidence_ = min_confidence;
+    }
+
+    float
+    HaarDispAdaClassifier::getMinConfidence()
+    {
+      return min_confidence_;
+    }
+
+    void
HaarDispAdaClassifier::init() + { + num_filters_ = 174; + setMaxSamples(350); //use a small number to have a small memory footprint by default + + AvePosTrainingImg_.create(64,64,CV_64F); + + // images used by haar_features() set here since known size and type to avoid realloc + map.create(16,16,CV_16SC1); + + // faster haar + haar16x16.create(16, 16, CV_32F); + haar8x8.create(8, 8, CV_32F); + haar4x4.create(4, 4, CV_32F); + + } + + void + HaarDispAdaClassifier::load(string filename) + { + HDAC_.load(filename.c_str()); + loaded = true; + } + + int + HaarDispAdaClassifier::test() + { + return(1); + } + + // TODO find out what this function is for + void + HaarDispAdaClassifier::alpha_map(int idx) + { + int i, j, rowWidth, lPower, lPower2, type; + + type = idx % 3; // find type (horz, vert, diag) + + if (idx >= 174) + { // in 16x16, there are 225 2x2s + rowWidth = 15; + idx = idx - 174; + lPower = 2; + lPower2 = 1; + } + else if (idx >= 27) + { // in 16x16, there are 49 4x4s + rowWidth = 7; + idx = idx - 27; + lPower = 4; + lPower2 = 2; + } + else + { // in 16x16, there are 9 8x8s + rowWidth = 3; + lPower = 8; + lPower2 = 4; + } // if elseif else + + int i1 = (idx / 3) % rowWidth; + int j1 = (idx / 3) / rowWidth; + + + map = cv::Mat::zeros(map.rows,map.cols,CV_16SC1); + + for (i = i1 * lPower2; i < i1 * lPower2 + lPower; i++) + { // width/x + for (j = j1 * lPower2; j < j1 * lPower2 + lPower; j++) + { // height/y + if (type == 0) + { // horz type + if (i >= i1 * lPower2 + lPower2) + { + map.at(i,j) = 1; + } + else + { + map.at(i,j) = -1; + } + } + else if (type == 1) + { // vert type + if (j >= j1 * lPower2 + lPower2) + { + map.at(i,j) = 1; + } + else + { + map.at(i,j) = -1; + } + } + else + { // diag type (type == 2) + if (((i >= i1 * lPower2 + lPower2) && (j >= j1 * lPower2 + lPower2)) + || ((i < i1 * lPower2 + lPower2) && (j < j1 * lPower2 + lPower2))) + { + map.at(i,j) = 1; + } + else + { + map.at(i,j) = -1; + } + } // if + } // for j + } // for i + } // alpha map + + + void + HaarDispAdaClassifier::mask_scale(Mat & input, Mat & output) + { + assert(input.type() == CV_32F);// must be of type CV_8U + float hratio = (float) input.rows / (float) output.rows; + float wratio = (float) input.cols / (float) output.cols; + for (int i = 0; i < output.rows; i++) + { + float *outputRow = output.ptr(i); + for (int j = 0; j < output.cols; j++) + { + outputRow[j] = 0; + int n = 0; + for (int i1 = i * hratio; (i1 < (i + 1) * hratio) && (i1 < input.rows); i1++) + { + float *inputRow = input.ptr(i1); + for (int j1 = j * wratio; (j1 < (j + 1) * wratio) && (j1 < input.cols); j1++) + { + if (inputRow[j1] > 0) // if the pixel is nonzero, add it to the output pixel + { + outputRow[j] += inputRow[j1]; + n++; // count of non-zero pixels + } // if + } // for i1, j1 + } + if (n > 0) + outputRow[j] /= n; // new value is average of non-zero pixels + } // for i,j + } // i + } // mask_scale + + + void + HaarDispAdaClassifier::setDImageROI_fast(Rect & R_in, Mat & I_in) + { + Mat ROI = I_in(R_in); + mask_scale(ROI, haar16x16); + } + + int + HaarDispAdaClassifier::haar_features_fast(Mat &HF) + { + int jl, idx, i1, j1; + + cv::resize(haar16x16, haar8x8, cv::Size(8,8), 0, 0, CV_INTER_AREA); + cv::resize(haar8x8, haar4x4, cv::Size(4,4), 0, 0, CV_INTER_AREA); + + float* hf_ptr = HF.ptr(0); + + if(haar16x16.rows != 16 || haar16x16.cols !=16) + { + ROS_ERROR("Wrong sz Input4Haar haar_response %dX%d ",haar16x16.rows,haar16x16.cols); + return(0); + } + + + // Largest features are first (8x8s) + for (jl = 0; jl < 27; jl += 3) + { 
// in 16x16 template, there are 27 8x8s (9 positions, 3 types, windows may overlap by 50%) + idx = jl; + i1 = (idx / 3) % 3; // "x" value of our location in map + j1 = (idx / 3) / 3; // "y" value of our location in map + hf_ptr[jl] = 16 * (-haar4x4.at(j1,i1) + haar4x4.at(j1,i1+1) + -haar4x4.at(j1 +1 ,i1) + haar4x4.at(j1 + 1,i1 + 1)); + + hf_ptr[jl+1] = 16 * (-haar4x4.at(j1,i1) - haar4x4.at(j1,i1+1) + +haar4x4.at(j1 +1 ,i1) + haar4x4.at(j1 + 1,i1 + 1)); + + hf_ptr[jl+2] = 16 * (haar4x4.at(j1,i1) - haar4x4.at(j1,i1+1) + -haar4x4.at(j1 +1 ,i1) + haar4x4.at(j1 + 1,i1 + 1)); + } // for + + // Medium features are next (4x4s) + for (jl = 27; jl < 174; jl += 3) + { // in 16x16 template, there are 147 4x4s (49 positions, 3 types, windows may overlap by 50%) + idx = jl - 27; + i1 = (idx / 3) % 7; // "x" value of our location in map + j1 = (idx / 3) / 7; // "y" value of our location in map + hf_ptr[jl] = 4 * (-haar8x8.at(j1,i1) + haar8x8.at(j1,i1+1) + -haar8x8.at(j1+1 ,i1) + haar8x8.at(j1 + 1,i1 + 1)); + + hf_ptr[jl+1] = 4 * (-haar8x8.at(j1,i1) - haar8x8.at(j1,i1+1) + +haar8x8.at(j1 +1 ,i1) + haar8x8.at(j1 + 1,i1 + 1)); + + hf_ptr[jl+2] = 4 * (haar8x8.at(j1,i1) - haar8x8.at(j1,i1+1) + -haar8x8.at(j1 +1 ,i1) + haar8x8.at(j1 + 1,i1 + 1)); + } // for + return 1; + + } // haar_features_fast + + + int + HaarDispAdaClassifier::haar_features(Mat &HF, Mat &MH) + { + for (int k = 0; k < num_filters_; k++){ // do each feature + // generate haar map and scale to size of Image4Haar(roi) + alpha_map(k); + Mat ScaledMap(Image4Haar.rows,Image4Haar.cols,CV_16SC1); + resize(map, ScaledMap, Size(Image4Haar.cols, Image4Haar.rows), 0, 0, CV_INTER_NN ); + + // multiply, and sum, considering missing data in disparity map + float plus_sum = 0; + float minus_sum = 0; + int num_plus=0; + int num_minus=0; + for(int ii=0;ii(ii,0); + short int *map_ptr = (short int *) &ScaledMap.at(ii,0); + for(int jj=0;jj(0,k) =(uchar) 1; // set missing data mask to 1 + HF.at(0,k) = 0; // set feature value to zero + } + else{// not missing data + MH.at(0,k) = (uchar) 0; // set missing data mask to 0 + float plus_ave = plus_sum/num_plus; + float minus_ave = minus_sum/num_minus; + float ave_dif = fabs(plus_ave - minus_ave); + // determine scaling of feature to be consistent with haar features without missing data + float scaling=2.0; + if(k<27){ + scaling = 32.0; + } + else if(k<174){ + scaling = 8.0; + } + HF.at(0,k) = scaling*ave_dif; + } + } + return(1); + }// haar_features + void + HaarDispAdaClassifier::print_map(int k) + { + printf("Map %d :\n",k); + alpha_map(k); + for(int i=0;i(i,j)); + } + printf("\n"); + } + } + void + HaarDispAdaClassifier::print_scaled_map(cv::Mat &A) + { + printf("SMap = [\n"); + for(int i=0;i(i,j)); + } + printf("\n"); + } + printf("]\n"); + } + void + HaarDispAdaClassifier::print_Image4Haar() + { + printf("Image4Haar = [\n"); + for(int i=0;i(i,j)); + } + printf("\n"); + } + printf("]\n"); + } + + float + HaarDispAdaClassifier::find_central_disparity(int x, int y, int height, int width, Mat &D_in) + { + // Insure its a reasonable roi + if(x+width>D_in.cols || y+height>D_in.rows) return(0.0); + + float ad=0.0; + int j=width/2.0;// use central value for x + for(int i=0;i(y+i,x+j)>ad){ + ad = D_in.at(y+i,x+j); + } + } + + return(ad); + } + + + } // namespace detection +} // namespace open_ptrack diff --git a/detection_and_tracking_zed.launch b/detection_and_tracking_zed.launch new file mode 100644 index 0000000..e5be647 --- /dev/null +++ b/detection_and_tracking_zed.launch @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + 
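For reference, a caller of the classifier implemented above might look like the sketch below; the model path, the confidence threshold, and the ROI source are illustrative only, and the container template arguments follow the header declarations:

    #include <vector>
    #include <opencv2/core/core.hpp>
    #include "open_ptrack/detection/haardispada.h"

    void classifyRois(cv::Mat &disparity, std::vector<cv::Rect> &rois)
    {
      open_ptrack::detection::HaarDispAdaClassifier hda;
      hda.load("HaarDispAda.xml");   // hypothetical trained boost model
      hda.setMinConfidence(2.0f);    // illustrative threshold on the raw boost sum

      std::vector<int> labels_in(rois.size(), 1), labels_out;
      std::vector<cv::Rect> rois_out;
      std::vector<float> confidences;
      // label_all = false: keep only ROIs scoring above the minimum confidence
      hda.detect(rois, labels_in, disparity, rois_out, labels_out, confidences, false);
    }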
+ diff --git a/zed_ros_wrapper/CMakeLists.txt b/zed_ros_wrapper/CMakeLists.txt new file mode 100644 index 0000000..ea6f1ec --- /dev/null +++ b/zed_ros_wrapper/CMakeLists.txt @@ -0,0 +1,100 @@ +cmake_minimum_required(VERSION 2.8.7) +project(zed_wrapper) + +# if CMAKE_BUILD_TYPE is not specified, take 'Release' as default +IF(NOT CMAKE_BUILD_TYPE) + SET(CMAKE_BUILD_TYPE Release) +ENDIF(NOT CMAKE_BUILD_TYPE) + +find_package(ZED 1.0 REQUIRED) + +##For Jetson, OpenCV4Tegra is based on OpenCV2.4 +exec_program(uname ARGS -p OUTPUT_VARIABLE CMAKE_SYSTEM_NAME2) +if ( CMAKE_SYSTEM_NAME2 MATCHES "aarch64" ) # X1 + SET(OCV_VERSION "2.4") + SET(CUDA_VERSION "7.0") +elseif(CMAKE_SYSTEM_NAME2 MATCHES "armv7l" ) # K1 + SET(OCV_VERSION "2.4") + SET(CUDA_VERSION "6.5") +else() # Desktop + SET(OCV_VERSION "3.1") + SET(CUDA_VERSION "7.5") +endif() + +find_package(OpenCV ${OCV_VERSION} COMPONENTS core highgui imgproc REQUIRED) +find_package(CUDA ${CUDA_VERSION} REQUIRED) + +find_package(PCL REQUIRED) + +find_package(catkin REQUIRED COMPONENTS + image_transport + roscpp + rosconsole + sensor_msgs + dynamic_reconfigure + tf2_ros + pcl_conversions +) + +generate_dynamic_reconfigure_options( + cfg/Zed.cfg +) + +catkin_package( + CATKIN_DEPENDS + roscpp + rosconsole + sensor_msgs + opencv + image_transport + dynamic_reconfigure + tf2_ros + pcl_conversions +) + +############################################################################### +# INCLUDES + +# Specify locations of header files. +include_directories( + ${catkin_INCLUDE_DIRS} + ${CUDA_INCLUDE_DIRS} + ${ZED_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} + ${PCL_INCLUDE_DIRS} +) + +link_directories(${ZED_LIBRARY_DIR}) +link_directories(${CUDA_LIBRARY_DIRS}) +link_directories(${OpenCV_LIBRARY_DIRS}) +link_directories(${PCL_LIBRARY_DIRS}) + +############################################################################### + +############################################################################### +# EXECUTABLE + +add_definitions(-std=c++11)# -m64) #-Wall) + + +add_executable( + zed_wrapper_node + src/zed_wrapper_node.cpp +) + +target_link_libraries( + zed_wrapper_node + ${catkin_LIBRARIES} + ${ZED_LIBRARIES} + ${CUDA_LIBRARIES} ${CUDA_nppi_LIBRARY} ${CUDA_npps_LIBRARY} + ${OpenCV_LIBS} + ${PCL_LIBRARIES} + ) + +add_dependencies(zed_wrapper_node ${PROJECT_NAME}_gencfg) +############################################################################### + +#Add all files in subdirectories of the project in +# a dummy_target so qtcreator have access to all files +FILE(GLOB_RECURSE extra_files ${CMAKE_SOURCE_DIR}/*) +add_custom_target(dummy_${PROJECT_NAME} SOURCES ${extra_files}) diff --git a/zed_ros_wrapper/CMakeLists.txt~ b/zed_ros_wrapper/CMakeLists.txt~ new file mode 100644 index 0000000..3d258bf --- /dev/null +++ b/zed_ros_wrapper/CMakeLists.txt~ @@ -0,0 +1,101 @@ +cmake_minimum_required(VERSION 2.8.7) + +project(zed_wrapper) + +# if CMAKE_BUILD_TYPE is not specified, take 'Release' as default +IF(NOT CMAKE_BUILD_TYPE) + SET(CMAKE_BUILD_TYPE Release) +ENDIF(NOT CMAKE_BUILD_TYPE) + +find_package(ZED 1.0 REQUIRED) + +##For Jetson, OpenCV4Tegra is based on OpenCV2.4 +exec_program(uname ARGS -p OUTPUT_VARIABLE CMAKE_SYSTEM_NAME2) +if ( CMAKE_SYSTEM_NAME2 MATCHES "aarch64" ) # X1 + SET(OCV_VERSION "2.4") + SET(CUDA_VERSION "7.0") +elseif(CMAKE_SYSTEM_NAME2 MATCHES "armv7l" ) # K1 + SET(OCV_VERSION "2.4") + SET(CUDA_VERSION "6.5") +else() # Desktop + SET(OCV_VERSION "3.1") + SET(CUDA_VERSION "7.5") +endif() + +find_package(OpenCV ${OCV_VERSION} COMPONENTS core highgui 
imgproc REQUIRED) +find_package(CUDA ${CUDA_VERSION} REQUIRED) + +find_package(PCL REQUIRED) + +find_package(catkin REQUIRED COMPONENTS + image_transport + roscpp + rosconsole + sensor_msgs + dynamic_reconfigure + tf2_ros + pcl_conversions +) + +generate_dynamic_reconfigure_options( + cfg/Zed.cfg +) + +catkin_package( + CATKIN_DEPENDS + roscpp + rosconsole + sensor_msgs + opencv + image_transport + dynamic_reconfigure + tf2_ros + pcl_conversions +) + +############################################################################### +# INCLUDES + +# Specify locations of header files. +include_directories( + ${catkin_INCLUDE_DIRS} + ${CUDA_INCLUDE_DIRS} + ${ZED_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} + ${PCL_INCLUDE_DIRS} +) + +link_directories(${ZED_LIBRARY_DIR}) +link_directories(${CUDA_LIBRARY_DIRS}) +link_directories(${OpenCV_LIBRARY_DIRS}) +link_directories(${PCL_LIBRARY_DIRS}) + +############################################################################### + +############################################################################### +# EXECUTABLE + +add_definitions(-std=c++11)# -m64) #-Wall) + + +add_executable( + zed_wrapper_node + src/zed_wrapper_node.cpp +) + +target_link_libraries( + zed_wrapper_node + ${catkin_LIBRARIES} + ${ZED_LIBRARIES} + ${CUDA_LIBRARIES} ${CUDA_nppi_LIBRARY} ${CUDA_npps_LIBRARY} + ${OpenCV_LIBS} + ${PCL_LIBRARIES} + ) + +add_dependencies(zed_wrapper_node ${PROJECT_NAME}_gencfg) +############################################################################### + +#Add all files in subdirectories of the project in +# a dummy_target so qtcreator have access to all files +FILE(GLOB_RECURSE extra_files ${CMAKE_SOURCE_DIR}/*) +add_custom_target(dummy_${PROJECT_NAME} SOURCES ${extra_files}) diff --git a/zed_ros_wrapper/LICENSE b/zed_ros_wrapper/LICENSE new file mode 100644 index 0000000..f7581bc --- /dev/null +++ b/zed_ros_wrapper/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2015, Stereolabs +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of zed-ros-wrapper nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
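One note on the architecture probe used in both CMakeLists above: exec_program has long been deprecated in favor of execute_process. The same Jetson-vs-desktop switch with the supported command would look roughly like this (variable names kept from the file):

    execute_process(COMMAND uname -p
                    OUTPUT_VARIABLE CMAKE_SYSTEM_NAME2
                    OUTPUT_STRIP_TRAILING_WHITESPACE)
    if(CMAKE_SYSTEM_NAME2 MATCHES "aarch64")   # Jetson TX1: OpenCV4Tegra 2.4, CUDA 7.0
      SET(OCV_VERSION "2.4")
      SET(CUDA_VERSION "7.0")
    endif()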
+
+
diff --git a/zed_ros_wrapper/README.md b/zed_ros_wrapper/README.md
new file mode 100644
index 0000000..93dda36
--- /dev/null
+++ b/zed_ros_wrapper/README.md
@@ -0,0 +1,105 @@
+# zed-ros-wrapper
+ROS wrapper for the ZED Stereo Camera SDK
+
+**This sample is designed to work with the ZED stereo camera only and requires the ZED SDK. For more information: https://www.stereolabs.com**
+
+**This wrapper also requires the PCL library**
+
+This sample is a wrapper for the ZED library in order to use the ZED Camera with ROS. It can provide the camera images, the depth map, a 3D textured point cloud, and the odometry given by the ZED tracking.
+Published topics:
+
+ - /camera/point_cloud/cloud
+ - /camera/depth/camera_info
+ - /camera/depth/image_rect_color
+ - /camera/left/camera_info
+ - /camera/left/image_rect_color
+ - /camera/rgb/camera_info
+ - /camera/rgb/image_rect_color
+ - /camera/odom
+
+A set of parameters can be specified in the launch file provided in the launch directory.
+
+ - zed.launch
+
+The zed_ros_wrapper is a catkin package made to run on ROS Indigo, and depends
+on the following ROS packages:
+
+ - tf2_ros
+ - nav_msgs
+ - roscpp
+ - rosconsole
+ - sensor_msgs
+ - opencv
+ - image_transport
+ - dynamic_reconfigure
+
+## Build the program
+
+Place the package folder "zed_wrapper" in your catkin workspace source folder "~/catkin_ws/src"
+
+Open a terminal :
+
+    $ cd ~/catkin_ws
+    $ catkin_make
+    $ source ./devel/setup.bash
+
+
+## Run the program
+
+    Open a terminal to launch the wrapper:
+
+    $ roslaunch zed_wrapper zed.launch
+
+    Open another terminal to display images:
+
+    $ rosrun image_view image_view image:=/camera/rgb/image_rect_color
+
+    If you want to see the point cloud, launch rviz with the following command. Then click on **add** (bottom left), select the **By Topic** tab, select **point_cloud->cloud->PointCloud2** and click **OK**.
+
+    $ rosrun rviz rviz
+
+    Note that rviz isn't very good at displaying a camera feed and a point cloud at the same time. You should use another instance of rviz or the `rosrun` command.
+
+    To visualize the odometry in rviz, select the **Add** button, and select the **odom** topic under the **By topic** tab.
+
+    *Important: By default rviz displays the odometry badly; be sure to set it up correctly by opening the newly created Odometry object in the left list, and by setting **Position Tolerance** and **Angle Tolerance** to **0**, and **Keep** to **1**. *
+
+    You can also see the point cloud fused with the odometry by subscribing to the 'odom' topic (even in another rviz or another node).
+
+    To change your referential, use the 'Fixed Frame' parameter at the top-left of rviz.
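+    To verify that the wrapper is actually publishing, you can list and time the topics (names assume the default zed.launch):
+
+    $ rostopic list | grep camera
+    $ rostopic hz /camera/point_cloud/cloud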
+ +## Launch file parameters + + Parameter | Description | Value +------------------------------|-------------------------------------------------------------|------------------------- + svo_file | SVO filename | path to an SVO file + resolution | ZED Camera resolution | '0': HD2K + _ | _ | '1': HD1080 + _ | _ | '2': HD720 + _ | _ | '3': VGA + quality | Disparity Map quality | '0': NONE + _ | _ | '1': PERFORMANCE + _ | _ | '2': MEDIUM + _ | _ | '3': QUALITY + sensing_mode | Depth sensing mode | '0': FILL + _ | _ | '1': STANDARD + openni_depth_mode | Convert depth to 16bit in millimeters | '0': 32bit float meters + _ | _ | '1': 16bit uchar millimeters + frame_rate | Rate at which images are published | int + rgb_topic | Topic to which rgb==default==left images are published | string + rgb_cam_info_topic | Topic to which rgb==default==left camera info are published | string + rgb_frame_id | ID specified in the rgb==default==left image message header | string + left_topic | Topic to which left images are published | string + left_cam_info_topic | Topic to which left camera info are published | string + left_frame_id | ID specified in the left image message header | string + right_topic | Topic to which right images are published | string + right_cam_info_topic | Topic to which right camera info are published | string + right_frame_id | ID specified in the right image message header | string + depth_topic | Topic to which depth map images are published | string + depth_cam_info_topic | Topic to which depth camera info are published | string + depth_frame_id | ID specified in the depth image message header | string + point_cloud_topic | Topic to which point clouds are published | string + cloud_frame_id | ID specified in the point cloud message header | string + odometry_topic | Topic to which odometry is published | string + odometry_frame_id | ID specified in the odometry message header | string + odometry_transform_frame_id | Name of the transformation following the odometry | string diff --git a/zed_ros_wrapper/cfg/Zed.cfg b/zed_ros_wrapper/cfg/Zed.cfg new file mode 100644 index 0000000..cad5bc2 --- /dev/null +++ b/zed_ros_wrapper/cfg/Zed.cfg @@ -0,0 +1,10 @@ +#!/usr/bin/env python +PACKAGE = "zed_ros_wrapper" + +from dynamic_reconfigure.parameter_generator_catkin import * + +gen = ParameterGenerator() + +gen.add("confidence", int_t, 0, "Confidence threshold, the lower the better", 80, 1, 100) + +exit(gen.generate(PACKAGE, "zed_ros_wrapper", "Zed")) diff --git a/zed_ros_wrapper/cfg/Zed.cfg~ b/zed_ros_wrapper/cfg/Zed.cfg~ new file mode 100644 index 0000000..cad5bc2 --- /dev/null +++ b/zed_ros_wrapper/cfg/Zed.cfg~ @@ -0,0 +1,10 @@ +#!/usr/bin/env python +PACKAGE = "zed_ros_wrapper" + +from dynamic_reconfigure.parameter_generator_catkin import * + +gen = ParameterGenerator() + +gen.add("confidence", int_t, 0, "Confidence threshold, the lower the better", 80, 1, 100) + +exit(gen.generate(PACKAGE, "zed_ros_wrapper", "Zed")) diff --git a/zed_ros_wrapper/launch/zed.launch b/zed_ros_wrapper/launch/zed.launch new file mode 100644 index 0000000..24ced3c --- /dev/null +++ b/zed_ros_wrapper/launch/zed.launch @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/zed_ros_wrapper/launch/zed.launch~ b/zed_ros_wrapper/launch/zed.launch~ new file mode 100644 index 0000000..c335e98 --- /dev/null +++ b/zed_ros_wrapper/launch/zed.launch~ @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git 
a/zed_ros_wrapper/launch/zed_tf.launch b/zed_ros_wrapper/launch/zed_tf.launch new file mode 100644 index 0000000..bbd6323 --- /dev/null +++ b/zed_ros_wrapper/launch/zed_tf.launch @@ -0,0 +1,15 @@ + + + + + + + + + + + diff --git a/zed_ros_wrapper/package.xml b/zed_ros_wrapper/package.xml new file mode 100644 index 0000000..0bfa159 --- /dev/null +++ b/zed_ros_wrapper/package.xml @@ -0,0 +1,34 @@ + + + zed_wrapper + 1.0.0 + zed_wrapper is a ROS wrapper for the ZED library + for interfacing with the ZED camera. + +STEREOLABS + BSD + + catkin + + tf2_ros + nav_msgs + roscpp + rosconsole + sensor_msgs + opencv + image_transport + dynamic_reconfigure + pcl_conversions + + tf2_ros + nav_msgs + roscpp + rosconsole + sensor_msgs + opencv + image_transport + dynamic_reconfigure + pcl_conversions + + + diff --git a/zed_ros_wrapper/records/record_depth.sh b/zed_ros_wrapper/records/record_depth.sh new file mode 100644 index 0000000..b7f73c3 --- /dev/null +++ b/zed_ros_wrapper/records/record_depth.sh @@ -0,0 +1,2 @@ +#!/bin/bash +rosbag record /camera/rgb/camera_info /camera/depth_registered/image_raw/compressedDepth /camera/rgb/image_rect_color/compressed /tf diff --git a/zed_ros_wrapper/records/record_stereo.sh b/zed_ros_wrapper/records/record_stereo.sh new file mode 100644 index 0000000..97d4cfc --- /dev/null +++ b/zed_ros_wrapper/records/record_stereo.sh @@ -0,0 +1,2 @@ +#!/bin/bash +rosbag record /camera/left/camera_info /camera/right/camera_info /camera/left/image_raw/compressed /camera/right/image_raw/compressed /tf diff --git a/zed_ros_wrapper/src/zed_wrapper_node.cpp b/zed_ros_wrapper/src/zed_wrapper_node.cpp new file mode 100644 index 0000000..e494709 --- /dev/null +++ b/zed_ros_wrapper/src/zed_wrapper_node.cpp @@ -0,0 +1,707 @@ +/////////////////////////////////////////////////////////////////////////// +// +// Copyright (c) 2015, STEREOLABS. +// +// All rights reserved. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////// + + + + +/**************************************************************************************************** + ** This sample is a wrapper for the ZED library in order to use the ZED Camera with ROS. ** + ** You can publish Left+Depth or Left+Right images and camera info on the topics of your choice. ** + ** ** + ** A set of parameters can be specified in the launch file. 
** + ****************************************************************************************************/ + +//standard includes +#include +#include +#include +#include +#include +#include +#include +#include // file exists + +//ROS includes +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + + +//opencv includes +#include +#include +#include +#include + +//Boost includes +#include + +//PCL includes +#include +#include +#include +#include +#include + +//ZED Includes +#include + +using namespace sl::zed; +using namespace std; + + +int confidence; +bool computeDepth; +int openniDepthMode = 0; // 16 bit UC data in mm else 32F in m, for more info http://www.ros.org/reps/rep-0118.html + +// Point cloud thread variables +float* cloud; +bool pointCloudThreadRunning = true; +bool point_cloud_data_processing = false; +string point_cloud_frame_id = ""; +ros::Time point_cloud_time; + +/* \brief Test if a file exist + * \param name : the path to the file + */ +bool file_exist(const std::string& name) { + struct stat buffer; + return (stat(name.c_str(), &buffer) == 0); +} + +/* \brief Image to ros message conversion + * \param img : the image to publish + * \param encodingType : the sensor_msgs::image_encodings encoding type + * \param frameId : the id of the reference frame of the image + * \param t : the ros::Time to stamp the image +*/ +sensor_msgs::ImagePtr imageToROSmsg(cv::Mat img, const std::string encodingType, std::string frameId, ros::Time t){ + sensor_msgs::ImagePtr ptr = boost::make_shared(); + sensor_msgs::Image& imgMessage = *ptr; + imgMessage.header.stamp = t; + imgMessage.header.frame_id = frameId; + imgMessage.height = img.rows; + imgMessage.width = img.cols; + imgMessage.encoding = encodingType; + int num = 1; //for endianness detection + imgMessage.is_bigendian = !(*(char *)&num == 1); + imgMessage.step = img.cols * img.elemSize(); + size_t size = imgMessage.step * img.rows; + imgMessage.data.resize(size); + + if (img.isContinuous()) + memcpy((char*)(&imgMessage.data[0]), img.data, size); + else { + uchar* opencvData = img.data; + uchar* rosData = (uchar*)(&imgMessage.data[0]); + for (unsigned int i = 0; i < img.rows; i++) { + memcpy(rosData, opencvData, imgMessage.step); + rosData += imgMessage.step; + opencvData += img.step; + } + } + return ptr; +} + +/* \brief Publish the pose of the camera with a ros Publisher + * \param Path : the 4x4 matrix representing the camera pose + * \param pub_odom : the publisher object to use + * \param odom_frame_id : the id of the reference frame of the pose + * \param t : the ros::Time to stamp the image + */ +void publishOdom(Eigen::Matrix4f Path, ros::Publisher &pub_odom, string odom_frame_id, ros::Time t) { + nav_msgs::Odometry odom; + odom.header.stamp = t; + odom.header.frame_id = odom_frame_id; + //odom.child_frame_id = "zed_optical_frame"; + + odom.pose.pose.position.y = -Path(0, 3); + odom.pose.pose.position.z = Path(1, 3); + odom.pose.pose.position.x = -Path(2, 3); + Eigen::Quaternionf quat(Path.block<3, 3>(0, 0)); + odom.pose.pose.orientation.x = -quat.z(); + odom.pose.pose.orientation.y = -quat.x(); + odom.pose.pose.orientation.z = quat.y(); + odom.pose.pose.orientation.w = quat.w(); + pub_odom.publish(odom); +} + +/* \brief Publish the pose of the camera as a transformation + * \param Path : the 4x4 matrix representing the camera pose + * \param trans_br : the TransformBroadcaster object to use + * \param odometry_transform_frame_id : the id of the transformation + 
+/* \brief Publish the pose of the camera as a transformation
+ * \param Path : the 4x4 matrix representing the camera pose
+ * \param trans_br : the TransformBroadcaster object to use
+ * \param odometry_transform_frame_id : the id of the transformation
+ * \param t : the ros::Time to stamp the transform
+ */
+void publishTrackedFrame(Eigen::Matrix4f Path, tf2_ros::TransformBroadcaster &trans_br, string odometry_transform_frame_id, ros::Time t) {
+
+    geometry_msgs::TransformStamped transformStamped;
+    transformStamped.header.stamp = t; // stamp with the caller's time, as documented above
+    transformStamped.header.frame_id = "zed_initial_frame";
+    transformStamped.child_frame_id = odometry_transform_frame_id;
+    transformStamped.transform.translation.x = -Path(2, 3);
+    transformStamped.transform.translation.y = -Path(0, 3);
+    transformStamped.transform.translation.z = Path(1, 3);
+    Eigen::Quaternionf quat(Path.block<3, 3>(0, 0));
+    transformStamped.transform.rotation.x = -quat.z();
+    transformStamped.transform.rotation.y = -quat.x();
+    transformStamped.transform.rotation.z = quat.y();
+    transformStamped.transform.rotation.w = quat.w();
+    trans_br.sendTransform(transformStamped);
+}
+
+/* \brief Publish a cv::Mat image with a ros Publisher
+ * \param img : the image to publish
+ * \param pub_img : the publisher object to use
+ * \param img_frame_id : the id of the reference frame of the image
+ * \param t : the ros::Time to stamp the image
+ */
+void publishImage(cv::Mat img, image_transport::Publisher &pub_img, string img_frame_id, ros::Time t) {
+    pub_img.publish(imageToROSmsg(img
+            , sensor_msgs::image_encodings::BGR8
+            , img_frame_id
+            , t ));
+}
+
+/* \brief Publish a cv::Mat depth image with a ros Publisher
+ * \param depth : the depth image to publish
+ * \param pub_depth : the publisher object to use
+ * \param depth_frame_id : the id of the reference frame of the depth image
+ * \param t : the ros::Time to stamp the depth image
+ */
+void publishDepth(cv::Mat depth, image_transport::Publisher &pub_depth, string depth_frame_id, ros::Time t) {
+    string encoding;
+    if(openniDepthMode){
+        depth *= 1000.0f;
+        depth.convertTo(depth, CV_16UC1); // in mm, rounded
+        encoding = sensor_msgs::image_encodings::TYPE_16UC1;
+    }
+    else {
+        encoding = sensor_msgs::image_encodings::TYPE_32FC1;
+    }
+    pub_depth.publish(imageToROSmsg(depth
+            , encoding
+            , depth_frame_id
+            , t ));
+}
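+
+// Design note: publishPointCloud() below runs on a dedicated worker thread. The main
+// loop hands it a frame by filling the global `cloud` buffer, `point_cloud_frame_id`
+// and `point_cloud_time`, then raising `point_cloud_data_processing`; the worker
+// converts and publishes the cloud and lowers the flag again, so at most one cloud
+// is being converted at a time while grabbing continues.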
+/* \brief Convert and publish the current point cloud with a ros Publisher (worker thread)
+ * \param width : the width of the point cloud
+ * \param height : the height of the point cloud
+ * \param pub_cloud : the publisher object to use
+ * The XYZRGBA data is read from the global `cloud` buffer; frame id and stamp are
+ * taken from point_cloud_frame_id / point_cloud_time.
+ */
+void publishPointCloud(int width, int height, ros::Publisher &pub_cloud) {
+    while (pointCloudThreadRunning) { // check if the thread has to continue
+        if (!point_cloud_data_processing) { // check if data is available
+            std::this_thread::sleep_for(std::chrono::milliseconds(2)); // No data, we just wait
+            continue;
+        }
+        pcl::PointCloud<pcl::PointXYZRGB> point_cloud;
+        point_cloud.width = width;
+        point_cloud.height = height;
+        int size = width*height;
+        point_cloud.points.resize(size);
+        int index4 = 0;
+        float color;
+        for (int i = 0; i < size; i++) {
+            if (cloud[index4 + 2] > 0) { // invalid point: in the right-handed ZED frame valid depth has negative z
+                index4 += 4;
+                continue;
+            }
+            point_cloud.points[i].x = cloud[index4++];
+            point_cloud.points[i].y = -cloud[index4++];
+            point_cloud.points[i].z = -cloud[index4++];
+            color = cloud[index4++];
+            uint32_t color_uint = *(uint32_t*) &color; // Reinterpret the float-packed color as its 8-bit channels
+            unsigned char* color_uchar = (unsigned char*) &color_uint;
+            color_uint = ((uint32_t) color_uchar[0] << 16 | (uint32_t) color_uchar[1] << 8 | (uint32_t) color_uchar[2]);
+            point_cloud.points[i].rgb = *reinterpret_cast<float*>(&color_uint);
+        }
+
+        //pcl::PointCloud<pcl::PointXYZRGB> point_cloud_output;
+        //std::vector<int> indices;
+        //pcl::removeNaNFromPointCloud(point_cloud, point_cloud_output, indices);
+
+        sensor_msgs::PointCloud2 output;
+        pcl::toROSMsg(point_cloud, output); // Convert the point cloud to a ROS message
+        output.header.frame_id = point_cloud_frame_id; // Set the header values of the ROS message
+        output.header.stamp = point_cloud_time;
+        pub_cloud.publish(output);
+        point_cloud_data_processing = false;
+    }
+}
+
+/* \brief Publish the information of a camera with a ros Publisher
+ * \param cam_info_msg : the information message to publish
+ * \param pub_cam_info : the publisher object to use
+ * \param t : the ros::Time to stamp the message
+ */
+void publishCamInfo(sensor_msgs::CameraInfoPtr cam_info_msg, ros::Publisher pub_cam_info, ros::Time t) {
+    static int seq = 0;
+    cam_info_msg->header.stamp = t;
+    cam_info_msg->header.seq = seq;
+    pub_cam_info.publish(cam_info_msg);
+    seq++;
+}
+
+/* \brief Get the information of the ZED cameras and store it in information messages
+ * \param zed : the sl::zed::Camera* pointer to an instance
+ * \param left_cam_info_msg : the information message to fill with the left camera information
+ * \param right_cam_info_msg : the information message to fill with the right camera information
+ * \param left_frame_id : the id of the reference frame of the left camera
+ * \param right_frame_id : the id of the reference frame of the right camera
+ */
+void fillCamInfo(Camera* zed, sensor_msgs::CameraInfoPtr left_cam_info_msg, sensor_msgs::CameraInfoPtr right_cam_info_msg,
+        string left_frame_id, string right_frame_id) {
+
+    int width = zed->getImageSize().width;
+    int height = zed->getImageSize().height;
+
+    sl::zed::StereoParameters* zedParam = zed->getParameters();
+
+    float baseline = zedParam->baseline * 0.001; // baseline converted in meters
+
+    float fx = zedParam->LeftCam.fx;
+    float fy = zedParam->LeftCam.fy;
+    float cx = zedParam->LeftCam.cx;
+    float cy = zedParam->LeftCam.cy;
+
+    // There is no distortion since the images are rectified
+    double k1 = 0;
+    double k2 = 0;
+    double k3 = 0;
+    double p1 = 0;
+    double p2 = 0;
+
+    left_cam_info_msg->distortion_model = sensor_msgs::distortion_models::PLUMB_BOB;
+    right_cam_info_msg->distortion_model = sensor_msgs::distortion_models::PLUMB_BOB;
+
+    left_cam_info_msg->D.resize(5);
+    right_cam_info_msg->D.resize(5);
+    left_cam_info_msg->D[0] = right_cam_info_msg->D[0] = k1;
+    left_cam_info_msg->D[1] = right_cam_info_msg->D[1] = k2;
+    left_cam_info_msg->D[2] = right_cam_info_msg->D[2] = k3;
+    left_cam_info_msg->D[3] = right_cam_info_msg->D[3] = p1;
+    left_cam_info_msg->D[4] = right_cam_info_msg->D[4] = p2;
+
+    left_cam_info_msg->K.fill(0.0);
+    right_cam_info_msg->K.fill(0.0);
+    left_cam_info_msg->K[0] = right_cam_info_msg->K[0] = fx;
+    left_cam_info_msg->K[2] = right_cam_info_msg->K[2] = cx;
+    left_cam_info_msg->K[4] = right_cam_info_msg->K[4] = fy;
+    left_cam_info_msg->K[5] = right_cam_info_msg->K[5] = cy;
+    left_cam_info_msg->K[8] = right_cam_info_msg->K[8] = 1.0;
+
+    left_cam_info_msg->R.fill(0.0);
+    right_cam_info_msg->R.fill(0.0);
+
+    left_cam_info_msg->P.fill(0.0);
+    right_cam_info_msg->P.fill(0.0);
+    left_cam_info_msg->P[0] = right_cam_info_msg->P[0] = fx;
+    left_cam_info_msg->P[2] = right_cam_info_msg->P[2] = cx;
+    left_cam_info_msg->P[5] = right_cam_info_msg->P[5] = fy;
+    left_cam_info_msg->P[6] = right_cam_info_msg->P[6] = cy;
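+    // P is the 3x4 projection matrix [fx 0 cx Tx; 0 fy cy 0; 0 0 1 0]; for a rectified
+    // stereo pair the right camera carries Tx = -fx * baseline, set just below.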
+    left_cam_info_msg->P[10] = right_cam_info_msg->P[10] = 1.0;
+    right_cam_info_msg->P[3] = (-1 * fx * baseline);
+
+    left_cam_info_msg->width = right_cam_info_msg->width = width;
+    left_cam_info_msg->height = right_cam_info_msg->height = height;
+
+    left_cam_info_msg->header.frame_id = left_frame_id;
+    right_cam_info_msg->header.frame_id = right_frame_id;
+}
+
+void callback(zed_ros_wrapper::ZedConfig &config, uint32_t level) {
+    ROS_INFO("Reconfigure confidence : %d", config.confidence);
+    confidence = config.confidence;
+}
+
+int main(int argc, char **argv) {
+    // Launch file parameters
+    int resolution = sl::zed::HD720;
+    int quality = sl::zed::MODE::PERFORMANCE;
+    int sensing_mode = sl::zed::SENSING_MODE::STANDARD;
+    int rate = 30;
+    string odometry_DB = "";
+
+    std::string img_topic = "image_rect";
+
+    // Set the default topic names
+    string rgb_topic = "rgb/" + img_topic;
+    string rgb_cam_info_topic = "rgb/camera_info";
+    string rgb_frame_id = "/zed_tracked_frame";
+
+    string left_topic = "left/" + img_topic;
+    string left_cam_info_topic = "left/camera_info";
+    string left_frame_id = "/zed_tracked_frame";
+
+    string right_topic = "right/" + img_topic;
+    string right_cam_info_topic = "right/camera_info";
+    string right_frame_id = "/zed_tracked_frame";
+
+    string depth_topic = "depth/";
+    if (openniDepthMode)
+        depth_topic += "image_raw";
+    else
+        depth_topic += img_topic;
+
+    string depth_cam_info_topic = "depth/camera_info";
+    string depth_frame_id = "/zed_depth_optical_frame";
+
+    string point_cloud_topic = "point_cloud/" + img_topic;
+    string cloud_frame_id = "/zed_tracked_frame";
+
+    string odometry_topic = "odom";
+    string odometry_frame_id = "/zed_initial_frame";
+    string odometry_transform_frame_id = "/zed_tracked_frame";
+
+    ros::init(argc, argv, "zed_depth_stereo_wrapper_node");
+    ROS_INFO("ZED_WRAPPER Node initialized");
+
+    ros::NodeHandle nh;
+    ros::NodeHandle nh_ns("~");
+
+    // Get parameters from launch file
+    nh_ns.getParam("resolution", resolution);
+    nh_ns.getParam("quality", quality);
+    nh_ns.getParam("sensing_mode", sensing_mode);
+    nh_ns.getParam("frame_rate", rate);
+    nh_ns.getParam("odometry_DB", odometry_DB);
+    nh_ns.getParam("openni_depth_mode", openniDepthMode);
+    if (openniDepthMode)
+        ROS_INFO_STREAM("Openni depth mode activated");
+
+    nh_ns.getParam("rgb_topic", rgb_topic);
+    nh_ns.getParam("rgb_cam_info_topic", rgb_cam_info_topic);
+    nh_ns.getParam("rgb_frame_id", rgb_frame_id);
+
+    nh_ns.getParam("left_topic", left_topic);
+    nh_ns.getParam("left_cam_info_topic", left_cam_info_topic);
+    nh_ns.getParam("left_frame_id", left_frame_id);
+
+    nh_ns.getParam("right_topic", right_topic);
+    nh_ns.getParam("right_cam_info_topic", right_cam_info_topic);
+    nh_ns.getParam("right_frame_id", right_frame_id);
+
+    nh_ns.getParam("depth_topic", depth_topic);
+    nh_ns.getParam("depth_cam_info_topic", depth_cam_info_topic);
+    nh_ns.getParam("depth_frame_id", depth_frame_id);
+
+    nh_ns.getParam("point_cloud_topic", point_cloud_topic);
+    nh_ns.getParam("cloud_frame_id", cloud_frame_id);
+
+    nh_ns.getParam("odometry_topic", odometry_topic);
+    nh_ns.getParam("odometry_frame_id", odometry_frame_id);
+    nh_ns.getParam("odometry_transform_frame_id", odometry_transform_frame_id);
+
+    // Create the ZED object
+    std::unique_ptr<sl::zed::Camera> zed;
+    if (argc == 2) {
+        zed.reset(new sl::zed::Camera(argv[1])); // Argument "svo_file" in launch file
+        ROS_INFO_STREAM("Reading SVO file : " << argv[1]);
+    } else {
+        zed.reset(new sl::zed::Camera(static_cast<sl::zed::ZEDResolution_mode> (resolution), rate));
+        ROS_INFO_STREAM("Using ZED Camera");
+    }
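+
+    // The integer launch parameters are interpreted as ZED SDK enum values:
+    // resolution -> sl::zed::ZEDResolution_mode (HD720 by default), quality -> sl::zed::MODE
+    // (PERFORMANCE by default), sensing_mode -> sl::zed::SENSING_MODE (STANDARD by default).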
Camera"); + } + + // Try to initialize the ZED + sl::zed::InitParams param; + param.unit = UNIT::METER; + param.coordinate = COORDINATE_SYSTEM::RIGHT_HANDED; + param.mode = static_cast (quality); + param.verbose = true; + + ERRCODE err = ERRCODE::ZED_NOT_AVAILABLE; + while (err != SUCCESS) { + err = zed->init(param); + ROS_INFO_STREAM(errcode2str(err)); + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + } + + zed->grab(static_cast (sensing_mode), true, true, true); //call the first grab + + //Tracking variables + sl::zed::TRACKING_STATE track_state; + sl::zed::TRACKING_FRAME_STATE frame_state; + Eigen::Matrix4f Path; + Path.setIdentity(4, 4); + + //ERRCODE display + dynamic_reconfigure::Server server; + dynamic_reconfigure::Server::CallbackType f; + + f = boost::bind(&callback, _1, _2); + server.setCallback(f); + //confidence was previously 80 + confidence = 80; + + // Get the parameters of the ZED images + int width = zed->getImageSize().width; + int height = zed->getImageSize().height; + ROS_DEBUG_STREAM("Image size : " << width << "x" << height); + + cv::Size cvSize(width, height); + cv::Mat leftImRGB(cvSize, CV_8UC3); + cv::Mat rightImRGB(cvSize, CV_8UC3); + + + // Create all the publishers + // Image publishers + image_transport::ImageTransport it_zed(nh); + image_transport::Publisher pub_rgb = it_zed.advertise(rgb_topic, 1); //rgb + ROS_INFO_STREAM("Advertized on topic " << rgb_topic); + image_transport::Publisher pub_left = it_zed.advertise(left_topic, 1); //left + ROS_INFO_STREAM("Advertized on topic " << left_topic); + image_transport::Publisher pub_right = it_zed.advertise(right_topic, 1); //right + ROS_INFO_STREAM("Advertized on topic " << right_topic); + image_transport::Publisher pub_depth = it_zed.advertise(depth_topic, 1); //depth + ROS_INFO_STREAM("Advertized on topic " << depth_topic); + + //PointCloud publisher + ros::Publisher pub_cloud = nh.advertise (point_cloud_topic, 1); + ROS_INFO_STREAM("Advertized on topic " << point_cloud_topic); + + // Camera info publishers + ros::Publisher pub_rgb_cam_info = nh.advertise(rgb_cam_info_topic, 1); //rgb + ROS_INFO_STREAM("Advertized on topic " << rgb_cam_info_topic); + ros::Publisher pub_left_cam_info = nh.advertise(left_cam_info_topic, 1); //left + ROS_INFO_STREAM("Advertized on topic " << left_cam_info_topic); + ros::Publisher pub_right_cam_info = nh.advertise(right_cam_info_topic, 1); //right + ROS_INFO_STREAM("Advertized on topic " << right_cam_info_topic); + ros::Publisher pub_depth_cam_info = nh.advertise(depth_cam_info_topic, 1); //depth + ROS_INFO_STREAM("Advertized on topic " << depth_cam_info_topic); + + //Odometry publisher + ros::Publisher pub_odom = nh.advertise(odometry_topic, 1); + tf2_ros::TransformBroadcaster transform_odom_broadcaster; + ROS_INFO_STREAM("Advertized on topic " << odometry_topic); + + // Create and fill the camera information messages + sensor_msgs::CameraInfoPtr rgb_cam_info_msg(new sensor_msgs::CameraInfo()); + sensor_msgs::CameraInfoPtr left_cam_info_msg(new sensor_msgs::CameraInfo()); + sensor_msgs::CameraInfoPtr right_cam_info_msg(new sensor_msgs::CameraInfo()); + sensor_msgs::CameraInfoPtr depth_cam_info_msg(new sensor_msgs::CameraInfo()); + fillCamInfo(zed.get(), left_cam_info_msg, right_cam_info_msg, left_frame_id, right_frame_id); + rgb_cam_info_msg = depth_cam_info_msg = left_cam_info_msg; // the reference camera is the Left one (next to the ZED logo) + + ros::Rate loop_rate(rate); + ros::Time old_t = ros::Time::now(); + bool old_image = false; + std::unique_ptr 
+
+    // Get the parameters of the ZED images
+    int width = zed->getImageSize().width;
+    int height = zed->getImageSize().height;
+    ROS_DEBUG_STREAM("Image size : " << width << "x" << height);
+
+    cv::Size cvSize(width, height);
+    cv::Mat leftImRGB(cvSize, CV_8UC3);
+    cv::Mat rightImRGB(cvSize, CV_8UC3);
+
+
+    // Create all the publishers
+    // Image publishers
+    image_transport::ImageTransport it_zed(nh);
+    image_transport::Publisher pub_rgb = it_zed.advertise(rgb_topic, 1); //rgb
+    ROS_INFO_STREAM("Advertised on topic " << rgb_topic);
+    image_transport::Publisher pub_left = it_zed.advertise(left_topic, 1); //left
+    ROS_INFO_STREAM("Advertised on topic " << left_topic);
+    image_transport::Publisher pub_right = it_zed.advertise(right_topic, 1); //right
+    ROS_INFO_STREAM("Advertised on topic " << right_topic);
+    image_transport::Publisher pub_depth = it_zed.advertise(depth_topic, 1); //depth
+    ROS_INFO_STREAM("Advertised on topic " << depth_topic);
+
+    //PointCloud publisher
+    ros::Publisher pub_cloud = nh.advertise<sensor_msgs::PointCloud2> (point_cloud_topic, 1);
+    ROS_INFO_STREAM("Advertised on topic " << point_cloud_topic);
+
+    // Camera info publishers
+    ros::Publisher pub_rgb_cam_info = nh.advertise<sensor_msgs::CameraInfo>(rgb_cam_info_topic, 1); //rgb
+    ROS_INFO_STREAM("Advertised on topic " << rgb_cam_info_topic);
+    ros::Publisher pub_left_cam_info = nh.advertise<sensor_msgs::CameraInfo>(left_cam_info_topic, 1); //left
+    ROS_INFO_STREAM("Advertised on topic " << left_cam_info_topic);
+    ros::Publisher pub_right_cam_info = nh.advertise<sensor_msgs::CameraInfo>(right_cam_info_topic, 1); //right
+    ROS_INFO_STREAM("Advertised on topic " << right_cam_info_topic);
+    ros::Publisher pub_depth_cam_info = nh.advertise<sensor_msgs::CameraInfo>(depth_cam_info_topic, 1); //depth
+    ROS_INFO_STREAM("Advertised on topic " << depth_cam_info_topic);
+
+    //Odometry publisher
+    ros::Publisher pub_odom = nh.advertise<nav_msgs::Odometry>(odometry_topic, 1);
+    tf2_ros::TransformBroadcaster transform_odom_broadcaster;
+    ROS_INFO_STREAM("Advertised on topic " << odometry_topic);
+
+    // Create and fill the camera information messages
+    sensor_msgs::CameraInfoPtr rgb_cam_info_msg(new sensor_msgs::CameraInfo());
+    sensor_msgs::CameraInfoPtr left_cam_info_msg(new sensor_msgs::CameraInfo());
+    sensor_msgs::CameraInfoPtr right_cam_info_msg(new sensor_msgs::CameraInfo());
+    sensor_msgs::CameraInfoPtr depth_cam_info_msg(new sensor_msgs::CameraInfo());
+    fillCamInfo(zed.get(), left_cam_info_msg, right_cam_info_msg, left_frame_id, right_frame_id);
+    rgb_cam_info_msg = depth_cam_info_msg = left_cam_info_msg; // the reference camera is the Left one (next to the ZED logo)
+
+    ros::Rate loop_rate(rate);
+    ros::Time old_t = ros::Time::now();
+    bool old_image = false;
+    std::unique_ptr<std::thread> pointCloudThread;
+    pointCloudThread.reset(new std::thread(&publishPointCloud, width, height, std::ref(pub_cloud)));
+    bool tracking_activated = false;
+
+    try
+    {
+        // Main loop
+        while (ros::ok())
+        {
+            // Check for subscribers
+            int rgb_SubNumber = pub_rgb.getNumSubscribers();
+            int left_SubNumber = pub_left.getNumSubscribers();
+            int right_SubNumber = pub_right.getNumSubscribers();
+            int depth_SubNumber = pub_depth.getNumSubscribers();
+            int cloud_SubNumber = pub_cloud.getNumSubscribers();
+            int odom_SubNumber = pub_odom.getNumSubscribers();
+            bool runLoop = (rgb_SubNumber + left_SubNumber + right_SubNumber + depth_SubNumber + cloud_SubNumber + odom_SubNumber) > 0;
+            // Originally the body only ran with at least one subscriber; the runLoop
+            // check is bypassed here so frames are always processed
+            if (true)
+            {
+                if (odom_SubNumber > 0 && !tracking_activated)
+                { //Start the tracking
+                    if (odometry_DB != "" && !file_exist(odometry_DB))
+                    {
+                        odometry_DB = "";
+                        ROS_WARN("odometry_DB path doesn't exist or is unreachable.");
+                    }
+                    zed->enableTracking(Path, true, odometry_DB);
+                    tracking_activated = true;
+                }
+                else if (odom_SubNumber == 0 && tracking_activated)
+                { //Stop the tracking
+                    zed->stopTracking();
+                    tracking_activated = false;
+                }
+                computeDepth = (depth_SubNumber + cloud_SubNumber + odom_SubNumber) > 0; // Detect if one of the subscribers needs the depth information
+                ros::Time t = ros::Time::now(); // Get current time
+
+                if (computeDepth)
+                {
+                    int actual_confidence = zed->getConfidenceThreshold();
+                    if (actual_confidence != confidence)
+                    {
+                        zed->setConfidenceThreshold(confidence);
+                    }
+
+                    old_image = zed->grab(static_cast<sl::zed::SENSING_MODE> (sensing_mode), true, true, cloud_SubNumber > 0); // Ask to compute the depth
+                }
+                else
+                {
+                    old_image = zed->grab(static_cast<sl::zed::SENSING_MODE> (sensing_mode), false, false); // Ask to not compute the depth
+                }
+
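+                // grab() returns true while no new frame is available; if that lasts
+                // more than 5 s (e.g. the camera was unplugged), the ZED object is
+                // destroyed and re-created below, and tracking is re-enabled if needed.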
+                if (old_image)
+                { // Detect if an error occurred (for example: the ZED has been disconnected) and re-initialize the ZED
+                    ROS_DEBUG("Wait for a new image to proceed");
+                    std::this_thread::sleep_for(std::chrono::milliseconds(2));
+                    if ((t - old_t).toSec() > 5)
+                    {
+                        // delete the old object before constructing a new one
+                        zed.reset();
+                        if (argc == 2)
+                        {
+                            zed.reset(new sl::zed::Camera(argv[1])); // Argument "svo_file" in launch file
+                            ROS_INFO_STREAM("Reading SVO file : " << argv[1]);
+                        }
+                        else
+                        {
+                            zed.reset(new sl::zed::Camera(static_cast<sl::zed::ZEDResolution_mode> (resolution), rate));
+                            ROS_INFO_STREAM("Using ZED Camera");
+                        }
+
+                        ROS_INFO("Reinit camera");
+                        ERRCODE err = ERRCODE::ZED_NOT_AVAILABLE;
+
+                        while (err != SUCCESS)
+                        {
+                            err = zed->init(param); // Try to initialize the ZED
+                            ROS_INFO_STREAM(errcode2str(err));
+                            std::this_thread::sleep_for(std::chrono::milliseconds(2000));
+                        }
+
+                        zed->grab(static_cast<sl::zed::SENSING_MODE> (sensing_mode), true, true, cloud_SubNumber > 0); //call the first grab
+                        tracking_activated = false;
+
+                        if (odom_SubNumber > 0) { //Start the tracking
+                            if (odometry_DB != "" && !file_exist(odometry_DB))
+                            {
+                                odometry_DB = "";
+                                ROS_WARN("odometry_DB path doesn't exist or is unreachable.");
+                            }
+
+                            zed->enableTracking(Path, true, odometry_DB);
+                            tracking_activated = true;
+                        }
+                    }
+
+                    continue;
+                }
+
+                old_t = ros::Time::now();
+
+                // Publish the left (== rgb) image; the per-topic subscriber checks are intentionally commented out
+                //if (left_SubNumber > 0 || rgb_SubNumber > 0) {
+                // Retrieve RGBA Left image and convert it to RGB
+                cv::cvtColor(slMat2cvMat(zed->retrieveImage(sl::zed::SIDE::LEFT)), leftImRGB, CV_RGBA2RGB);
+                // if (left_SubNumber > 0) {
+                //cout << "Fx: " << left_cam_info_msg->K[0] << "Cx: " << left_cam_info_msg->K[2] << "Fy: " << left_cam_info_msg->K[4] << "Cy: " << left_cam_info_msg->K[5] << endl;
+                publishCamInfo(left_cam_info_msg, pub_left_cam_info, t);
+                publishImage(leftImRGB, pub_left, left_frame_id, t);
+                // }
+                // if (rgb_SubNumber > 0) {
+                publishCamInfo(rgb_cam_info_msg, pub_rgb_cam_info, t);
+                publishImage(leftImRGB, pub_rgb, rgb_frame_id, t); // rgb is the left image
+                //}
+                //}
+
+                // Publish the right image if someone has subscribed to it
+                if (right_SubNumber > 0)
+                {
+                    // Retrieve RGBA Right image and convert it to RGB
+                    cv::cvtColor(slMat2cvMat(zed->retrieveImage(sl::zed::SIDE::RIGHT)), rightImRGB, CV_RGBA2RGB);
+                    publishCamInfo(right_cam_info_msg, pub_right_cam_info, t);
+                    publishImage(rightImRGB, pub_right, right_frame_id, t);
+                }
+
+                // Publish the depth image if someone has subscribed to it
+                if (depth_SubNumber > 0)
+                {
+                    publishCamInfo(depth_cam_info_msg, pub_depth_cam_info, t);
+                    publishDepth(slMat2cvMat(zed->retrieveMeasure(sl::zed::MEASURE::DEPTH)), pub_depth, depth_frame_id, t); // in meters
+                }
+
+                // Hand the point cloud to the worker thread if it is idle (the subscriber
+                // check is intentionally commented out)
+                if (point_cloud_data_processing == false) //cloud_SubNumber > 0 &&
+                {
+                    // Run the point cloud conversion asynchronously to avoid slowing down all the program
+                    // Retrieve raw pointCloud data
+                    cloud = (float*) zed->retrieveMeasure(sl::zed::MEASURE::XYZRGBA).data;
+                    point_cloud_frame_id = cloud_frame_id;
+                    point_cloud_time = t;
+                    point_cloud_data_processing = true;
+                }
+
+                // Publish the odometry if someone has subscribed to it
+                if (odom_SubNumber > 0)
+                {
+                    track_state = zed->getPosition(Path, MAT_TRACKING_TYPE::PATH);
+                    publishOdom(Path, pub_odom, odometry_frame_id, t);
+                }
+
+                //Note, the frame is published, but its values will only change if someone has subscribed to odom
+                publishTrackedFrame(Path, transform_odom_broadcaster, odometry_transform_frame_id, t); //publish the tracked Frame
+
+                ros::spinOnce();
+                loop_rate.sleep();
+            }
+            else
+            {
+                publishTrackedFrame(Path, transform_odom_broadcaster, odometry_transform_frame_id, ros::Time::now()); //publish the tracked Frame before the sleep
+                std::this_thread::sleep_for(std::chrono::milliseconds(10)); // No subscribers, we just wait
+            }
+        }
+    }
+    catch (...)
+    {
+        if (pointCloudThread && pointCloudThreadRunning)
+        {
+            pointCloudThreadRunning = false;
+            pointCloudThread->join();
+        }
+        ROS_ERROR("Unknown error.");
+        return 1;
+    }
+
+    if (pointCloudThread && pointCloudThreadRunning)
+    {
+        pointCloudThreadRunning = false;
+        pointCloudThread->join();
+    }
+
+    ROS_INFO("Quitting zed_depth_stereo_wrapper_node ...\n");
+
+    return 0;
+}
From f36f3f421ca72aa4af857a0b7407c0b616068e6d Mon Sep 17 00:00:00 2001
From: chanbrown007
Date: Mon, 12 Dec 2016 17:52:50 -0800
Subject: [PATCH 02/28] Update README for Zed

---
 ...ME-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt b/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt
index 1a16c7c..758a9b9 100644
--- a/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt
+++ b/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt
@@ -83,9 +83,10 @@ $ printf '# OpenCV\nPKG_CONFIG_PATH=$PKG_CONFIG_PATH:/usr/local/opencv3/lib/pkgc
 $ source ~/.bashrc
 
 Now Time to Install ZED Camera Drivers:
+Download the ZED SDK v1.1.0 from: https://www.stereolabs.com/developers/release/archives/
+It is very important that v1.1.0 is downloaded and not any other version.
+
 First make sure ZED Camera is plugged into a POWERED USB 3.0 Slot
-$ cd
-$ cd open_ptrack/scripts
 $ ./ZED_SDK_Linux_x86_64_v1.1.0.run
 Press q to exit license agreement and accept all defaults

From 9d6938212a620f9df849061b6826a2c39f8c0712 Mon Sep 17 00:00:00 2001
From: chanbrown007
Date: Mon, 12 Dec 2016 17:56:31 -0800
Subject: [PATCH 03/28] Removing Duplicate files


From e21b31c77ca86048f25cd3ee221992c2a37f2a1b Mon Sep 17 00:00:00 2001
From: chanbrown007
Date: Mon, 12 Dec 2016 17:59:45 -0800
Subject: [PATCH 04/28] Deleting Duplicate File

---
 .../ground_based_people_detector_node.cpp~ | 568 ------------------
 1 file changed, 568 deletions(-)
 delete mode 100644 detection/apps/ground_based_people_detector_node.cpp~

diff --git a/detection/apps/ground_based_people_detector_node.cpp~ b/detection/apps/ground_based_people_detector_node.cpp~
deleted file mode 100644
index e4fd3fe..0000000
--- a/detection/apps/ground_based_people_detector_node.cpp~
+++ /dev/null
@@ -1,568 +0,0 @@
-/*
- * Software License Agreement (BSD License)
- *
- * Copyright (c) 2013-, Matteo Munaro [matteo.munaro@dei.unipd.it]
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * ground_based_people_detector_node.cpp - * Created on: Jul 07, 2013 - * Author: Matteo Munaro - * - * ROS node which performs people detection assuming that people stand/walk on a ground plane. - * As a first step, the ground is manually initialized, then people detection is performed with the GroundBasedPeopleDetectionApp class, - * which implements the people detection algorithm described here: - * M. Munaro, F. Basso and E. Menegatti, - * Tracking people within groups with RGB-D data, - * In Proceedings of the International Conference on Intelligent Robots and Systems (IROS) 2012, Vilamoura (Portugal), 2012. - */ - -// ROS includes: -#include -#include - -// PCL includes: -#include -#include -#include -#include -#include -#include -#include - -// Open PTrack includes: -#include -#include -#include - -//Publish Messages -#include -#include -#include -#include -#include -#include - -// Dynamic reconfigure: -#include -#include - -#include -#include - -using namespace opt_msgs; -using namespace sensor_msgs; - -typedef pcl::PointXYZRGB PointT; -typedef pcl::PointCloud PointCloudT; -typedef detection::GroundBasedPeopleDetectorConfig Config; -typedef dynamic_reconfigure::Server ReconfigureServer; - -bool new_cloud_available_flag = false; -PointCloudT::Ptr cloud(new PointCloudT); -bool intrinsics_already_set = false; -Eigen::Matrix3f intrinsics_matrix; -bool update_background = false; - -// Min confidence for people detection: -double min_confidence; -// People detection object -open_ptrack::detection::GroundBasedPeopleDetectionApp people_detector; -// Flag stating if classifiers based on RGB image should be used or not -bool use_rgb; -// Threshold on image luminance. 
If luminance is over this threshold, classifiers on RGB image are also used -int minimum_luminance; -// If true, sensor tilt angle wrt ground plane is compensated to improve people detection -bool sensor_tilt_compensation; -// Voxel size for downsampling the cloud -double voxel_size; -// If true, do not update the ground plane at every frame -bool lock_ground; -// Frames to use for updating the background -int max_background_frames; -// Main loop rate: -double rate_value; -// Voxel resolution of the octree used to represent the background -double background_octree_resolution; -// Background cloud -PointCloudT::Ptr background_cloud; -// If true, background subtraction is performed -bool background_subtraction; -// Threshold on the ratio of valid points needed for ground estimation -double valid_points_threshold; - -//ros::Publisher extractedRGB_cloud; -//ros::Publisher beforeVoxelFilter_cloud; -//ros::Publisher preProcessed_cloud; -//ros::Publisher groundRemoval_cloud; - -void -cloud_cb (const PointCloudT::ConstPtr& callback_cloud) -{ - *cloud = *callback_cloud; - new_cloud_available_flag = true; -} - -void -cameraInfoCallback (const sensor_msgs::CameraInfo::ConstPtr & msg) -{ - if (!intrinsics_already_set) - { - intrinsics_matrix << msg->K.elems[0], msg->K.elems[1], msg->K.elems[2], - msg->K.elems[3], msg->K.elems[4], msg->K.elems[5], - msg->K.elems[6], msg->K.elems[7], msg->K.elems[8]; - intrinsics_already_set = true; - } -} - -void -updateBackgroundCallback (const std_msgs::String::ConstPtr & msg) -{ - if (msg->data == "update") - { - update_background = true; - } -} - -/* -//Publish Intermediate Point Cloud Calls -void -publishExtractRGBCloud(PointCloudT::Ptr& cloud) -{ - sensor_msgs::PointCloud2 output; - pcl::toROSMsg(*cloud, output); // Convert the point cloud to a ROS message - extractedRGB_cloud.publish(output); -} - -void -publishpreProcessedCloud(PointCloudT::Ptr& cloud) -{ - sensor_msgs::PointCloud2 output; - pcl::toROSMsg(*cloud, output); // Convert the point cloud to a ROS message - preProcessed_cloud.publish(output); -} - - -void -publishBeforeVoxelFilterCloud(PointCloudT::Ptr& cloud) -{ - sensor_msgs::PointCloud2 output; - pcl::toROSMsg(*cloud, output); // Convert the point cloud to a ROS message - beforeVoxelFilter_cloud.publish(output); -} - -void -publishBeforeVoxelFilterDummyCloud(PointCloudT::Ptr& cloud) -{ - //sensor_msgs::PointCloud2 output; - //pcl::toROSMsg(*cloud, output); // Convert the point cloud to a ROS message - //beforeVoxelFilter_cloud.publish(output); -} - - -void -publishgroundRemovalCloud(PointCloudT::Ptr& cloud) -{ - sensor_msgs::PointCloud2 output; - pcl::toROSMsg(*cloud, output); // Convert the point cloud to a ROS message - groundRemoval_cloud.publish(output); -} -*/ - -void -computeBackgroundCloud (int frames, float voxel_size, std::string frame_id, ros::Rate rate, PointCloudT::Ptr& background_cloud) -{ - std::cout << "Background acquisition..." 
<< std::flush; - - // Create background cloud: - background_cloud->header = cloud->header; - background_cloud->points.clear(); - for (unsigned int i = 0; i < frames; i++) - { - // Point cloud pre-processing (downsampling and filtering): - PointCloudT::Ptr cloud_filtered(new PointCloudT); - cloud_filtered = people_detector.preprocessCloud (cloud, &publishBeforeVoxelFilterDummyCloud); - - *background_cloud += *cloud_filtered; - ros::spinOnce(); - rate.sleep(); - } - - // Voxel grid filtering: - PointCloudT::Ptr cloud_filtered(new PointCloudT); - pcl::VoxelGrid voxel_grid_filter_object; - voxel_grid_filter_object.setInputCloud(background_cloud); - voxel_grid_filter_object.setLeafSize (voxel_size, voxel_size, voxel_size); - voxel_grid_filter_object.filter (*cloud_filtered); - - background_cloud = cloud_filtered; - - // Background saving: - pcl::io::savePCDFileASCII ("/tmp/background_" + frame_id.substr(1, frame_id.length()-1) + ".pcd", *background_cloud); - - std::cout << "done." << std::endl << std::endl; -} - -void -configCb(Config &config, uint32_t level) -{ - valid_points_threshold = config.valid_points_threshold; - - min_confidence = config.ground_based_people_detection_min_confidence; - - people_detector.setHeightLimits (config.minimum_person_height, config.maximum_person_height); - - people_detector.setMaxDistance (config.max_distance); - - people_detector.setSamplingFactor (config.sampling_factor); - - use_rgb = config.use_rgb; - people_detector.setUseRGB (config.use_rgb); - - minimum_luminance = config.minimum_luminance; - - sensor_tilt_compensation = config.sensor_tilt_compensation; - people_detector.setSensorTiltCompensation (config.sensor_tilt_compensation); - - people_detector.setMinimumDistanceBetweenHeads (config.heads_minimum_distance); - - voxel_size = config.voxel_size; - people_detector.setVoxelSize (config.voxel_size); - - people_detector.setDenoisingParameters (config.apply_denoising, config.mean_k_denoising, config.std_dev_denoising); - - lock_ground = config.lock_ground; - - max_background_frames = int(config.background_seconds * rate_value); - - if (config.background_resolution != background_octree_resolution) - { - background_octree_resolution = config.background_resolution; - if (background_subtraction) - people_detector.setBackground(background_subtraction, background_octree_resolution, background_cloud); - } - - if (config.background_subtraction != background_subtraction) - { - if (config.background_subtraction) - { - update_background = true; - } - else - { - background_subtraction = false; - people_detector.setBackground(false, background_octree_resolution, background_cloud); - } - } -} - -bool -fileExists(const char *fileName) -{ - ifstream infile(fileName); - return infile.good(); -} - -int -main (int argc, char** argv) -{ - ros::init(argc, argv, "ground_based_people_detector"); - ros::NodeHandle nh("~"); - - // Dynamic reconfigure - boost::recursive_mutex config_mutex_; - boost::shared_ptr reconfigure_server_; - - // Read some parameters from launch file: - int ground_estimation_mode; - nh.param("ground_estimation_mode", ground_estimation_mode, 0); - std::string svm_filename; - - nh.param("classifier_file", svm_filename, std::string("./")); - nh.param("use_rgb", use_rgb, true); - nh.param("minimum_luminance", minimum_luminance, 20); - nh.param("ground_based_people_detection_min_confidence", min_confidence, -1.5); - double max_distance; - nh.param("max_distance", max_distance, 50.0); - double min_height; - nh.param("minimum_person_height", min_height, 1.3); - 
double max_height; - nh.param("maximum_person_height", max_height, 2.3); - // Point cloud sampling factor: - int sampling_factor; - nh.param("sampling_factor", sampling_factor, 1); - std::string pointcloud_topic; - nh.param("pointcloud_topic", pointcloud_topic, std::string("/camera/depth_registered/points")); - std::string output_topic; - nh.param("output_topic", output_topic, std::string("/ground_based_people_detector/detections")); - std::string camera_info_topic; - nh.param("camera_info_topic", camera_info_topic, std::string("/camera/rgb/camera_info")); - nh.param("rate", rate_value, 30.0); - // If true, exploit extrinsic calibration for estimating the ground plane equation: - bool ground_from_extrinsic_calibration; - nh.param("ground_from_extrinsic_calibration", ground_from_extrinsic_calibration, false); - nh.param("lock_ground", lock_ground, false); - nh.param("sensor_tilt_compensation", sensor_tilt_compensation, false); - nh.param("valid_points_threshold", valid_points_threshold, 0.2); - nh.param("background_subtraction", background_subtraction, false); - nh.param("background_resolution", background_octree_resolution, 0.3); - double background_seconds; // Number of seconds used to acquire the background - nh.param("background_seconds", background_seconds, 3.0); - std::string update_background_topic; // Topic where the background update message is published/read - nh.param("update_background_topic", update_background_topic, std::string("/background_update")); - double heads_minimum_distance; // Minimum distance between two persons' heads - nh.param("heads_minimum_distance", heads_minimum_distance, 0.3); - nh.param("voxel_size", voxel_size, 0.06); - bool read_ground_from_file; // Flag stating if the ground should be read from file, if present - nh.param("read_ground_from_file", read_ground_from_file, false); - // Denoising flag. 
If true, a statistical filter is applied to the point cloud to remove noise - bool apply_denoising; - nh.param("apply_denoising", apply_denoising, false); - // MeanK for denoising (the higher it is, the stronger the filtering) - int mean_k_denoising; - nh.param("mean_k_denoising", mean_k_denoising, 5); - // Standard deviation for denoising (the lower it is, the stronger the filtering) - double std_dev_denoising; - nh.param("std_dev_denoising", std_dev_denoising, 0.3); - - // Eigen::Matrix3f intrinsics_matrix; - intrinsics_matrix << 525, 0.0, 319.5, 0.0, 525, 239.5, 0.0, 0.0, 1.0; // Kinect RGB camera intrinsics - - // Initialize transforms to be used to correct sensor tilt to identity matrix: - Eigen::Affine3f transform, anti_transform; - transform = transform.Identity(); - anti_transform = transform.inverse(); - - // Subscribers: - ros::Subscriber sub = nh.subscribe(pointcloud_topic, 1, cloud_cb); - ros::Subscriber camera_info_sub = nh.subscribe(camera_info_topic, 1, cameraInfoCallback); - ros::Subscriber update_background_sub = nh.subscribe(update_background_topic, 1, updateBackgroundCallback); - - // Publishers: - ros::Publisher detection_pub; - detection_pub = nh.advertise(output_topic, 3); - - //extractedRGB_cloud = nh.advertise (pointcloud_topic + "/extractedRGB", 3); - //preProcessed_cloud = nh.advertise (pointcloud_topic + "/preProcessed", 3); - //groundRemoval_cloud = nh.advertise (pointcloud_topic + "/groundRemoval", 3); - //beforeVoxelFilter_cloud = nh.advertise (pointcloud_topic + "/beforeVoxelFilter", 3); - - Rois output_rois_; - open_ptrack::opt_utils::Conversions converter; - - ros::Rate rate(rate_value); - while(ros::ok() && !new_cloud_available_flag) - { - ros::spinOnce(); - rate.sleep(); - } - - // Create classifier for people detection: - open_ptrack::detection::PersonClassifier person_classifier; - person_classifier.loadSVMFromFile(svm_filename); // load trained SVM - - // People detection app initialization: - people_detector.setVoxelSize(voxel_size); // set the voxel size - people_detector.setMaxDistance(max_distance); // set maximum distance of people from the sensor - people_detector.setIntrinsics(intrinsics_matrix); // set RGB camera intrinsic parameters - people_detector.setClassifier(person_classifier); // set person classifier - people_detector.setHeightLimits(min_height, max_height); // set minimum and maximum person height - people_detector.setSamplingFactor(sampling_factor); // set sampling factor - people_detector.setUseRGB(use_rgb); // set if RGB should be used or not - people_detector.setSensorTiltCompensation(sensor_tilt_compensation); // enable point cloud rotation correction - people_detector.setMinimumDistanceBetweenHeads (heads_minimum_distance); // set minimum distance between persons' heads - people_detector.setDenoisingParameters (apply_denoising, mean_k_denoising, std_dev_denoising); // set parameters for denoising the point cloud - - // Set up dynamic reconfiguration - ReconfigureServer::CallbackType f = boost::bind(&configCb, _1, _2); - reconfigure_server_.reset(new ReconfigureServer(config_mutex_, nh)); - reconfigure_server_->setCallback(f); - - // Loop until a valid point cloud is found - open_ptrack::detection::GroundplaneEstimation ground_estimator(ground_estimation_mode); - bool first_valid_frame = false; - int no_valid_frame_counter = 0; - while (!first_valid_frame) - { - if (!ground_estimator.tooManyNaN(cloud, 1 - valid_points_threshold)) - { // A point cloud is valid if the ratio #NaN / #valid points is lower than a threshold - first_valid_frame = 
true; - std::cout << "Valid frame found!" << std::endl; - } - else - { - if (++no_valid_frame_counter > 60) - { - std::cout << "No valid frame. Move the camera to a better position..." << std::endl; - no_valid_frame_counter = 0; - } - } - - // Execute callbacks: - ros::spinOnce(); - rate.sleep(); - } - std::cout << std::endl; - - // Initialization for background subtraction: - background_cloud = PointCloudT::Ptr (new PointCloudT); - std::string frame_id = cloud->header.frame_id; - max_background_frames = int(background_seconds * rate_value); - if (background_subtraction) - { - std::cout << "Background subtraction enabled." << std::endl; - - // Try to load the background from file: - if (pcl::io::loadPCDFile ("/tmp/background_" + frame_id.substr(1, frame_id.length()-1) + ".pcd", *background_cloud) == -1) - { - // File not found, then background acquisition: - computeBackgroundCloud (max_background_frames, voxel_size, frame_id, rate, background_cloud); - } - else - { - std::cout << "Background read from file." << std::endl << std::endl; - } - - people_detector.setBackground(background_subtraction, background_octree_resolution, background_cloud); - } - - // Ground estimation: - std::cout << "Ground plane initialization starting..." << std::endl; - ground_estimator.setInputCloud(cloud); - Eigen::VectorXf ground_coeffs = ground_estimator.computeMulticamera(ground_from_extrinsic_calibration, read_ground_from_file, - pointcloud_topic, sampling_factor, voxel_size); - - // Main loop: - while(ros::ok()) - { - if (new_cloud_available_flag) - { - new_cloud_available_flag = false; - // Convert PCL cloud header to ROS header: - std_msgs::Header cloud_header = pcl_conversions::fromPCL(cloud->header); - // If requested, update background: - if (update_background) - { - if (not background_subtraction) - { - std::cout << "Background subtraction enabled." 
<< std::endl; - background_subtraction = true; - } - computeBackgroundCloud (max_background_frames, voxel_size, frame_id, rate, background_cloud); - people_detector.setBackground (background_subtraction, background_octree_resolution, background_cloud); - - update_background = false; - } - - // Perform people detection on the new cloud: - std::vector > clusters; // vector containing persons clusters - - - people_detector.setInputCloud(cloud); - people_detector.setGround(ground_coeffs); // set floor coefficients - people_detector.compute(clusters);//, &publishExtractRGBCloud, &publishpreProcessedCloud , &publishgroundRemovalCloud, &publishBeforeVoxelFilterCloud); // perform people detection - - // If not lock_ground, update ground coefficients: - if (not lock_ground) - ground_coeffs = people_detector.getGround(); // get updated floor coefficients - - if (sensor_tilt_compensation) - people_detector.getTiltCompensationTransforms(transform, anti_transform); - - /// Write detection message: - DetectionArray::Ptr detection_array_msg(new DetectionArray); - // Set camera-specific fields: - detection_array_msg->header = cloud_header; - for(int i = 0; i < 3; i++) - for(int j = 0; j < 3; j++) - detection_array_msg->intrinsic_matrix.push_back(intrinsics_matrix(i, j)); - detection_array_msg->confidence_type = std::string("hog+svm"); - detection_array_msg->image_type = std::string("rgb"); - - // Add all valid detections: - for(std::vector >::iterator it = clusters.begin(); it != clusters.end(); ++it) - { - if((!use_rgb) | (people_detector.getMeanLuminance() < minimum_luminance) | // if RGB is not used or luminance is too low - ((people_detector.getMeanLuminance() >= minimum_luminance) & (it->getPersonConfidence() > min_confidence))) // if RGB is used, keep only people with confidence above a threshold - { - // Create detection message: - Detection detection_msg; - converter.Vector3fToVector3(anti_transform * it->getMin(), detection_msg.box_3D.p1); - converter.Vector3fToVector3(anti_transform * it->getMax(), detection_msg.box_3D.p2); - - float head_centroid_compensation = 0.05; - - // theoretical person centroid: - Eigen::Vector3f centroid3d = anti_transform * it->getTCenter(); - Eigen::Vector3f centroid2d = converter.world2cam(centroid3d, intrinsics_matrix); - // theoretical person top point: - Eigen::Vector3f top3d = anti_transform * it->getTTop(); - Eigen::Vector3f top2d = converter.world2cam(top3d, intrinsics_matrix); - // theoretical person bottom point: - Eigen::Vector3f bottom3d = anti_transform * it->getTBottom(); - Eigen::Vector3f bottom2d = converter.world2cam(bottom3d, intrinsics_matrix); - float enlarge_factor = 1.1; - float pixel_xc = centroid2d(0); - float pixel_yc = centroid2d(1); - float pixel_height = (bottom2d(1) - top2d(1)) * enlarge_factor; - float pixel_width = pixel_height / 2; - detection_msg.box_2D.x = int(centroid2d(0) - pixel_width/2.0); - detection_msg.box_2D.y = int(centroid2d(1) - pixel_height/2.0); - detection_msg.box_2D.width = int(pixel_width); - detection_msg.box_2D.height = int(pixel_height); - detection_msg.height = it->getHeight(); - detection_msg.confidence = it->getPersonConfidence(); - detection_msg.distance = it->getDistance(); - converter.Vector3fToVector3((1+head_centroid_compensation/centroid3d.norm())*centroid3d, detection_msg.centroid); - converter.Vector3fToVector3((1+head_centroid_compensation/top3d.norm())*top3d, detection_msg.top); - converter.Vector3fToVector3((1+head_centroid_compensation/bottom3d.norm())*bottom3d, detection_msg.bottom); - - // Add 
message: - detection_array_msg->detections.push_back(detection_msg); - } - } - detection_pub.publish(detection_array_msg); // publish message - } - - // Execute callbacks: - ros::spinOnce(); - rate.sleep(); - } - - // Delete background file from disk: - std::string filename = "/tmp/background_" + frame_id.substr(1, frame_id.length()-1) + ".pcd"; - if (fileExists (filename.c_str())) - { - remove( filename.c_str() ); - } - - return 0; -} - From 64f48236c0a42a82894d94d2ca0f5df76021115d Mon Sep 17 00:00:00 2001 From: chanbrown007 Date: Mon, 12 Dec 2016 18:00:18 -0800 Subject: [PATCH 05/28] Delete ground_based_people_detector_node_zed.cpp~ --- ...ground_based_people_detector_node_zed.cpp~ | 516 ------------------ 1 file changed, 516 deletions(-) delete mode 100644 detection/apps/ground_based_people_detector_node_zed.cpp~ diff --git a/detection/apps/ground_based_people_detector_node_zed.cpp~ b/detection/apps/ground_based_people_detector_node_zed.cpp~ deleted file mode 100644 index 02b4b5e..0000000 --- a/detection/apps/ground_based_people_detector_node_zed.cpp~ +++ /dev/null @@ -1,516 +0,0 @@ -/* - * Software License Agreement (BSD License) - * - * Copyright (c) 2013-, Matteo Munaro [matteo.munaro@dei.unipd.it] - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the copyright holder(s) nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * ground_based_people_detector_node.cpp - * Created on: Jul 07, 2013 - * Author: Matteo Munaro - * - * ROS node which performs people detection assuming that people stand/walk on a ground plane. - * As a first step, the ground is manually initialized, then people detection is performed with the GroundBasedPeopleDetectionApp class, - * which implements the people detection algorithm described here: - * M. Munaro, F. Basso and E. Menegatti, - * Tracking people within groups with RGB-D data, - * In Proceedings of the International Conference on Intelligent Robots and Systems (IROS) 2012, Vilamoura (Portugal), 2012. 
- */ - -// ROS includes: -#include -#include - -// PCL includes: -#include -#include -#include -#include -#include -#include -#include - -// Open PTrack includes: -#include -#include -#include - -//Publish Messages -#include -#include -#include -#include -#include -#include - -// Dynamic reconfigure: -#include -#include - -#include -#include - -using namespace opt_msgs; -using namespace sensor_msgs; - -typedef pcl::PointXYZRGB PointT; -typedef pcl::PointCloud PointCloudT; -typedef detection::GroundBasedPeopleDetectorConfig Config; -typedef dynamic_reconfigure::Server ReconfigureServer; - -bool new_cloud_available_flag = false; -PointCloudT::Ptr cloud(new PointCloudT); -bool intrinsics_already_set = false; -Eigen::Matrix3f intrinsics_matrix; -bool update_background = false; - -// Min confidence for people detection: -double min_confidence; -// People detection object -open_ptrack::detection::GroundBasedPeopleDetectionApp people_detector; -// Flag stating if classifiers based on RGB image should be used or not -bool use_rgb; -// Threshold on image luminance. If luminance is over this threshold, classifiers on RGB image are also used -int minimum_luminance; -// If true, sensor tilt angle wrt ground plane is compensated to improve people detection -bool sensor_tilt_compensation; -// Voxel size for downsampling the cloud -double voxel_size; -// If true, do not update the ground plane at every frame -bool lock_ground; -// Frames to use for updating the background -int max_background_frames; -// Main loop rate: -double rate_value; -// Voxel resolution of the octree used to represent the background -double background_octree_resolution; -// Background cloud -PointCloudT::Ptr background_cloud; -// If true, background subtraction is performed -bool background_subtraction; -// Threshold on the ratio of valid points needed for ground estimation -double valid_points_threshold; - -bool camera_info_available_flag = false; - -void -cloud_cb (const PointCloudT::ConstPtr& callback_cloud) -{ - *cloud = *callback_cloud; - new_cloud_available_flag = true; -} - -void -cameraInfoCallback (const sensor_msgs::CameraInfo::ConstPtr & msg) -{ - if (!intrinsics_already_set) - { - intrinsics_matrix << msg->K[0], 0, msg->K[2], 0, msg->K[4], msg->K[5], 0, 0, 1; - people_detector.setIntrinsics(intrinsics_matrix); - camera_info_available_flag = true; - intrinsics_already_set = true; - camera_info_available_flag = true; - } -} - -void -updateBackgroundCallback (const std_msgs::String::ConstPtr & msg) -{ - if (msg->data == "update") - { - update_background = true; - } -} - -void -computeBackgroundCloud (int frames, float voxel_size, std::string frame_id, ros::Rate rate, PointCloudT::Ptr& background_cloud) -{ - std::cout << "Background acquisition..." 
<< std::flush; - - // Create background cloud: - background_cloud->header = cloud->header; - background_cloud->points.clear(); - for (unsigned int i = 0; i < frames; i++) - { - // Point cloud pre-processing (downsampling and filtering): - PointCloudT::Ptr cloud_filtered(new PointCloudT); - cloud_filtered = people_detector.preprocessCloud (cloud); - - *background_cloud += *cloud_filtered; - ros::spinOnce(); - rate.sleep(); - } - - // Voxel grid filtering: - PointCloudT::Ptr cloud_filtered(new PointCloudT); - pcl::VoxelGrid voxel_grid_filter_object; - voxel_grid_filter_object.setInputCloud(background_cloud); - voxel_grid_filter_object.setLeafSize (voxel_size, voxel_size, voxel_size); - voxel_grid_filter_object.filter (*cloud_filtered); - - background_cloud = cloud_filtered; - - // Background saving: - pcl::io::savePCDFileASCII ("/tmp/background_" + frame_id.substr(1, frame_id.length()-1) + ".pcd", *background_cloud); - - std::cout << "done." << std::endl << std::endl; -} - -void -configCb(Config &config, uint32_t level) -{ - valid_points_threshold = config.valid_points_threshold; - - min_confidence = config.ground_based_people_detection_min_confidence; - - people_detector.setHeightLimits (config.minimum_person_height, config.maximum_person_height); - - people_detector.setMaxDistance (config.max_distance); - - people_detector.setSamplingFactor (config.sampling_factor); - - use_rgb = config.use_rgb; - people_detector.setUseRGB (config.use_rgb); - - minimum_luminance = config.minimum_luminance; - - sensor_tilt_compensation = config.sensor_tilt_compensation; - people_detector.setSensorTiltCompensation (config.sensor_tilt_compensation); - - people_detector.setMinimumDistanceBetweenHeads (config.heads_minimum_distance); - - voxel_size = config.voxel_size; - people_detector.setVoxelSize (config.voxel_size); - - people_detector.setDenoisingParameters (config.apply_denoising, config.mean_k_denoising, config.std_dev_denoising); - - lock_ground = config.lock_ground; - - max_background_frames = int(config.background_seconds * rate_value); - - if (config.background_resolution != background_octree_resolution) - { - background_octree_resolution = config.background_resolution; - if (background_subtraction) - people_detector.setBackground(background_subtraction, background_octree_resolution, background_cloud); - } - - if (config.background_subtraction != background_subtraction) - { - if (config.background_subtraction) - { - update_background = true; - } - else - { - background_subtraction = false; - people_detector.setBackground(false, background_octree_resolution, background_cloud); - } - } -} - -bool -fileExists(const char *fileName) -{ - ifstream infile(fileName); - return infile.good(); -} - -int -main (int argc, char** argv) -{ - ros::init(argc, argv, "ground_based_people_detector"); - ros::NodeHandle nh("~"); - - // Dynamic reconfigure - boost::recursive_mutex config_mutex_; - boost::shared_ptr reconfigure_server_; - - // Read some parameters from launch file: - int ground_estimation_mode; - nh.param("ground_estimation_mode", ground_estimation_mode, 0); - std::string svm_filename; - - nh.param("classifier_file", svm_filename, std::string("./")); - nh.param("use_rgb", use_rgb, true); - nh.param("minimum_luminance", minimum_luminance, 20); - nh.param("ground_based_people_detection_min_confidence", min_confidence, -1.5); - double max_distance; - nh.param("max_distance", max_distance, 50.0); - double min_height; - nh.param("minimum_person_height", min_height, 1.3); - double max_height; - 
nh.param("maximum_person_height", max_height, 2.3); - // Point cloud sampling factor: - int sampling_factor; - nh.param("sampling_factor", sampling_factor, 1); - std::string pointcloud_topic; - nh.param("pointcloud_topic", pointcloud_topic, std::string("/camera/depth_registered/points")); - std::string output_topic; - nh.param("output_topic", output_topic, std::string("/ground_based_people_detector/detections")); - std::string camera_info_topic; - nh.param("camera_info_topic", camera_info_topic, std::string("/camera/rgb/camera_info")); - nh.param("rate", rate_value, 30.0); - // If true, exploit extrinsic calibration for estimatin the ground plane equation: - bool ground_from_extrinsic_calibration; - nh.param("ground_from_extrinsic_calibration", ground_from_extrinsic_calibration, false); - nh.param("lock_ground", lock_ground, false); - nh.param("sensor_tilt_compensation", sensor_tilt_compensation, false); - nh.param("valid_points_threshold", valid_points_threshold, 0.2); - nh.param("background_subtraction", background_subtraction, false); - nh.param("background_resolution", background_octree_resolution, 0.3); - double background_seconds; // Number of seconds used to acquire the background - nh.param("background_seconds", background_seconds, 3.0); - std::string update_background_topic; // Topic where the background update message is published/read - nh.param("update_background_topic", update_background_topic, std::string("/background_update")); - double heads_minimum_distance; // Minimum distance between two persons' head - nh.param("heads_minimum_distance", heads_minimum_distance, 0.3); - nh.param("voxel_size", voxel_size, 0.06); - bool read_ground_from_file; // Flag stating if the ground should be read from file, if present - nh.param("read_ground_from_file", read_ground_from_file, false); - // Denoising flag. 
If true, a statistical filter is applied to the point cloud to remove noise - bool apply_denoising; - nh.param("apply_denoising", apply_denoising, false); - // MeanK for denoising (the higher it is, the stronger the filtering) - int mean_k_denoising; - nh.param("mean_k_denoising", mean_k_denoising, 5); - // Standard deviation for denoising (the lower it is, the stronger the filtering) - double std_dev_denoising; - nh.param("std_dev_denoising", std_dev_denoising, 0.3); - - // Eigen::Matrix3f intrinsics_matrix; - intrinsics_matrix << 525, 0.0, 319.5, 0.0, 525, 239.5, 0.0, 0.0, 1.0; // Kinect RGB camera intrinsics - - // Initialize transforms to be used to correct sensor tilt to identity matrix: - Eigen::Affine3f transform, anti_transform; - transform = transform.Identity(); - anti_transform = transform.inverse(); - - // Subscribers: - ros::Subscriber sub = nh.subscribe(pointcloud_topic, 1, cloud_cb); - ros::Subscriber camera_info_sub = nh.subscribe(camera_info_topic, 1, cameraInfoCallback); - ros::Subscriber update_background_sub = nh.subscribe(update_background_topic, 1, updateBackgroundCallback); - - // Publishers: - ros::Publisher detection_pub; - detection_pub = nh.advertise(output_topic, 3); - - Rois output_rois_; - open_ptrack::opt_utils::Conversions converter; - - ros::Rate rate(rate_value); - while(ros::ok() && !new_cloud_available_flag) - { - ros::spinOnce(); - rate.sleep(); - } - - // Create classifier for people detection: - open_ptrack::detection::PersonClassifier person_classifier; - person_classifier.loadSVMFromFile(svm_filename); // load trained SVM - - // People detection app initialization: - people_detector.setVoxelSize(voxel_size); // set the voxel size - people_detector.setMaxDistance(max_distance); // set maximum distance of people from the sensor - people_detector.setIntrinsics(intrinsics_matrix); // set RGB camera intrinsic parameters - people_detector.setClassifier(person_classifier); // set person classifier - people_detector.setHeightLimits(min_height, max_height); // set minimum and maximum person height - people_detector.setSamplingFactor(sampling_factor); // set sampling factor - people_detector.setUseRGB(use_rgb); // set if RGB should be used or not - people_detector.setSensorTiltCompensation(sensor_tilt_compensation); // enable point cloud rotation correction - people_detector.setMinimumDistanceBetweenHeads (heads_minimum_distance); // set minimum distance between persons' heads - people_detector.setDenoisingParameters (apply_denoising, mean_k_denoising, std_dev_denoising); // set parameters for denoising the point cloud - - // Set up dynamic reconfiguration - ReconfigureServer::CallbackType f = boost::bind(&configCb, _1, _2); - reconfigure_server_.reset(new ReconfigureServer(config_mutex_, nh)); - reconfigure_server_->setCallback(f); - - // Loop until a valid point cloud is found - open_ptrack::detection::GroundplaneEstimation ground_estimator(ground_estimation_mode); - bool first_valid_frame = false; - int no_valid_frame_counter = 0; - while (!first_valid_frame) - { - if (!ground_estimator.tooManyNaN(cloud, 1 - valid_points_threshold)) - { // A point cloud is valid if the ratio #NaN / #valid points is lower than a threshold - first_valid_frame = true; - std::cout << "Valid frame found!" << std::endl; - } - else - { - if (++no_valid_frame_counter > 60) - { - std::cout << "No valid frame. Move the camera to a better position..." 
<< std::endl; - no_valid_frame_counter = 0; - } - } - - // Execute callbacks: - ros::spinOnce(); - rate.sleep(); - } - std::cout << std::endl; - - // Initialization for background subtraction: - background_cloud = PointCloudT::Ptr (new PointCloudT); - std::string frame_id = cloud->header.frame_id; - max_background_frames = int(background_seconds * rate_value); - if (background_subtraction) - { - std::cout << "Background subtraction enabled." << std::endl; - - // Try to load the background from file: - if (pcl::io::loadPCDFile ("/tmp/background_" + frame_id.substr(1, frame_id.length()-1) + ".pcd", *background_cloud) == -1) - { - // File not found, then background acquisition: - computeBackgroundCloud (max_background_frames, voxel_size, frame_id, rate, background_cloud); - } - else - { - std::cout << "Background read from file." << std::endl << std::endl; - } - - people_detector.setBackground(background_subtraction, background_octree_resolution, background_cloud); - } - - // Ground estimation: - std::cout << "Ground plane initialization starting..." << std::endl; - ground_estimator.setInputCloud(cloud); - Eigen::VectorXf ground_coeffs = ground_estimator.computeMulticamera(ground_from_extrinsic_calibration, read_ground_from_file, - pointcloud_topic, sampling_factor, voxel_size); - - // Main loop: - while(ros::ok()) - { - if (new_cloud_available_flag) - { - new_cloud_available_flag = false; - // Convert PCL cloud header to ROS header: - std_msgs::Header cloud_header = pcl_conversions::fromPCL(cloud->header); - // If requested, update background: - if (update_background) - { - if (not background_subtraction) - { - std::cout << "Background subtraction enabled." << std::endl; - background_subtraction = true; - } - computeBackgroundCloud (max_background_frames, voxel_size, frame_id, rate, background_cloud); - people_detector.setBackground (background_subtraction, background_octree_resolution, background_cloud); - - update_background = false; - } - - // Perform people detection on the new cloud: - std::vector > clusters; // vector containing persons clusters - - - people_detector.setInputCloud(cloud); - people_detector.setGround(ground_coeffs); // set floor coefficients - people_detector.compute(clusters); // perform people detection - - // If not lock_ground, update ground coefficients: - if (not lock_ground) - ground_coeffs = people_detector.getGround(); // get updated floor coefficients - - if (sensor_tilt_compensation) - people_detector.getTiltCompensationTransforms(transform, anti_transform); - - /// Write detection message: - DetectionArray::Ptr detection_array_msg(new DetectionArray); - // Set camera-specific fields: - detection_array_msg->header = cloud_header; - for(int i = 0; i < 3; i++) - for(int j = 0; j < 3; j++) - detection_array_msg->intrinsic_matrix.push_back(intrinsics_matrix(i, j)); - detection_array_msg->confidence_type = std::string("hog+svm"); - detection_array_msg->image_type = std::string("rgb"); - - // Add all valid detections: - for(std::vector >::iterator it = clusters.begin(); it != clusters.end(); ++it) - { - if((!use_rgb) | (people_detector.getMeanLuminance() < minimum_luminance) | // if RGB is not used or luminance is too low - ((people_detector.getMeanLuminance() >= minimum_luminance) & (it->getPersonConfidence() > min_confidence))) // if RGB is used, keep only people with confidence above a threshold - { - // Create detection message: - Detection detection_msg; - converter.Vector3fToVector3(anti_transform * it->getMin(), detection_msg.box_3D.p1); - 
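// Note: converter.world2cam below is assumed to apply the standard pinhole
// projection with the intrinsics matrix, i.e. u = fx*X/Z + cx and
// v = fy*Y/Z + cy. The 2D box is then sized from the projected top and
// bottom points, enlarged by enlarge_factor (10%), with the width set to
// half the height.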
converter.Vector3fToVector3(anti_transform * it->getMax(), detection_msg.box_3D.p2); - - float head_centroid_compensation = 0.05; - - // theoretical person centroid: - Eigen::Vector3f centroid3d = anti_transform * it->getTCenter(); - Eigen::Vector3f centroid2d = converter.world2cam(centroid3d, intrinsics_matrix); - // theoretical person top point: - Eigen::Vector3f top3d = anti_transform * it->getTTop(); - Eigen::Vector3f top2d = converter.world2cam(top3d, intrinsics_matrix); - // theoretical person bottom point: - Eigen::Vector3f bottom3d = anti_transform * it->getTBottom(); - Eigen::Vector3f bottom2d = converter.world2cam(bottom3d, intrinsics_matrix); - float enlarge_factor = 1.1; - float pixel_xc = centroid2d(0); - float pixel_yc = centroid2d(1); - float pixel_height = (bottom2d(1) - top2d(1)) * enlarge_factor; - float pixel_width = pixel_height / 2; - detection_msg.box_2D.x = int(centroid2d(0) - pixel_width/2.0); - detection_msg.box_2D.y = int(centroid2d(1) - pixel_height/2.0); - detection_msg.box_2D.width = int(pixel_width); - detection_msg.box_2D.height = int(pixel_height); - detection_msg.height = it->getHeight(); - detection_msg.confidence = it->getPersonConfidence(); - detection_msg.distance = it->getDistance(); - converter.Vector3fToVector3((1+head_centroid_compensation/centroid3d.norm())*centroid3d, detection_msg.centroid); - converter.Vector3fToVector3((1+head_centroid_compensation/top3d.norm())*top3d, detection_msg.top); - converter.Vector3fToVector3((1+head_centroid_compensation/bottom3d.norm())*bottom3d, detection_msg.bottom); - - // Add message: - detection_array_msg->detections.push_back(detection_msg); - } - } - detection_pub.publish(detection_array_msg); // publish message - } - - // Execute callbacks: - ros::spinOnce(); - rate.sleep(); - } - - // Delete background file from disk: - std::string filename = "/tmp/background_" + frame_id.substr(1, frame_id.length()-1) + ".pcd"; - if (fileExists (filename.c_str())) - { - remove( filename.c_str() ); - } - - return 0; -} - From 0df9336c7c34551506d38c10f4131f87fe5717a8 Mon Sep 17 00:00:00 2001 From: chanbrown007 Date: Mon, 12 Dec 2016 18:00:49 -0800 Subject: [PATCH 06/28] Delete haardispada_node.cpp~ --- detection/apps/haardispada_node.cpp~ | 490 --------------------------- 1 file changed, 490 deletions(-) delete mode 100644 detection/apps/haardispada_node.cpp~ diff --git a/detection/apps/haardispada_node.cpp~ b/detection/apps/haardispada_node.cpp~ deleted file mode 100644 index 071ff1a..0000000 --- a/detection/apps/haardispada_node.cpp~ +++ /dev/null @@ -1,490 +0,0 @@ -/* -Software License Agreement (BSD License) - -Copyright (c) 2013, Southwest Research Institute -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the Southwest Research Institute, nor the names - of its contributors may be used to endorse or promote products derived - from this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - */ - -#include "open_ptrack/detection/haardispada.h" -#include "open_ptrack/opt_utils/conversions.h" - -#include "ros/ros.h" -#include - -//Publish Messages -#include "opt_msgs/RoiRect.h" -#include "opt_msgs/Rois.h" -#include "opt_msgs/DetectionArray.h" -#include "std_msgs/String.h" - -//Time Synchronizer -// NOTE: Time Synchronizer conflicts with QT includes may need to investigate -#include -#include -#include -#include - -//Subscribe Messages -#include -#include -#include -#include -#include - -// Image Transport -#include -#include - -// Used to display OPENCV images -#include -#include - -// Dynamic reconfigure: -#include -#include - -using namespace stereo_msgs; -using namespace message_filters::sync_policies; -using namespace opt_msgs; -using namespace sensor_msgs; -using namespace sensor_msgs::image_encodings; -using sensor_msgs::Image; -using cv_bridge::CvImagePtr; -using std::vector; -using std::string; -using cv::Rect; -using cv::Mat; - -class HaarDispAdaNode -{ - - typedef detection::HaarDispAdaDetectorConfig Config; - typedef dynamic_reconfigure::Server ReconfigureServer; - - private: - // Define Node - ros::NodeHandle node_; - - // Subscribe to Messages - message_filters::Subscriber sub_disparity_; - message_filters::Subscriber sub_image_; - message_filters::Subscriber sub_detections_; - - // Define the Synchronizer - typedef ApproximateTime ApproximatePolicy; - typedef message_filters::Synchronizer ApproximateSync; - boost::shared_ptr approximate_sync_; - - // Messages to Publish - ros::Publisher pub_rois_; - ros::Publisher pub_Color_Image_; - ros::Publisher pub_Disparity_Image_; - ros::Publisher pub_detections_; - - Rois output_rois_; - - enum {ACCUMULATE=0, TRAIN, DETECT, EVALUATE, LOAD}; - - //Define the Classifier Object - open_ptrack::detection::HaarDispAdaClassifier HDAC_; - - int num_class1; - int num_class0; - int num_TP_class1; - int num_FP_class1; - int num_TP_class0; - int num_FP_class0; - - // Flag stating if classifiers based on disparity image should be used or not: - bool use_disparity; - - // Minimum classifier confidence for people detection: - double min_confidence; - - // Object of class Conversions: - open_ptrack::opt_utils::Conversions converter; - - // Output detections message: - DetectionArray::Ptr output_detection_msg_; - - // Dynamic reconfigure - boost::recursive_mutex config_mutex_; - boost::shared_ptr reconfigure_server_; - - public: - - explicit HaarDispAdaNode(const ros::NodeHandle& nh): - node_(nh) - { - num_class1 = 0; - num_class0 = 0; - num_TP_class1 = 0; - num_FP_class1 = 0; - num_TP_class0 = 0; - num_FP_class0 = 0; - - string nn = ros::this_node::getName(); - int qs; - if(!node_.getParam("Q_Size",qs)){ - qs=3; - } - - if(!node_.getParam("use_disparity", use_disparity)){ - 
use_disparity = true; - } - - if(!node_.getParam("haar_disp_ada_min_confidence", min_confidence)){ - min_confidence = 3.0; - } - HDAC_.setMinConfidence(min_confidence); - - int NS; - if(!node_.getParam("num_Training_Samples",NS)){ - NS = 350; // default sets asside very little for training - node_.setParam("num_Training_Samples",NS); - } - HDAC_.setMaxSamples(NS); - - output_detection_msg_ = DetectionArray::Ptr(new DetectionArray); - - // Published Messages - pub_rois_ = node_.advertise("/HaarDispAdaOutputRois",qs); - pub_Color_Image_ = node_.advertise("/HaarDispAdaColorImage",qs); - pub_Disparity_Image_= node_.advertise("/HaarDispAdaDisparityImage",qs); - pub_detections_ = node_.advertise("/detector/detections",3); - - // Subscribe to Messages - sub_image_.subscribe(node_,"/Color_Image",qs); - sub_disparity_.subscribe(node_, "/Disparity_Image",qs); - sub_detections_.subscribe(node_,"/input_detections",qs); - - // Sync the Synchronizer - approximate_sync_.reset(new ApproximateSync(ApproximatePolicy(qs), - sub_image_, - sub_disparity_, - sub_detections_)); - - approximate_sync_->registerCallback(boost::bind(&HaarDispAdaNode::imageCb, - this, - _1, - _2, - _3)); - - // Set up dynamic reconfiguration - ReconfigureServer::CallbackType f = boost::bind(&HaarDispAdaNode::configCb, this, _1, _2); - reconfigure_server_.reset(new ReconfigureServer(config_mutex_, node_)); - reconfigure_server_->setCallback(f); - } - - int - get_mode() - { - - int callback_mode; - std::string mode=""; - node_.param("mode", mode, std::string("none")); - if(mode.compare("detect") == 0) - { - callback_mode = DETECT; - } - else if(mode.compare("train")==0) - { - callback_mode = TRAIN; - } - else if(mode.compare("load")==0) - { - callback_mode = LOAD; - } - else if(mode.compare("evaluate")==0) - { - callback_mode = EVALUATE; - } - else if(mode.compare("accumulate")==0) - { - callback_mode = ACCUMULATE; - } - else // default mode is accumulate - { - callback_mode = ACCUMULATE; - } - return(callback_mode); - } - - void - createOutputDetectionsMessage(const DetectionArray::ConstPtr& input_msg, vector confidences, DetectionArray::Ptr& output_msg) - { - // Set camera-specific fields: - output_msg->detections.clear(); - output_msg->header = input_msg->header; - output_msg->intrinsic_matrix = input_msg->intrinsic_matrix; - output_msg->confidence_type = std::string("haar+ada"); - output_msg->image_type = std::string("disparity"); - - // Add all valid detections: - int k = 0; - for(unsigned int i = 0; i < input_msg->detections.size(); i++) - { - if((!use_disparity) | (confidences[i] > min_confidence)) // keep only people with confidence above a threshold if use_disparity == true or all detections if use_disparity == false - { - output_msg->detections.push_back(input_msg->detections[i]); - if (use_disparity) - output_msg->detections[k].confidence = confidences[i]; - - k++; - } - } - } - - void - imageCb(const ImageConstPtr& image_msg, - const DisparityImageConstPtr& disparity_msg, - const opt_msgs::DetectionArray::ConstPtr& detection_msg) - { - // Callback for people detection: - bool label_all; - vector L_in; - vector L_out; - vector C_out; - vector R_in; - vector R_out; - string param_name; - string nn = ros::this_node::getName(); - string cfnm; - int numSamples; - double temp=0.0; - - // check encoding and create an intensity image from disparity image - assert(disparity_msg->image.encoding == image_encodings::TYPE_32FC1); - cv::Mat_ dmatrix(disparity_msg->image.height, - disparity_msg->image.width, - (float*) 
&disparity_msg->image.data[0], - disparity_msg->image.step); - - if(!node_.getParam("UseMissingDataMask",HDAC_.useMissingDataMask_)){ - HDAC_.useMissingDataMask_ = false; - } - - - // **********************************************************************// - // between these comments lies a hack that accounts for the difference // - // between the focal lengths of the kinect's color camera and ir cameras // - // TODO, parameterize to disable this hack for the stereo data // - bool kinect_disparity_fix; - if(!node_.getParam("Kinect_Disparity_Fix",kinect_disparity_fix)){ - kinect_disparity_fix = false; - } - - if(kinect_disparity_fix){ - int nrows = 434; - int ncols = 579; - int row_offset = (dmatrix.rows - nrows)/2; - int col_offset = (dmatrix.cols - ncols)/2; - cv::Mat Scaled_Disparity(nrows,ncols,CV_32FC1); - resize(dmatrix,Scaled_Disparity,cv::Size(ncols,nrows),0,0, CV_INTER_NN ); - for(int i=0;i(i,j) = 0.0; - } - } - for(int i=0;i(i+row_offset,j+col_offset) = Scaled_Disparity.at(i,j); - } - } - } - // **********************************************************************// - - // take the detection message and create vectors of ROIs and labels - R_in.clear(); - L_in.clear(); - - // Read camera intrinsic parameters: - Eigen::Matrix3f intrinsic_matrix; - for(int i = 0; i < 3; i++) - for(int j = 0; j < 3; j++) - intrinsic_matrix(i, j) = detection_msg->intrinsic_matrix[i * 3 + j]; - - // Read detections: - for(unsigned int i=0;idetections.size();i++) - { - // theoretical person centroid: - Eigen::Vector3f centroid3d(detection_msg->detections[i].centroid.x, detection_msg->detections[i].centroid.y, detection_msg->detections[i].centroid.z); - Eigen::Vector3f centroid2d = converter.world2cam(centroid3d, intrinsic_matrix); - - // theoretical person top point: - Eigen::Vector3f top3d(detection_msg->detections[i].top.x, detection_msg->detections[i].top.y, detection_msg->detections[i].top.z); - Eigen::Vector3f top2d = converter.world2cam(top3d, intrinsic_matrix); - - // Define Rect and make sure it is not out of the image: - int h = centroid2d(1) - top2d(1); - int w = h * 2 / 3.0; - int x = std::max(0, int(centroid2d(0) - w / 2.0)); - int y = std::max(0, int(top2d(1))); - h = std::min(int(disparity_msg->image.height - y), int(h)); - w = std::min(int(disparity_msg->image.width - x), int(w)); - - Rect R(x,y,w,h); - R_in.push_back(R); - L_in.push_back(1); - } - - // do the work of the node - switch(get_mode()){ - case DETECT: - // Perform people detection within the input rois: - label_all = true; - HDAC_.detect(R_in,L_in,dmatrix,R_out,L_out,C_out,label_all); - - // Build output detections message: - createOutputDetectionsMessage(detection_msg, C_out, output_detection_msg_); - - output_rois_.rois.clear(); - output_rois_.header.stamp = image_msg->header.stamp; - output_rois_.header.frame_id = image_msg->header.frame_id; -// ROS_INFO("HaarDispAda found %d objects",(int)L_out.size()); - for(unsigned int i=0; i= NS){ - param_name = "mode"; - node_.setParam(param_name, std::string("train")); - ROS_ERROR("DONE Accumulating, switching to train mode"); - } - } - break; - case TRAIN: - param_name = "classifier_file"; - node_.param(param_name,cfnm,std::string("/home/clewis/classifiers/test.xml")); - param_name = "HaarDispAdaPrior"; - node_.getParam(param_name,temp); - HDAC_.HaarDispAdaPrior_ = temp; - ROS_ERROR("Submitting %d Samples to Train ouput= %s",HDAC_.numSamples_,cfnm.c_str()); - HDAC_.train(cfnm); - param_name = "mode"; - node_.setParam("mode", std::string("evaluate")); - ROS_ERROR("DONE TRAINING, 
switching to evaluate mode"); - break; - case EVALUATE: - { - if(!HDAC_.loaded){ - param_name = "classifier_file"; - node_.param(param_name,cfnm,std::string("test.xml")); - std::cout << "HaarDispAda LOADING " << cfnm.c_str() << std::endl; - HDAC_.load(cfnm); - } - label_all = false; - HDAC_.detect(R_in,L_in,dmatrix,R_out,L_out,label_all); - - int total0_in_list=0; - int total1_in_list=0; - int fp_in_list=0; - int tp_in_list=0; - - // count the input labels - for(unsigned int i=0; i Date: Mon, 12 Dec 2016 18:01:13 -0800 Subject: [PATCH 07/28] Delete haardispada_node_sr.cpp~ --- detection/apps/haardispada_node_sr.cpp~ | 462 ------------------------ 1 file changed, 462 deletions(-) delete mode 100644 detection/apps/haardispada_node_sr.cpp~ diff --git a/detection/apps/haardispada_node_sr.cpp~ b/detection/apps/haardispada_node_sr.cpp~ deleted file mode 100644 index 97194e2..0000000 --- a/detection/apps/haardispada_node_sr.cpp~ +++ /dev/null @@ -1,462 +0,0 @@ -/* -Software License Agreement (BSD License) - -Copyright (c) 2013, Southwest Research Institute -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the Southwest Research Institute, nor the names - of its contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "open_ptrack/detection/haardispada.h" -#include "open_ptrack/opt_utils/conversions.h" - -#include "ros/ros.h" -#include - -//Publish Messages -#include "opt_msgs/RoiRect.h" -#include "opt_msgs/Rois.h" -#include "opt_msgs/DetectionArray.h" -#include "std_msgs/String.h" - -//Time Synchronizer -// NOTE: Time Synchronizer conflicts with QT includes may need to investigate -#include -#include -#include -#include - -//Subscribe Messages -#include -#include -#include -#include -#include - -// Image Transport -#include -#include - -// Used to display OPENCV images -#include -#include - -using namespace stereo_msgs; -using namespace message_filters::sync_policies; -using namespace opt_msgs; -using namespace sensor_msgs; -using namespace sensor_msgs::image_encodings; -using sensor_msgs::Image; -using cv_bridge::CvImagePtr; -using std::vector; -using std::string; -using cv::Rect; -using cv::Mat; - -class HaarDispAdaNode -{ - private: - // Define Node - ros::NodeHandle node_; - - // Subscribe to Messages - message_filters::Subscriber sub_disparity_; - message_filters::Subscriber sub_image_; - message_filters::Subscriber sub_detections_; - - // Define the Synchronizer - typedef ApproximateTime ApproximatePolicy; - typedef message_filters::Synchronizer ApproximateSync; - boost::shared_ptr approximate_sync_; - - // Messages to Publish - ros::Publisher pub_rois_; - ros::Publisher pub_Color_Image_; - ros::Publisher pub_Disparity_Image_; - ros::Publisher pub_detections_; - - Rois output_rois_; - - enum {ACCUMULATE=0, TRAIN, DETECT, EVALUATE, LOAD}; - - //Define the Classifier Object - open_ptrack::detection::HaarDispAdaClassifier HDAC_; - - int num_class1; - int num_class0; - int num_TP_class1; - int num_FP_class1; - int num_TP_class0; - int num_FP_class0; - - // Flag stating if classifiers based on disparity image should be used or not: - bool use_disparity; - - // Minimum classifier confidence for people detection: - double min_confidence; - - // Object of class Conversions: - open_ptrack::opt_utils::Conversions converter; - - // Output detections message: - DetectionArray::Ptr output_detection_msg_; - - public: - - explicit HaarDispAdaNode(const ros::NodeHandle& nh): - node_(nh) - { - num_class1 = 0; - num_class0 = 0; - num_TP_class1 = 0; - num_FP_class1 = 0; - num_TP_class0 = 0; - num_FP_class0 = 0; - - string nn = ros::this_node::getName(); - int qs; - if(!node_.getParam(nn + "/Q_Size",qs)){ - qs=3; - } - - if(!node_.getParam(nn + "/use_disparity", use_disparity)){ - use_disparity = true; - } - - if(!node_.getParam(nn + "/haar_disp_ada_min_confidence", min_confidence)){ - min_confidence = 3.0; - } - HDAC_.setMinConfidence(min_confidence); - - int NS; - if(!node_.getParam(nn + "/num_Training_Samples",NS)){ - NS = 350; // default sets asside very little for training - node_.setParam(nn + "/num_Training_Samples",NS); - } - HDAC_.setMaxSamples(NS); - - output_detection_msg_ = DetectionArray::Ptr(new DetectionArray); - - // Published Messages - pub_rois_ = node_.advertise("HaarDispAdaOutputRois",qs); - pub_Color_Image_ = node_.advertise("HaarDispAdaColorImage",qs); - pub_Disparity_Image_= node_.advertise("HaarDispAdaDisparityImage",qs); - pub_detections_ = node_.advertise("/detector/detections",3); - - // Subscribe to Messages - sub_image_.subscribe(node_,"Color_Image",qs); - sub_disparity_.subscribe(node_, "Disparity_Image",qs); - sub_detections_.subscribe(node_,"input_detections",qs); - - // Sync the Synchronizer - approximate_sync_.reset(new ApproximateSync(ApproximatePolicy(qs), - 
sub_image_, - sub_disparity_, - sub_detections_)); - - approximate_sync_->registerCallback(boost::bind(&HaarDispAdaNode::imageCb, - this, - _1, - _2, - _3)); - } - - int - get_mode() - { - - int callback_mode; - std::string mode=""; - node_.param(ros::this_node::getName() + "/mode", mode, std::string("none")); - if(mode.compare("detect") == 0) - { - callback_mode = DETECT; - } - else if(mode.compare("train")==0) - { - callback_mode = TRAIN; - } - else if(mode.compare("load")==0) - { - callback_mode = LOAD; - } - else if(mode.compare("evaluate")==0) - { - callback_mode = EVALUATE; - } - else if(mode.compare("accumulate")==0) - { - callback_mode = ACCUMULATE; - } - else // default mode is accumulate - { - callback_mode = ACCUMULATE; - } - return(callback_mode); - } - - void - createOutputDetectionsMessage(const DetectionArray::ConstPtr& input_msg, vector confidences, DetectionArray::Ptr& output_msg) - { - // Set camera-specific fields: - output_msg->detections.clear(); - output_msg->header = input_msg->header; - output_msg->intrinsic_matrix = input_msg->intrinsic_matrix; - output_msg->confidence_type = std::string("haar+ada"); - output_msg->image_type = std::string("disparity"); - - // Add all valid detections: - int k = 0; - for(unsigned int i = 0; i < input_msg->detections.size(); i++) - { - if((!use_disparity) | (confidences[i] > min_confidence)) // keep only people with confidence above a threshold - { - output_msg->detections.push_back(input_msg->detections[i]); - output_msg->detections[k].confidence = confidences[i]; - k++; - } - } - } - - void - imageCb(const ImageConstPtr& image_msg, - const ImageConstPtr& disparity_msg, - const opt_msgs::DetectionArray::ConstPtr& detection_msg) - { - // Callback for people detection: - bool label_all; - vector L_in; - vector L_out; - vector C_out; - vector R_in; - vector R_out; - string param_name; - string nn = ros::this_node::getName(); - string cfnm; - int numSamples; - double temp=0.0; - - // check encoding and create an intensity image from disparity image - assert(disparity_msg->encoding == image_encodings::TYPE_32FC1); - cv::Mat_ dmatrix(disparity_msg->height, - disparity_msg->width, - (float*) &disparity_msg->data[0], - disparity_msg->step); - - if(!node_.getParam(nn + "/UseMissingDataMask",HDAC_.useMissingDataMask_)){ - HDAC_.useMissingDataMask_ = false; - } - - cv_bridge::CvImagePtr cv_ptr = cv_bridge::toCvCopy(disparity_msg, image_encodings::MONO8); -// cv::Mat distance_image = cv_ptr->image; -// cv::imwrite("/home/matteo/distance.png", distance_image); - - // **********************************************************************// - // between these comments lies a hack that accounts for the difference // - // between the focal lengths of the kinect's color camera and ir cameras // - // TODO, parameterize to disable this hack for the stereo data // - bool kinect_disparity_fix; - if(!node_.getParam(nn + "/Kinect_Disparity_Fix",kinect_disparity_fix)){ - kinect_disparity_fix = false; - } - - if(kinect_disparity_fix){ - int nrows = 434; - int ncols = 579; - int row_offset = (dmatrix.rows - nrows)/2; - int col_offset = (dmatrix.cols - ncols)/2; - cv::Mat Scaled_Disparity(nrows,ncols,CV_32FC1); - resize(dmatrix,Scaled_Disparity,cv::Size(ncols,nrows),0,0, CV_INTER_NN ); - for(int i=0;i(i,j) = 0.0; - } - } - for(int i=0;i(i+row_offset,j+col_offset) = Scaled_Disparity.at(i,j); - } - } - } - // **********************************************************************// - - // take the detection message and create vectors of ROIs and labels - 
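// Note on the block above: the fixed 434x579 crop rescales the disparity
// map to compensate for the focal-length difference between the Kinect's
// RGB and IR cameras (see the TODO); nearest-neighbor resizing
// (CV_INTER_NN) is used so disparity values are copied, not interpolated.
// The hack stays off unless the Kinect_Disparity_Fix parameter is set.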
R_in.clear(); - L_in.clear(); - - // Read camera intrinsic parameters: - Eigen::Matrix3f intrinsic_matrix; - for(int i = 0; i < 3; i++) - for(int j = 0; j < 3; j++) - intrinsic_matrix(i, j) = detection_msg->intrinsic_matrix[i * 3 + j]; - - // Read detections: - for(unsigned int i=0;idetections.size();i++) - { - // theoretical person centroid: - Eigen::Vector3f centroid3d(detection_msg->detections[i].centroid.x, detection_msg->detections[i].centroid.y, detection_msg->detections[i].centroid.z); - Eigen::Vector3f centroid2d = converter.world2cam(centroid3d, intrinsic_matrix); - - // theoretical person top point: - Eigen::Vector3f top3d(detection_msg->detections[i].top.x, detection_msg->detections[i].top.y, detection_msg->detections[i].top.z); - Eigen::Vector3f top2d = converter.world2cam(top3d, intrinsic_matrix); - - // Define Rect and make sure it is not out of the image: - int h = centroid2d(1) - top2d(1); - int w = h * 2 / 3.0; - int x = std::max(0, int(centroid2d(0) - w / 2.0)); - int y = std::max(0, int(top2d(1))); - h = std::min(int(disparity_msg->height - y), int(h)); - w = std::min(int(disparity_msg->width - x), int(w)); - - Rect R(x,y,w,h); - R_in.push_back(R); - L_in.push_back(1); - } - - // do the work of the node - switch(get_mode()){ - case DETECT: - // Perform people detection within the input rois: - label_all = true; - HDAC_.detect(R_in,L_in,dmatrix,R_out,L_out,C_out,label_all); - - // Build output detections message: - createOutputDetectionsMessage(detection_msg, C_out, output_detection_msg_); - - output_rois_.rois.clear(); - output_rois_.header.stamp = image_msg->header.stamp; - output_rois_.header.frame_id = image_msg->header.frame_id; -// ROS_INFO("HaarDispAda found %d objects",(int)L_out.size()); - for(unsigned int i=0; i= NS){ - param_name = nn + "/mode"; - node_.setParam(param_name, std::string("train")); - ROS_ERROR("DONE Accumulating, switching to train mode"); - } - } - break; - case TRAIN: - param_name = nn + "/classifier_file"; - node_.param(param_name,cfnm,std::string("/home/clewis/classifiers/test.xml")); - param_name = nn + "/HaarDispAdaPrior"; - node_.getParam(param_name,temp); - HDAC_.HaarDispAdaPrior_ = temp; - ROS_ERROR("Submitting %d Samples to Train ouput= %s",HDAC_.numSamples_,cfnm.c_str()); - HDAC_.train(cfnm); - param_name = nn + "/mode"; - node_.setParam(nn + "/mode", std::string("evaluate")); - ROS_ERROR("DONE TRAINING, switching to evaluate mode"); - break; - case EVALUATE: - { - if(!HDAC_.loaded){ - param_name = nn + "/classifier_file"; - node_.param(param_name,cfnm,std::string("test.xml")); - std::cout << "HaarDispAda LOADING " << cfnm.c_str() << std::endl; - HDAC_.load(cfnm); - } - label_all = false; - HDAC_.detect(R_in,L_in,dmatrix,R_out,L_out,label_all); - - int total0_in_list=0; - int total1_in_list=0; - int fp_in_list=0; - int tp_in_list=0; - - // count the input labels - for(unsigned int i=0; i Date: Mon, 12 Dec 2016 18:01:22 -0800 Subject: [PATCH 08/28] Delete haardispada_nodelet.cpp~ --- detection/apps/haardispada_nodelet.cpp~ | 396 ------------------------ 1 file changed, 396 deletions(-) delete mode 100644 detection/apps/haardispada_nodelet.cpp~ diff --git a/detection/apps/haardispada_nodelet.cpp~ b/detection/apps/haardispada_nodelet.cpp~ deleted file mode 100644 index 92420a4..0000000 --- a/detection/apps/haardispada_nodelet.cpp~ +++ /dev/null @@ -1,396 +0,0 @@ -/* - * Software License Agreement (BSD License) - * - * Copyright (c) 2013, Southwest Research Institute - * - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the copyright holder(s) nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * haardispada_nodelet.cpp - * Created on: Nov, 2012 - * - */ - -#include "open_ptrack/detection/haardispada.h" - -#include "ros/ros.h" -#include "nodelet/nodelet.h" -#include - -//Publish Messages -#include "opt_msgs/RoiRect.h" -#include "opt_msgs/Rois.h" -#include "std_msgs/String.h" - -//Time Synchronizer -// NOTE: Time Synchronizer conflicts with QT includes may need to investigate -#include -#include -#include -#include - -//Subscribe Messages -#include -#include -#include -#include -#include - -// Image Transport -#include -#include - -// Used to display OPENCV images -#include -#include - -using namespace stereo_msgs; -using namespace message_filters::sync_policies; -using namespace opt_msgs; -using namespace sensor_msgs; -using namespace sensor_msgs::image_encodings; -using sensor_msgs::Image; -using cv_bridge::CvImagePtr; -using std::vector; -using std::string; -using cv::Rect; -using cv::Mat; - -namespace open_ptrack -{ - namespace detection - { - class HaarDispAdaNodelet: public nodelet::Nodelet - { - private: - // Define Node - ros::NodeHandle node_; - ros::NodeHandle private_node_; - - // Subscribe to Messages - message_filters::Subscriber sub_disparity_; - message_filters::Subscriber sub_image_; - message_filters::Subscriber sub_rois_; - - // Define the Synchronizer - // typedef ApproximateTime ApproximatePolicy; - typedef ExactTime ApproximatePolicy; - typedef message_filters::Synchronizer ApproximateSync; - boost::shared_ptr approximate_sync_; - - // Messages to Publish - ros::Publisher pub_rois_; - ros::Publisher pub_Color_Image_; - ros::Publisher pub_Disparity_Image_; - - Rois output_rois_; - - enum {ACCUMULATE=0, TRAIN, DETECT, EVALUATE, LOAD}; - - //Define the Classifier Object - open_ptrack::detection::HaarDispAdaClassifier HDAC_; - - int num_class1; - int num_class0; - int num_TP_class1; - int num_FP_class1; - int num_TP_class0; - int num_FP_class0; - - public: - virtual void - onInit() - { - node_ = getNodeHandle(); - private_node_ = getPrivateNodeHandle(); - - 
num_class1 = 0; - num_class0 = 0; - num_TP_class1 = 0; - num_FP_class1 = 0; - num_TP_class0 = 0; - num_FP_class0 = 0; - - int qs; - if(!private_node_.getParam("Q_Size",qs)){ - ROS_ERROR("could not find Q_Size parameter"); - qs=3; - } - - int NS; - if(!private_node_.getParam("num_Training_Samples",NS)){ - NS = 350; // default sets asside very little for training - private_node_.setParam("num_Training_Samples",NS); - } - HDAC_.setMaxSamples(NS); - - // Published Messages - pub_rois_ = node_.advertise("HaarDispAdaOutputRois",qs); - pub_Color_Image_ = node_.advertise("HaarDispAdaColorImage",qs); - pub_Disparity_Image_= node_.advertise("HaarDispAdaDisparityImage",qs); - - // Subscribe to Messages - sub_image_.subscribe(node_,"Color_Image",qs); - sub_disparity_.subscribe(node_, "Disparity_Image",qs); - sub_rois_.subscribe(node_,"input_rois",qs); - - // Sync the Synchronizer - approximate_sync_.reset(new ApproximateSync(ApproximatePolicy(qs), - sub_image_, - sub_disparity_, - sub_rois_)); - - approximate_sync_->registerCallback(boost::bind(&HaarDispAdaNodelet::imageCb, - this, - _1, - _2, - _3)); - } - int - get_mode() - { - - int callback_mode; - std::string mode; - if(!private_node_.getParam("mode", mode)){ - ROS_ERROR("no mode set in HaarDispAda_Nodelet"); - mode = "load"; - } - if(mode.compare("detect") == 0) - { - callback_mode = DETECT; - } - else if(mode.compare("train")==0) - { - callback_mode = TRAIN; - } - else if(mode.compare("load")==0) - { - callback_mode = LOAD; - } - else if(mode.compare("evaluate")==0) - { - callback_mode = EVALUATE; - } - else if(mode.compare("accumulate")==0) - { - callback_mode = ACCUMULATE; - } - else // default mode is accumulate - { - callback_mode = ACCUMULATE; - } - return(callback_mode); - } - void - imageCb(const ImageConstPtr& image_msg, - const DisparityImageConstPtr& disparity_msg, - const RoisConstPtr& rois_msg){ - - bool label_all; - vector L_in; - vector L_out; - vector R_in; - vector R_out; - string param_name; - string cfnm; - int numSamples; - double temp=0.0; - - // check encoding and create an intensity image from disparity image - assert(disparity_msg->image.encoding == image_encodings::TYPE_32FC1); - cv::Mat_ dmatrix(disparity_msg->image.height, - disparity_msg->image.width, - (float*) &disparity_msg->image.data[0], - disparity_msg->image.step); - - if(!private_node_.getParam("UseMissingDataMask",HDAC_.useMissingDataMask_)){ - HDAC_.useMissingDataMask_ = false; - } - - // **********************************************************************// - // between these comments lies a hack that accounts for the difference // - // between the focal lengths of the kinect's color camera and ir cameras // - // TODO, parameterize to disable this hack for the stereo data // - bool kinect_disparity_fix; - if(!private_node_.getParam("Kinect_Disparity_Fix",kinect_disparity_fix)){ - ROS_ERROR("could not find Kinect_Disparity_Fix parameter"); - kinect_disparity_fix = false; - } - - if(kinect_disparity_fix){ - int nrows = 434; - int ncols = 579; - int row_offset = (dmatrix.rows - nrows)/2; - int col_offset = (dmatrix.cols - ncols)/2; - cv::Mat Scaled_Disparity(nrows,ncols,CV_32FC1); - resize(dmatrix,Scaled_Disparity,cv::Size(ncols,nrows),0,0, CV_INTER_NN ); - for(int i=0;i(i,j) = 0.0; - } - } - for(int i=0;i(i+row_offset,j+col_offset) = Scaled_Disparity.at(i,j); - } - } - } - // **********************************************************************// - - // take the region of interest message and create vectors of ROIs and labels - R_in.clear(); - 
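// (The Kinect disparity fix above lost its loop bounds and at<float> accessors
// to the same angle-bracket stripping. A plausible reconstruction, assuming
// the usual blank-then-recenter pattern; not verified against the source:
//
//   for (int i = 0; i < row_offset; i++)
//     for (int j = 0; j < dmatrix.cols; j++)
//       dmatrix.at<float>(i, j) = 0.0;          // blank the border rows
//   for (int i = 0; i < nrows; i++)
//     for (int j = 0; j < ncols; j++)
//       dmatrix.at<float>(i + row_offset, j + col_offset) =
//           Scaled_Disparity.at<float>(i, j);   // paste the rescaled 579x434 disparity, centered
// )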
L_in.clear(); - for(unsigned int i=0;irois.size();i++){ - int x = rois_msg->rois[i].x; - int y = rois_msg->rois[i].y; - int w = rois_msg->rois[i].width; - int h = rois_msg->rois[i].height; - int l = rois_msg->rois[i].label; - Rect R(x,y,w,h); - R_in.push_back(R); - L_in.push_back(l); - } - - // do the work of the node - switch(get_mode()){ - case DETECT: - label_all = false; - HDAC_.detect(R_in,L_in,dmatrix,R_out,L_out,label_all); - output_rois_.rois.clear(); - output_rois_.header.stamp = image_msg->header.stamp; - output_rois_.header.frame_id = image_msg->header.frame_id; - ROS_INFO("HaarDispAda found %d objects",(int)L_out.size()); - for(unsigned int i=0; i= NS){ - private_node_.setParam("mode", std::string("train")); - ROS_ERROR("DONE Accumulating, switching to train mode"); - } - } - else{ - ROS_ERROR("could not find num_Training_Samples"); - } - break; - case TRAIN: - if(!private_node_.getParam("classifier_file",cfnm)){ - ROS_ERROR("could not find classifier_file param"); - } - if(!private_node_.getParam("HaarDispAdaPrior",temp)){ - ROS_ERROR("could not find HaarDispAdaPrior param"); - } - HDAC_.HaarDispAdaPrior_ = temp; - ROS_ERROR("Submitting %d Samples to Train ouput= %s",HDAC_.numSamples_,cfnm.c_str()); - HDAC_.train(cfnm); - private_node_.setParam("mode", std::string("evaluate")); - ROS_ERROR("DONE TRAINING, switching to evaluate mode"); - break; - case EVALUATE: - { - if(!HDAC_.loaded){ - if(!private_node_.getParam("classifier_file",cfnm)){ - ROS_ERROR("could not find classifier_file param"); - } - ROS_ERROR("HaarDispAda LOADING %s",cfnm.c_str()); - HDAC_.load(cfnm); - } - label_all = false; - HDAC_.detect(R_in,L_in,dmatrix,R_out,L_out,label_all); - - int total0_in_list=0; - int total1_in_list=0; - int fp_in_list=0; - int tp_in_list=0; - - // count the input labels - for(unsigned int i=0; i -// PLUGINLIB_DECLARE_CLASS(pkg,class_name,class_type,base_class_type) -PLUGINLIB_DECLARE_CLASS(HaarDispAda,haardispada_nodelet, open_ptrack::detection::HaarDispAdaNodelet, nodelet::Nodelet) From abfa1cf29f0f8f202ec35ec0c80e38c39ad35865 Mon Sep 17 00:00:00 2001 From: chanbrown007 Date: Mon, 12 Dec 2016 18:02:43 -0800 Subject: [PATCH 09/28] Delete CMakeLists.txt~ --- detection/CMakeLists.txt~ | 66 --------------------------------------- 1 file changed, 66 deletions(-) delete mode 100644 detection/CMakeLists.txt~ diff --git a/detection/CMakeLists.txt~ b/detection/CMakeLists.txt~ deleted file mode 100644 index c5c43a7..0000000 --- a/detection/CMakeLists.txt~ +++ /dev/null @@ -1,66 +0,0 @@ -cmake_minimum_required(VERSION 2.8.3) -project(detection) -set(CMAKE_BUILD_TYPE RelWithDebInfo) -find_package(catkin REQUIRED COMPONENTS cmake_modules roscpp rospy pcl_ros pcl_conversions nodelet rosconsole image_transport opt_utils opt_msgs dynamic_reconfigure) - -find_package(VTK REQUIRED) -if(VTK_FOUND) - include_directories(${VTK_INCLUDE_DIRS}) - link_directories(${VTK_LIBRARY_DIRS}) - add_definitions(${VTK_DEFINITIONS}) - include (${VTK_USE_FILE}) -endif() - -find_package(OpenCV 2.4 REQUIRED) -include_directories(${OpenCV_INCLUDE_DIRS}) -link_directories(${OpenCV_LIB_DIR}) -add_definitions(${OpenCV_DEFINITIONS}) - - -find_package(Eigen REQUIRED) -include_directories(${Eigen_INCLUDE_DIRS} include ${catkin_INCLUDE_DIRS}) - -find_package(PCL 1.7 REQUIRED) -include_directories(BEFORE ${PCL_INCLUDE_DIRS}) -link_directories(${PCL_LIBRARY_DIRS}) -add_definitions(${PCL_DEFINITIONS}) -MESSAGE("PCL_INCLUDE_DIRS\n${PCL_INCLUDE_DIRS}\n") -MESSAGE("PCL_LIBRARY_DIRS\n${PCL_LIBRARY_DIRS}\n") 
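# NOTE: this CMakeLists pins find_package(OpenCV 2.4 REQUIRED) while the
# install notes at the top of this patch build OpenCV 3.1 into
# /usr/local/opencv3. A sketch of how a target could opt into that separate
# install (the OpenCVConfig.cmake location is assumed from the install prefix
# and should be verified):
#
#   set(OpenCV_DIR /usr/local/opencv3/share/OpenCV)
#   find_package(OpenCV 3.1 REQUIRED)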
-MESSAGE("PCL_DEFINITIONS\n${PCL_DEFINITIONS}\n") -MESSAGE("PCL_COMMON_LIBRARIES\n${PCL_COMMON_LIBRARIES}\n") -if (NOT PCL_FOUND) - MESSAGE(FATAL_ERROR "PCL not found.\n") -endif (NOT PCL_FOUND) - -# Dynamic reconfigure support -generate_dynamic_reconfigure_options(cfg/HaarDispAdaDetector.cfg cfg/GroundBasedPeopleDetector.cfg) - -include_directories(cfg/cpp) - -catkin_package( - INCLUDE_DIRS include - LIBRARIES ${PROJECT_NAME} - CATKIN_DEPENDS roscpp pcl_ros pcl_conversions opt_utils opt_msgs -) - - -add_library(${PROJECT_NAME} src/detection_source.cpp src/detection.cpp) -add_dependencies(${PROJECT_NAME} ${PROJECT_NAME}_gencfg) -target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES} ${OpenCV_LIBS}) - -add_executable(ground_based_people_detector apps/ground_based_people_detector_node.cpp) -SET_TARGET_PROPERTIES(ground_based_people_detector PROPERTIES LINK_FLAGS -L${PCL_LIBRARY_DIRS}) -target_link_libraries(ground_based_people_detector ${PROJECT_NAME} ${catkin_LIBRARIES} ${PCL_LIBRARIES} vtkHybrid vtkRendering) - -add_executable(ground_based_people_detector_sr apps/ground_based_people_detector_node_sr.cpp) -SET_TARGET_PROPERTIES(ground_based_people_detector_sr PROPERTIES LINK_FLAGS -L${PCL_LIBRARY_DIRS}) -target_link_libraries(ground_based_people_detector_sr ${PROJECT_NAME} ${catkin_LIBRARIES} ${PCL_LIBRARIES} vtkHybrid vtkRendering) - -add_library(haar_disp_ada src/haardispada.cpp apps/haardispada_nodelet.cpp) -target_link_libraries(haar_disp_ada ${catkin_LIBRARIES}) - -add_executable(HaarDispAda174 apps/haardispada_node.cpp) -target_link_libraries(HaarDispAda174 haar_disp_ada ${PROJECT_NAME} ${catkin_LIBRARIES} ${OpenCV_LIBS} boost_system boost_signals) - -add_executable(HaarDispAda174_sr apps/haardispada_node_sr.cpp) -target_link_libraries(HaarDispAda174_sr haar_disp_ada ${PROJECT_NAME} ${catkin_LIBRARIES} ${OpenCV_LIBS} boost_system boost_signals) From 7c85bf3b3cfad19ce94fe40e6c323f4f55d4899a Mon Sep 17 00:00:00 2001 From: chanbrown007 Date: Mon, 12 Dec 2016 18:04:01 -0800 Subject: [PATCH 10/28] Delete ground_based_people_detector_zed.yaml~ --- .../ground_based_people_detector_zed.yaml~ | 52 ------------------- 1 file changed, 52 deletions(-) delete mode 100644 detection/conf/ground_based_people_detector_zed.yaml~ diff --git a/detection/conf/ground_based_people_detector_zed.yaml~ b/detection/conf/ground_based_people_detector_zed.yaml~ deleted file mode 100644 index 550747f..0000000 --- a/detection/conf/ground_based_people_detector_zed.yaml~ +++ /dev/null @@ -1,52 +0,0 @@ -####################### -## Ground estimation ## -####################### -# Ground estimation mode - 0:manual, 1:semi-auto, 2:auto with user validation, 3:fully automatic (default 0) -ground_estimation_mode: 0 -# Flag stating if the ground should be read from file, if present: -read_ground_from_file: false -# Flag that locks the ground plane update: -lock_ground: false -# Threshold on the ratio of valid points needed for ground estimation: -valid_points_threshold: 0.1 - -############################ -## Background subtraction ## -############################ -# Flag enabling/disabling background subtraction: -background_subtraction: false -# Resolution of the octree representing the background: -background_resolution: 0.3 -# Seconds to use to learn the background: -background_seconds: 3.0 - -############################################## -## Ground based people detection parameters ## -############################################## -# Minimum detection confidence: -ground_based_people_detection_min_confidence: -5 -# 
Minimum person height:
-minimum_person_height: 0.5
-# Maximum person height:
-maximum_person_height: 2.3
-# Kinect's best range is 5.0 m, greater is possible w/ larger error
-max_distance: 10.0
-# Point cloud downsampling factor: 4
-sampling_factor: 4
-# Flag stating if classifiers based on RGB image should be used or not:
-use_rgb: true
-# Threshold on image luminance. If luminance is over this threshold, classifiers on RGB image are also used:
-minimum_luminance: 120
-# If true, sensor tilt angle wrt ground plane is compensated to improve people detection:
-sensor_tilt_compensation: false
-# Minimum distance between two persons:
-heads_minimum_distance: 0.3
-# Voxel size used to downsample the point cloud (lower: detection slower but more precise; higher: detection faster but less precise):
-voxel_size: .06
-# Denoising flag. If true, a statistical filter is applied to the point cloud to remove noise:
-apply_denoising: false
-# MeanK for denoising (the higher it is, the stronger is the filtering):
-mean_k_denoising: 5
-# Standard deviation for denoising (the lower it is, the stronger is the filtering):
-std_dev_denoising: 0.3
-

From dca96d47edc1aae334a65b629e515c726ddf8339 Mon Sep 17 00:00:00 2001
From: chanbrown007
Date: Mon, 12 Dec 2016 18:04:28 -0800
Subject: [PATCH 11/28] Delete ground_based_people_detection_app.hpp~

---
 .../ground_based_people_detection_app.hpp~ | 640 ------------------
 1 file changed, 640 deletions(-)
 delete mode 100644 detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp~

diff --git a/detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp~ b/detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp~
deleted file mode 100644
index 2be5728..0000000
--- a/detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp~
+++ /dev/null
@@ -1,640 +0,0 @@
-/*
- * Software License Agreement (BSD License)
- *
- * Copyright (c) 2013-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * ground_based_people_detection_app.hpp - * Created on: Nov 30, 2012 - * Author: Matteo Munaro - */ - -#ifndef OPEN_PTRACK_DETECTION_GROUND_BASED_PEOPLE_DETECTION_APP_HPP_ -#define OPEN_PTRACK_DETECTION_GROUND_BASED_PEOPLE_DETECTION_APP_HPP_ - -#include - -template -open_ptrack::detection::GroundBasedPeopleDetectionApp::GroundBasedPeopleDetectionApp () -{ -// denoising_viewer_ = pcl::visualization::PCLVisualizer::Ptr (new pcl::visualization::PCLVisualizer("filtering_viewer")); - - rgb_image_ = pcl::PointCloud::Ptr(new pcl::PointCloud); - - // set default values for optional parameters: - sampling_factor_ = 1; - voxel_size_ = 0.06; - max_distance_ = 50.0; - vertical_ = false; - head_centroid_ = true; - min_height_ = 1.3; - max_height_ = 2.3; - min_points_ = 30; // this value is adapted to the voxel size in method "compute" - max_points_ = 5000; // this value is adapted to the voxel size in method "compute" - dimension_limits_set_ = false; - heads_minimum_distance_ = 0.3; - use_rgb_ = true; - mean_luminance_ = 0.0; - sensor_tilt_compensation_ = false; - background_subtraction_ = false; - - // set flag values for mandatory parameters: - sqrt_ground_coeffs_ = std::numeric_limits::quiet_NaN(); - person_classifier_set_flag_ = false; - frame_counter_ = 0; -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::setInputCloud (PointCloudPtr& cloud) -{ - cloud_ = cloud; -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::setGround (Eigen::VectorXf& ground_coeffs) -{ - ground_coeffs_ = ground_coeffs; - sqrt_ground_coeffs_ = (ground_coeffs - Eigen::Vector4f(0.0f, 0.0f, 0.0f, ground_coeffs(3))).norm(); -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::setSamplingFactor (int sampling_factor) -{ - sampling_factor_ = sampling_factor; -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::setVoxelSize (float voxel_size) -{ - voxel_size_ = voxel_size; -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::setDenoisingParameters (bool apply_denoising, int mean_k_denoising, float std_dev_denoising) -{ - apply_denoising_ = apply_denoising; - mean_k_denoising_ = mean_k_denoising; - std_dev_denoising_ = std_dev_denoising; -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::setMaxDistance (float max_distance) -{ - max_distance_ = max_distance; -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::setIntrinsics (Eigen::Matrix3f intrinsics_matrix) -{ - intrinsics_matrix_ = intrinsics_matrix; -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::setClassifier (open_ptrack::detection::PersonClassifier person_classifier) -{ - person_classifier_ = person_classifier; - person_classifier_set_flag_ = true; -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::setSensorPortraitOrientation (bool vertical) -{ - vertical_ = vertical; -} - -template void 
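// (Throughout this file the extraction dropped the template headers: each bare
// "template void" was almost certainly "template <typename PointT> void", with
// the methods qualified as GroundBasedPeopleDetectionApp<PointT>. For example,
// setVoxelSize would read as follows; a reconstruction, not verified against
// the source:
//
//   template <typename PointT> void
//   open_ptrack::detection::GroundBasedPeopleDetectionApp<PointT>::setVoxelSize (float voxel_size)
//   {
//     voxel_size_ = voxel_size;
//   }
// )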
-open_ptrack::detection::GroundBasedPeopleDetectionApp::setHeightLimits (float min_height, float max_height) -{ - min_height_ = min_height; - max_height_ = max_height; -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::setDimensionLimits (int min_points, int max_points) -{ - min_points_ = min_points; - max_points_ = max_points; - dimension_limits_set_ = true; -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::setMinimumDistanceBetweenHeads (float heads_minimum_distance) -{ - heads_minimum_distance_= heads_minimum_distance; -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::setHeadCentroid (bool head_centroid) -{ - head_centroid_ = head_centroid; -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::setSensorTiltCompensation (bool sensor_tilt_compensation) -{ - sensor_tilt_compensation_ = sensor_tilt_compensation; -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::setUseRGB (bool use_rgb) -{ - use_rgb_ = use_rgb; -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::setBackground (bool background_subtraction, float background_octree_resolution, PointCloudPtr& background_cloud) -{ - background_subtraction_ = background_subtraction; - - background_octree_ = new pcl::octree::OctreePointCloud(background_octree_resolution); - background_octree_->defineBoundingBox(-max_distance_/2, -max_distance_/2, 0.0, max_distance_/2, max_distance_/2, max_distance_); - background_octree_->setInputCloud (background_cloud); - background_octree_->addPointsFromInputCloud (); -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::getHeightLimits (float& min_height, float& max_height) -{ - min_height = min_height_; - max_height = max_height_; -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::getDimensionLimits (int& min_points, int& max_points) -{ - min_points = min_points_; - max_points = max_points_; -} - -template float -open_ptrack::detection::GroundBasedPeopleDetectionApp::getMinimumDistanceBetweenHeads () -{ - return (heads_minimum_distance_); -} - -template Eigen::VectorXf -open_ptrack::detection::GroundBasedPeopleDetectionApp::getGround () -{ - if (sqrt_ground_coeffs_ != sqrt_ground_coeffs_) - { - PCL_ERROR ("[open_ptrack::detection::GroundBasedPeopleDetectionApp::getGround] Floor parameters have not been set or they are not valid!\n"); - } - return (ground_coeffs_); -} - -template typename open_ptrack::detection::GroundBasedPeopleDetectionApp::PointCloudPtr -open_ptrack::detection::GroundBasedPeopleDetectionApp::getNoGroundCloud () -{ - return (no_ground_cloud_); -} - -template float -open_ptrack::detection::GroundBasedPeopleDetectionApp::getMeanLuminance () -{ - return (mean_luminance_); -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::getTiltCompensationTransforms (Eigen::Affine3f& transform, Eigen::Affine3f& anti_transform) -{ - transform = transform_; - anti_transform = anti_transform_; -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::extractRGBFromPointCloud (PointCloudPtr input_cloud, pcl::PointCloud::Ptr& output_cloud) -{ - // Extract RGB information from a point cloud and output the corresponding RGB point cloud - output_cloud->points.resize(input_cloud->height*input_cloud->width); - output_cloud->width = input_cloud->width; - output_cloud->height = input_cloud->height; - - pcl::RGB rgb_point; - for (int j = 0; j < input_cloud->width; j++) - { - for (int i = 
0; i < input_cloud->height; i++) - { - rgb_point.r = (*input_cloud)(j,i).r; - rgb_point.g = (*input_cloud)(j,i).g; - rgb_point.b = (*input_cloud)(j,i).b; - (*output_cloud)(j,i) = rgb_point; - } - } -} - -template void -open_ptrack::detection::GroundBasedPeopleDetectionApp::swapDimensions (pcl::PointCloud::Ptr& cloud) -{ - pcl::PointCloud::Ptr output_cloud(new pcl::PointCloud); - output_cloud->points.resize(cloud->height*cloud->width); - output_cloud->width = cloud->height; - output_cloud->height = cloud->width; - for (int i = 0; i < cloud->width; i++) - { - for (int j = 0; j < cloud->height; j++) - { - (*output_cloud)(j,i) = (*cloud)(cloud->width - i - 1, j); - } - } - cloud = output_cloud; -} - -template typename open_ptrack::detection::GroundBasedPeopleDetectionApp::PointCloudPtr -open_ptrack::detection::GroundBasedPeopleDetectionApp::rotateCloud(PointCloudPtr cloud, Eigen::Affine3f transform ) -{ - PointCloudPtr rotated_cloud (new PointCloud); - pcl::transformPointCloud(*cloud, *rotated_cloud, transform); - rotated_cloud->header.frame_id = cloud->header.frame_id; - - return rotated_cloud; - -} - -template Eigen::VectorXf -open_ptrack::detection::GroundBasedPeopleDetectionApp::rotateGround(Eigen::VectorXf ground_coeffs, Eigen::Affine3f transform){ - - Eigen::VectorXf ground_coeffs_new; - - // Create a cloud with three points on the input ground plane: - pcl::PointCloud::Ptr dummy (new pcl::PointCloud); - - pcl::PointXYZRGB first = pcl::PointXYZRGB(0.0,0.0,0.0); - first.x = 1.0; - pcl::PointXYZRGB second = pcl::PointXYZRGB(0.0,0.0,0.0); - second.y = 1.0; - pcl::PointXYZRGB third = pcl::PointXYZRGB(0.0,0.0,0.0); - third.x = 1.0; - third.y = 1.0; - - dummy->points.push_back( first ); - dummy->points.push_back( second ); - dummy->points.push_back( third ); - - for(uint8_t i = 0; i < dummy->points.size(); i++ ) - { // Find z given x and y: - dummy->points[i].z = (double) ( -ground_coeffs_(3) -(ground_coeffs_(0) * dummy->points[i].x) - (ground_coeffs_(1) * dummy->points[i].y) ) / ground_coeffs_(2); - } - - // Rotate them: - dummy = rotateCloud(dummy, transform); - - // Compute new ground coeffs: - std::vector indices; - for(unsigned int i = 0; i < dummy->points.size(); i++) - { - indices.push_back(i); - } - pcl::SampleConsensusModelPlane model_plane(dummy); - model_plane.computeModelCoefficients(indices, ground_coeffs_new); - - return ground_coeffs_new; -} - -template typename open_ptrack::detection::GroundBasedPeopleDetectionApp::PointCloudPtr -open_ptrack::detection::GroundBasedPeopleDetectionApp::preprocessCloud (PointCloudPtr& input_cloud)//, void (*publishBeforeVoxelFilterCloud)(PointCloudPtr&)) -{ - // Downsample of sampling_factor in every dimension: - PointCloudPtr cloud_downsampled(new PointCloud); - PointCloudPtr cloud_denoised(new PointCloud); - if (sampling_factor_ != 1) - { - cloud_downsampled->width = (input_cloud->width)/sampling_factor_; - cloud_downsampled->height = (input_cloud->height)/sampling_factor_; - cloud_downsampled->points.resize(cloud_downsampled->height*cloud_downsampled->width); - cloud_downsampled->is_dense = input_cloud->is_dense; - cloud_downsampled->header = input_cloud->header; - for (int j = 0; j < cloud_downsampled->width; j++) - { - for (int i = 0; i < cloud_downsampled->height; i++) - { - (*cloud_downsampled)(j,i) = (*input_cloud)(sampling_factor_*j,sampling_factor_*i); - } - } - } - - if (apply_denoising_) - { - // Denoising with statistical filtering: - pcl::StatisticalOutlierRemoval sor; - if (sampling_factor_ != 1) - sor.setInputCloud 
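// (How rotateGround, defined above, re-fits the plane: it places three points
// on a*x + b*y + c*z + d = 0 by fixing (x, y) and solving
// z = (-d - a*x - b*y) / c, rotates them with rotateCloud, and re-estimates
// the coefficients with SampleConsensusModelPlane::computeModelCoefficients.
// Worked check with assumed numbers: for the plane z = 1, coefficients
// (0, 0, 1, -1), a 90 degree rotation about x sends each point (x, y, 1) to
// (x, -1, y), so the re-fit returns the plane y = -1, i.e. (0, 1, 0, 1) up to
// the sign of the normal.)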
(cloud_downsampled); - else - sor.setInputCloud (input_cloud); - sor.setMeanK (mean_k_denoising_); - sor.setStddevMulThresh (std_dev_denoising_); - sor.filter (*cloud_denoised); - } - - // // Denoising viewer - // int v1(0); - // int v2(0); - // denoising_viewer_->removeAllPointClouds(v1); - // denoising_viewer_->removeAllPointClouds(v2); - // denoising_viewer_->createViewPort (0.0, 0.0, 0.5, 1.0, v1); - // pcl::visualization::PointCloudColorHandlerRGBField rgb(input_cloud); - // denoising_viewer_->addPointCloud (input_cloud, rgb, "original", v1); - // denoising_viewer_->createViewPort (0.5, 0.0, 1.0, 1.0, v2); - // pcl::visualization::PointCloudColorHandlerRGBField rgb2(cloud_denoised); - // denoising_viewer_->addPointCloud (cloud_denoised, rgb2, "denoised", v2); - // denoising_viewer_->spinOnce(); - - // Voxel grid filtering: - PointCloudPtr cloud_filtered(new PointCloud); - pcl::VoxelGrid voxel_grid_filter_object; - if (apply_denoising_) - { - //std::cout << "APPLY DENOISING" << std::endl; - voxel_grid_filter_object.setInputCloud(cloud_denoised); - } - else - { - if (sampling_factor_ != 1) - { - //std::cout << "APPLY DownSampled" << std::endl; - voxel_grid_filter_object.setInputCloud(cloud_downsampled); - } - else - { - //std::cout << "Use Input Cloud" << std::endl; - voxel_grid_filter_object.setInputCloud(input_cloud); - } - } - - //std::cout << "Voxel Size " << voxel_size_ << " Max Distance " << max_distance_ << std::endl; - - voxel_grid_filter_object.setLeafSize (voxel_size_, voxel_size_, voxel_size_); - voxel_grid_filter_object.setFilterFieldName("z"); - voxel_grid_filter_object.setFilterLimits(0.0, max_distance_); - voxel_grid_filter_object.filter (*cloud_filtered); - - //(*publishBeforeVoxelFilterCloud)(cloud_filtered); - - return cloud_filtered; -} - -template bool -open_ptrack::detection::GroundBasedPeopleDetectionApp::compute (std::vector >& clusters)//, void (*publishExtractRGB)(PointCloudPtr&), void (*publishPreProcessed)(PointCloudPtr&), void (*publishGroundRemoval)(PointCloudPtr&), void (*publishBeforeVoxelFilterCloud)(PointCloudPtr&)) -{ - frame_counter_++; - - // Define if debug info should be written or not for this frame: - bool debug_flag = false; - if ((frame_counter_ % 60) == 0) - { - debug_flag = true; - } - - // Check if all mandatory variables have been set: - if (debug_flag) - { - if (sqrt_ground_coeffs_ != sqrt_ground_coeffs_) - { - PCL_ERROR ("[open_ptrack::detection::GroundBasedPeopleDetectionApp::compute] Floor parameters have not been set or they are not valid!\n"); - return (false); - } - if (cloud_ == NULL) - { - PCL_ERROR ("[open_ptrack::detection::GroundBasedPeopleDetectionApp::compute] Input cloud has not been set!\n"); - return (false); - } - if (intrinsics_matrix_(0) == 0) - { - PCL_ERROR ("[open_ptrack::detection::GroundBasedPeopleDetectionApp::compute] Camera intrinsic parameters have not been set!\n"); - return (false); - } - if (!person_classifier_set_flag_) - { - PCL_ERROR ("[open_ptrack::detection::GroundBasedPeopleDetectionApp::compute] Person classifier has not been set!\n"); - return (false); - } - } - - if (!dimension_limits_set_) // if dimension limits have not been set by the user - { - // Adapt thresholds for clusters points number to the voxel size: - max_points_ = int(float(max_points_) * std::pow(0.06/voxel_size_, 2)); - if (voxel_size_ > 0.06) - min_points_ = int(float(min_points_) * std::pow(0.06/voxel_size_, 2)); - } - - // Fill rgb image: - rgb_image_->points.clear(); // clear RGB pointcloud - extractRGBFromPointCloud(cloud_, 
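// (The cluster-size adaptation above rescales the point-count thresholds by
// the voxel footprint relative to the 0.06 m default: max_points_ is
// multiplied by (0.06 / voxel_size_)^2, so doubling the voxel size to 0.12 m
// quarters it, since each voxel then covers about 4x the area and a person
// yields about 1/4 as many voxelized points. min_points_ is only rescaled when
// voxel_size_ > 0.06, so a finer grid never raises the minimum cluster size.)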
rgb_image_); // fill RGB pointcloud - - //TODO publish here extractedRGB - //(*publishExtractRGB)(cloud_); - - // Point cloud pre-processing (downsampling and filtering): - PointCloudPtr cloud_filtered(new PointCloud); - cloud_filtered = preprocessCloud (cloud_, publishBeforeVoxelFilterCloud); - - //TODO publish here preProcessed - //(*publishPreProcessed)(cloud_filtered); - - if (use_rgb_) - { - // Compute mean luminance: - int n_points = cloud_filtered->points.size(); - double sumR, sumG, sumB = 0.0; - for (int j = 0; j < cloud_filtered->width; j++) - { - for (int i = 0; i < cloud_filtered->height; i++) - { - sumR += (*cloud_filtered)(j,i).r; - sumG += (*cloud_filtered)(j,i).g; - sumB += (*cloud_filtered)(j,i).b; - } - } - mean_luminance_ = 0.3 * sumR/n_points + 0.59 * sumG/n_points + 0.11 * sumB/n_points; - // mean_luminance_ = 0.2126 * sumR/n_points + 0.7152 * sumG/n_points + 0.0722 * sumB/n_points; - } - - // Ground removal and update: - pcl::IndicesPtr inliers(new std::vector); - boost::shared_ptr > ground_model(new pcl::SampleConsensusModelPlane(cloud_filtered)); - ground_model->selectWithinDistance(ground_coeffs_, voxel_size_, *inliers); - no_ground_cloud_ = PointCloudPtr (new PointCloud); - pcl::ExtractIndices extract; - extract.setInputCloud(cloud_filtered); - extract.setIndices(inliers); - extract.setNegative(true); - extract.filter(*no_ground_cloud_); - if (inliers->size () >= (300 * 0.06 / voxel_size_ / std::pow (static_cast (sampling_factor_), 2))) - ground_model->optimizeModelCoefficients (*inliers, ground_coeffs_, ground_coeffs_); - else - { - if (debug_flag) - { - PCL_INFO ("No groundplane update!\n"); - } - } - - //TODO publish here groundRemoval - //(*publishGroundRemoval)(no_ground_cloud_); - - // Background Subtraction (optional): - if (background_subtraction_) - { - PointCloudPtr foreground_cloud(new PointCloud); - for (unsigned int i = 0; i < no_ground_cloud_->points.size(); i++) - { - if (not (background_octree_->isVoxelOccupiedAtPoint(no_ground_cloud_->points[i].x, no_ground_cloud_->points[i].y, no_ground_cloud_->points[i].z))) - { - foreground_cloud->points.push_back(no_ground_cloud_->points[i]); - } - } - no_ground_cloud_ = foreground_cloud; - } - - - std::vector myvector; - - int count = 0; - for(pcl::PointCloud::iterator it = no_ground_cloud_->begin(); it != no_ground_cloud_->end(); it++) - { - if (!std::isfinite (it->x) || !std::isfinite (it->y) || !std::isfinite (it->z)) - { - myvector.push_back(count); - } - count++; - } - - count = 0; - for (unsigned i=0; ierase(no_ground_cloud_->begin() + myvector[i] - count); - count++; - } - - - if (no_ground_cloud_->points.size() > 0) - { - // Euclidean Clustering: - std::vector cluster_indices; - typename pcl::search::KdTree::Ptr tree (new pcl::search::KdTree); - - tree->setInputCloud(no_ground_cloud_); - pcl::EuclideanClusterExtraction ec; - ec.setClusterTolerance(2 * 0.06); - ec.setMinClusterSize(min_points_); - ec.setMaxClusterSize(max_points_); - ec.setSearchMethod(tree); - ec.setInputCloud(no_ground_cloud_); - ec.extract(cluster_indices); - - // Sensor tilt compensation to improve people detection: - PointCloudPtr no_ground_cloud_rotated(new PointCloud); - Eigen::VectorXf ground_coeffs_new; - if(sensor_tilt_compensation_) - { - // We want to rotate the point cloud so that the ground plane is parallel to the xOz plane of the sensor: - Eigen::Vector3f input_plane, output_plane; - input_plane << ground_coeffs_(0), ground_coeffs_(1), ground_coeffs_(2); - output_plane << 0.0, -1.0, 0.0; - - Eigen::Vector3f axis = 
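// (The mean-luminance computation above uses the ITU-R BT.601 luma weights
// 0.30*R + 0.59*G + 0.11*B; the commented-out alternative is the BT.709 set.
// Note that "double sumR, sumG, sumB = 0.0;" initializes only sumB; a correct
// declaration would be:
//   double sumR = 0.0, sumG = 0.0, sumB = 0.0;
// )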
input_plane.cross(output_plane); - float angle = acos( input_plane.dot(output_plane)/ ( input_plane.norm()/output_plane.norm() ) ); - transform_ = Eigen::AngleAxisf(angle, axis); - - // Setting also anti_transform for later - anti_transform_ = transform_.inverse(); - no_ground_cloud_rotated = rotateCloud(no_ground_cloud_, transform_); - ground_coeffs_new.resize(4); - ground_coeffs_new = rotateGround(ground_coeffs_, transform_); - } - else - { - transform_ = transform_.Identity(); - anti_transform_ = transform_.inverse(); - no_ground_cloud_rotated = no_ground_cloud_; - ground_coeffs_new = ground_coeffs_; - } - - // To avoid PCL warning: - if (cluster_indices.size() == 0) - cluster_indices.push_back(pcl::PointIndices()); - - // Head based sub-clustering // - pcl::people::HeadBasedSubclustering subclustering; - subclustering.setInputCloud(no_ground_cloud_rotated); - subclustering.setGround(ground_coeffs_new); - subclustering.setInitialClusters(cluster_indices); - subclustering.setHeightLimits(min_height_, max_height_); - subclustering.setMinimumDistanceBetweenHeads(heads_minimum_distance_); - subclustering.setSensorPortraitOrientation(vertical_); - subclustering.subcluster(clusters); - -// for (unsigned int i = 0; i < rgb_image_->points.size(); i++) -// { -// if ((rgb_image_->points[i].r < 0) | (rgb_image_->points[i].r > 255) | isnan(rgb_image_->points[i].r)) -// { -// std::cout << "ERROR in RGB data!" << std::endl; -// } -// } - - if (use_rgb_) // if RGB information can be used - { - // Person confidence evaluation with HOG+SVM: - if (vertical_) // Rotate the image if the camera is vertical - { - swapDimensions(rgb_image_); - } - for(typename std::vector >::iterator it = clusters.begin(); it != clusters.end(); ++it) - { - //Evaluate confidence for the current PersonCluster: - Eigen::Vector3f centroid = intrinsics_matrix_ * (anti_transform_ * it->getTCenter()); - centroid /= centroid(2); - Eigen::Vector3f top = intrinsics_matrix_ * (anti_transform_ * it->getTTop()); - top /= top(2); - Eigen::Vector3f bottom = intrinsics_matrix_ * (anti_transform_ * it->getTBottom()); - bottom /= bottom(2); - - it->setPersonConfidence(person_classifier_.evaluate(rgb_image_, bottom, top, centroid, vertical_)); - } - } - else - { - for(typename std::vector >::iterator it = clusters.begin(); it != clusters.end(); ++it) - { - it->setPersonConfidence(-100.0); - } - } - } - - return (true); -} - -template -open_ptrack::detection::GroundBasedPeopleDetectionApp::~GroundBasedPeopleDetectionApp () -{ - // TODO Auto-generated destructor stub -} -#endif /* OPEN_PTRACK_DETECTION_GROUND_BASED_PEOPLE_DETECTION_APP_HPP_ */ From 755a509eba94dc8b2058a119dc43edc39e6a6590 Mon Sep 17 00:00:00 2001 From: chanbrown007 Date: Mon, 12 Dec 2016 18:05:28 -0800 Subject: [PATCH 12/28] Delete ground_based_people_detection_app.h~ --- .../ground_based_people_detection_app.h~ | 427 ------------------ 1 file changed, 427 deletions(-) delete mode 100644 detection/include/open_ptrack/detection/ground_based_people_detection_app.h~ diff --git a/detection/include/open_ptrack/detection/ground_based_people_detection_app.h~ b/detection/include/open_ptrack/detection/ground_based_people_detection_app.h~ deleted file mode 100644 index 66c2ed1..0000000 --- a/detection/include/open_ptrack/detection/ground_based_people_detection_app.h~ +++ /dev/null @@ -1,427 +0,0 @@ -/* - * Software License Agreement (BSD License) - * - * Copyright (c) 2013-, Open Perception, Inc. - * - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the copyright holder(s) nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * ground_based_people_detection_app.h - * Created on: Nov 30, 2012 - * Author: Matteo Munaro - */ - -#ifndef OPEN_PTRACK_DETECTION_GROUND_BASED_PEOPLE_DETECTION_APP_H_ -#define OPEN_PTRACK_DETECTION_GROUND_BASED_PEOPLE_DETECTION_APP_H_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -namespace open_ptrack -{ - namespace detection - { - /** \brief GroundBasedPeopleDetectionApp performs people detection on RGB-D data having as input the ground plane coefficients. - * It implements the people detection algorithm described here: - * M. Munaro, F. Basso and E. Menegatti, - * Tracking people within groups with RGB-D data, - * In Proceedings of the International Conference on Intelligent Robots and Systems (IROS) 2012, Vilamoura (Portugal), 2012. - * - * \author Matteo Munaro - */ - template class GroundBasedPeopleDetectionApp; - - template - class GroundBasedPeopleDetectionApp - { - public: - - typedef pcl::PointCloud PointCloud; - typedef boost::shared_ptr PointCloudPtr; - typedef boost::shared_ptr PointCloudConstPtr; - - /** \brief Constructor. */ - GroundBasedPeopleDetectionApp (); - - /** \brief Destructor. */ - virtual ~GroundBasedPeopleDetectionApp (); - - /** - * \brief Set the pointer to the input cloud. - * - * \param[in] cloud A pointer to the input cloud. - */ - void - setInputCloud (PointCloudPtr& cloud); - - /** - * \brief Set the ground coefficients. - * - * \param[in] ground_coeffs Vector containing the four plane coefficients. - */ - void - setGround (Eigen::VectorXf& ground_coeffs); - - /** - * \brief Set sampling factor. - * - * \param[in] sampling_factor Value of the downsampling factor (in each dimension) which is applied to the raw point cloud (default = 1.). - */ - void - setSamplingFactor (int sampling_factor); - - /** - * \brief Set voxel size. - * - * \param[in] voxel_size Value of the voxel dimension (default = 0.06m.). 
- */ - void - setVoxelSize (float voxel_size); - - /** - * \brief Set denoising parameters. - * - * \param[in] apply_denoising Denoising flag. If true, a statistical filter is applied to the point cloud to remove noise - * \param[in] mean_k_denoising MeanK for denoising (the higher it is, the stronger is the filtering) - * \param[in] std_dev_denoising Standard deviation for denoising (the lower it is, the stronger is the filtering) - */ - void - setDenoisingParameters (bool apply_denoising, int mean_k_denoising, float std_dev_denoising); - - /** - * \brief Set points maximum distance from the sensor. - * - * \param[in] max_distance Set points maximum distance from the sensor (default = 50m.). - */ - void - setMaxDistance (float max_distance); - - /** - * \brief Set intrinsic parameters of the RGB camera. - * - * \param[in] intrinsics_matrix RGB camera intrinsic parameters matrix. - */ - void - setIntrinsics (Eigen::Matrix3f intrinsics_matrix); - - /** - * \brief Set SVM-based person classifier. - * - * \param[in] person_classifier Needed for people detection on RGB data. - */ - void - setClassifier (open_ptrack::detection::PersonClassifier person_classifier); - - /** - * \brief Set sensor orientation (vertical = true means portrait mode, vertical = false means landscape mode). - * - * \param[in] vertical Set landscape/portait camera orientation (default = false). - */ - void - setSensorPortraitOrientation (bool vertical); - - /** - * \brief Set head_centroid_ to true (person centroid is in the head) or false (person centroid is the whole body centroid). - * - * \param[in] head_centroid Set the location of the person centroid (head or body center) (default = true). - */ - void - setHeadCentroid (bool head_centroid); - - /** - * \brief Set minimum and maximum allowed height for a person cluster. - * - * \param[in] min_height Minimum allowed height for a person cluster (default = 1.3). - * \param[in] max_height Maximum allowed height for a person cluster (default = 2.3). - */ - void - setHeightLimits (float min_height, float max_height); - - /** - * \brief Set minimum and maximum allowed number of points for a person cluster. - * - * \param[in] min_points Minimum allowed number of points for a person cluster. - * \param[in] max_points Maximum allowed number of points for a person cluster. - */ - void - setDimensionLimits (int min_points, int max_points); - - /** - * \brief Set minimum distance between persons' heads. - * - * \param[in] heads_minimum_distance Minimum allowed distance between persons' heads (default = 0.3). - */ - void - setMinimumDistanceBetweenHeads (float heads_minimum_distance); - - /** - * \brief Set if RGB should be used or not for people detection. - * - * \param[in] use_rgb True: RGB is used, false: RGB is not used. - */ - void - setUseRGB (bool use_rgb); - - /** - * \brief Set if sensor tilt angle wrt ground plane should be compensated to improve people detection - * - * \param[in] sensor_tilt_compensation True: sensor tilt is compensated, false: sensor tilt is not compensated. - */ - void - setSensorTiltCompensation ( bool sensor_tilt_compensation); - - /** - * \brief Set background subtraction parameters - * - * \param[in] background_subtraction True: background subtraction is performed, false: background subtraction is not performed. - * \param[in] background_octree_resolution Resolution of the octree. - * \param[in] background_cloud Point cloud containing the background. 
- */ - void - setBackground ( bool background_subtraction, float background_octree_resolution, PointCloudPtr& background_cloud); - - /** - * \brief Get minimum and maximum allowed height for a person cluster. - * - * \param[out] min_height Minimum allowed height for a person cluster. - * \param[out] max_height Maximum allowed height for a person cluster. - */ - void - getHeightLimits (float& min_height, float& max_height); - - /** - * \brief Get minimum and maximum allowed number of points for a person cluster. - * - * \param[out] min_points Minimum allowed number of points for a person cluster. - * \param[out] max_points Maximum allowed number of points for a person cluster. - */ - void - getDimensionLimits (int& min_points, int& max_points); - - /** - * \brief Get minimum distance between persons' heads. - */ - float - getMinimumDistanceBetweenHeads (); - - /** - * \brief Get floor coefficients. - */ - Eigen::VectorXf - getGround (); - - /** - * \brief Get pointcloud after voxel grid filtering and ground removal. - */ - PointCloudPtr - getNoGroundCloud (); - - /** - * \brief Get mean luminance of the RGB data. - */ - float - getMeanLuminance (); - - /** - * \brief Get the transforms to be used to compensate sensor tilt. - * - * \param[in/out] transform Direct transform to compensate sensor tilt. - * \param[in/out] anti_transform Inverse transform wrt transform. - */ - void - getTiltCompensationTransforms (Eigen::Affine3f& transform, Eigen::Affine3f& anti_transform); - - /** - * \brief Extract RGB information from a point cloud and output the corresponding RGB point cloud. - * - * \param[in] input_cloud A pointer to a point cloud containing also RGB information. - * \param[out] output_cloud A pointer to a RGB point cloud. - */ - void - extractRGBFromPointCloud (PointCloudPtr input_cloud, pcl::PointCloud::Ptr& output_cloud); - - /** - * \brief Swap rows/cols dimensions of a RGB point cloud (90 degrees counterclockwise rotation). - * - * \param[in,out] cloud A pointer to a RGB point cloud. - */ - void - swapDimensions (pcl::PointCloud::Ptr& cloud); - - /** - * \brief Rotate the input cloud according to transform - * - * \param[in] cloud Pointer to the input point cloud. - * \param[in] transform Transform to be applied to the input cloud. - * - * \return A pointer to the rotated cloud. - */ - PointCloudPtr - rotateCloud (PointCloudPtr cloud, Eigen::Affine3f transform); - - /** - * \brief Rotate input plane coefficients according to transform - * - * \param[in] ground_coeffs Plane coefficients. - * \param[in] transform Transform to be applied to ground_coeffs. - * - * \return Rotated plane coefficients. - */ - Eigen::VectorXf - rotateGround (Eigen::VectorXf ground_coeffs, Eigen::Affine3f transform); - - /** - * \brief Perform pre-processing operations on the input cloud (downsampling, filtering). - * - * \param[in] input_cloud Input cloud. - * - * \return The cloud after pre-processing. - */ - PointCloudPtr - preprocessCloud (PointCloudPtr& input_cloud, void (*publishBeforeVoxelFilterCloud)(PointCloudPtr&)); - - /** - * \brief Perform people detection on the input data and return people clusters information. - * - * \param[out] clusters Vector of PersonCluster. - * - * \return true if the compute operation is successful, false otherwise. 
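 *
 * Typical call sequence (a sketch, assuming PointT = pcl::PointXYZRGB and that
 * the ground coefficients, intrinsics and classifier were obtained elsewhere;
 * not taken from this patch):
 *
 *   open_ptrack::detection::GroundBasedPeopleDetectionApp<pcl::PointXYZRGB> app;
 *   app.setGround (ground_coeffs);      // Eigen::VectorXf with 4 plane coefficients
 *   app.setIntrinsics (intrinsics);     // Eigen::Matrix3f RGB camera matrix
 *   app.setClassifier (person_classifier);
 *   app.setInputCloud (cloud);
 *   std::vector<pcl::people::PersonCluster<pcl::PointXYZRGB> > clusters;
 *   app.compute (clusters, ...);        // this variant also takes the four publish callbacks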
- */ - bool - compute (std::vector >& clusters, void (*publishExtractRGB)(PointCloudPtr&), void (*publishPreProcessed)(PointCloudPtr&), void (*publishGroundRemoval)(PointCloudPtr&), void (*publishBeforeVoxelFilterCloud)(PointCloudPtr&)); - - protected: - /** \brief sampling factor used to downsample the point cloud */ - int sampling_factor_; - - /** \brief voxel size */ - float voxel_size_; - - /** \brief max distance from the sensor */ - float max_distance_; - - /** \brief ground plane coefficients */ - Eigen::VectorXf ground_coeffs_; - - /** \brief ground plane normalization factor */ - float sqrt_ground_coeffs_; - - /** \brief pointer to the input cloud */ - PointCloudPtr cloud_; - - /** \brief pointer to the cloud after voxel grid filtering and ground removal */ - PointCloudPtr no_ground_cloud_; - - /** \brief pointer to a RGB cloud corresponding to cloud_ */ - pcl::PointCloud::Ptr rgb_image_; - - /** \brief person clusters maximum height from the ground plane */ - float max_height_; - - /** \brief person clusters minimum height from the ground plane */ - float min_height_; - - /** \brief if true, the sensor is considered to be vertically placed (portrait mode) */ - bool vertical_; - - /** \brief if true, the person centroid is computed as the centroid of the cluster points belonging to the head; - * if false, the person centroid is computed as the centroid of the whole cluster points (default = true) */ - bool head_centroid_; - - /** \brief maximum number of points for a person cluster */ - int max_points_; - - /** \brief minimum number of points for a person cluster */ - int min_points_; - - /** \brief true if min_points and max_points have been set by the user, false otherwise */ - bool dimension_limits_set_; - - /** \brief minimum distance between persons' heads */ - float heads_minimum_distance_; - - /** \brief intrinsic parameters matrix of the RGB camera */ - Eigen::Matrix3f intrinsics_matrix_; - - /** \brief SVM-based person classifier */ - open_ptrack::detection::PersonClassifier person_classifier_; - - /** \brief flag stating if the classifier has been set or not */ - bool person_classifier_set_flag_; - - /** \brief flag stating if RGB information should be used or not for people detection */ - bool use_rgb_; - - /** \brief Mean luminance of the RGB data */ - float mean_luminance_; - - /** \brief flag stating if the sensor tilt with respect to the ground plane should be compensated */ - bool sensor_tilt_compensation_; - - /** \brief transforms used for compensating sensor tilt with respect to the ground plane */ - Eigen::Affine3f transform_, anti_transform_; - - /** \brief Flag stating if background subtraction should be applied or not */ - bool background_subtraction_; - - /** \brief Octree representing the background */ - pcl::octree::OctreePointCloud *background_octree_; - - /** \brief Frame counter */ - unsigned int frame_counter_; - - /** \brief Denoising flag. 
If true, a statistical filter is applied to the point cloud to remove noise: */ - bool apply_denoising_; - - /** \brief MeanK for denoising (the higher it is, the stronger is the filtering): */ - int mean_k_denoising_; - - /** \brief Standard deviation for denoising (the lower it is, the stronger is the filtering): */ - float std_dev_denoising_; - -// pcl::visualization::PCLVisualizer::Ptr denoising_viewer_; - - }; - } /* namespace detection */ -} /* namespace open_ptrack */ -#include -#endif /* OPEN_PTRACK_DETECTION_GROUND_BASED_PEOPLE_DETECTION_APP_H_ */ From cc8b09d1fb689756f2bcc96877f30b76bb5a2751 Mon Sep 17 00:00:00 2001 From: chanbrown007 Date: Mon, 12 Dec 2016 18:05:38 -0800 Subject: [PATCH 13/28] Delete haardispada.h~ --- .../open_ptrack/detection/haardispada.h~ | 142 ------------------ 1 file changed, 142 deletions(-) delete mode 100644 detection/include/open_ptrack/detection/haardispada.h~ diff --git a/detection/include/open_ptrack/detection/haardispada.h~ b/detection/include/open_ptrack/detection/haardispada.h~ deleted file mode 100644 index 696270a..0000000 --- a/detection/include/open_ptrack/detection/haardispada.h~ +++ /dev/null @@ -1,142 +0,0 @@ -/* -Software License Agreement (BSD License) - -Copyright (c) 2013, Southwest Research Institute -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the Southwest Research Institute, nor the names - of its contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef OPEN_PTRACK_DETECTION_HAARDISPADA_H -#define OPEN_PTRACK_DETECTION_HAARDISPADA_H - -/***************************************************************************** - ** Includes - *****************************************************************************/ -#include -#include -#include -#include -#include -#include "/usr/include/opencv2/core/core.hpp" -#include "/usr/include/opencv/cv.h" -#include "/usr/include/opencv/ml.h" -#include -#include - -/***************************************************************************** - ** Namespaces - *****************************************************************************/ - -namespace open_ptrack -{ - namespace detection - { - using std::string; - using std::vector; - using cv::Mat; - using cv::Rect; - using cv::Range; - using cv::Size; - using cv::INTER_AREA; - - /***************************************************************************** - ** Class - *****************************************************************************/ - - class HaarDispAdaClassifier{ - - public: - HaarDispAdaClassifier(); - HaarDispAdaClassifier(string file_name); - void init(); - bool useMissingDataMask_; - bool loaded; - void load(string filename); - int addToTraining(vector &R_in, vector &Labels_in, Mat &Disparity_in); - void train(string filename); - void detect(vector &R_in, - vector &L_in, - Mat &D_in, - vector &R_out, - vector &L_out, - bool label_all); - - void detect(vector &R_in, - vector &L_in, - Mat &D_in, - vector &R_out, - vector &L_out, - vector &C_out, - bool label_all); - - int test(); - int numSamples_; - float HaarDispAdaPrior_; - void setMaxSamples(int n); - int getMaxSamples(){ return(maxSamples_);}; - float getMinConfidence(); - void setMinConfidence(float ); - - private: - CvBoost HDAC_; - string classifier_filename_; // for loading and saving - int maxSamples_; - int num_filters_; - Mat integralImage_; - Mat trainingSamples_; - Mat trainingLabels_; - Mat trainingMissingMask_; - Mat AvePosTrainingImg_; - - // images set by alpah_map() - Mat map; // set by alpha_map() in haar_response() - - // images set by setDImageRoi() - Mat Image4Haar; - Mat haar16x16; - Mat haar8x8; - Mat haar4x4; - - // Minimum classifier confidence for people detection: - float min_confidence_; - - // private functions - void setDImageRoi(Rect &R_in, Mat &I_in); - void setDImageROI_fast(Rect & R_in, Mat &I_in); - void alpha_map(int idx); - void print_map(int k);// a debugging tool - void print_scaled_map(cv::Mat &A);// a debugging tool - void print_Image4Haar();// a debugging tool - int haar_features(Mat & HF, Mat & MH); - int haar_features_fast(Mat & HF); - void mask_scale(Mat & input, Mat & output); - float find_central_disparity(int x, int y, int height, int width, Mat& D_in); - }; - } // namespace detection -} // namespace open_ptrack - -#endif /* OPEN_PTRACK_DETECTION_HAARDISPADA_H */ - From f6dcaa7cb0aa022a13a8d56c15677ae76445b41f Mon Sep 17 00:00:00 2001 From: chanbrown007 Date: Mon, 12 Dec 2016 18:06:07 -0800 Subject: [PATCH 14/28] Delete detector_depth.launch~ --- detection/launch/detector_depth.launch~ | 20 -------------------- 1 file changed, 20 deletions(-) delete mode 100644 detection/launch/detector_depth.launch~ diff --git a/detection/launch/detector_depth.launch~ b/detection/launch/detector_depth.launch~ deleted file mode 100644 index 84b9e1c..0000000 --- a/detection/launch/detector_depth.launch~ +++ /dev/null @@ -1,20 +0,0 @@ - - - - - - - - - - - - - - - - - - - - From c5d664a60c0d5855b3d7c0821d0e1a952a177f98 Mon Sep 17 00:00:00 
2001
From: chanbrown007
Date: Mon, 12 Dec 2016 18:06:22 -0800
Subject: [PATCH 15/28] Delete detector_depth_zed.launch~

---
 detection/launch/detector_depth_zed.launch~ | 16 ----------------
 1 file changed, 16 deletions(-)
 delete mode 100644 detection/launch/detector_depth_zed.launch~

diff --git a/detection/launch/detector_depth_zed.launch~ b/detection/launch/detector_depth_zed.launch~
deleted file mode 100644
index 6782260..0000000
--- a/detection/launch/detector_depth_zed.launch~
+++ /dev/null
@@ -1,16 +0,0 @@
-[16 lines of launch XML: the angle-bracketed tags were stripped during text extraction and are not recoverable here]

From 61e9d53907c570385fff9827ef88865e558cacec Mon Sep 17 00:00:00 2001
From: chanbrown007
Date: Mon, 12 Dec 2016 18:06:45 -0800
Subject: [PATCH 16/28] Delete haardispada.cpp~

---
 detection/src/haardispada.cpp~ | 575 ---------------------------------
 1 file changed, 575 deletions(-)
 delete mode 100644 detection/src/haardispada.cpp~

diff --git a/detection/src/haardispada.cpp~ b/detection/src/haardispada.cpp~
deleted file mode 100644
index 5f66e05..0000000
--- a/detection/src/haardispada.cpp~
+++ /dev/null
@@ -1,575 +0,0 @@
-/*
-Software License Agreement (BSD License)
-
-Copyright (c) 2013, Southwest Research Institute
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the Southwest Research Institute, nor the names
- of its contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
- */ -#include "open_ptrack/detection/haardispada.h" -#include -#include -#include - -/***************************************************************************** - ** Namespaces - *****************************************************************************/ -namespace open_ptrack -{ - namespace detection - { - /***************************************************************************** - ** Implementation - *****************************************************************************/ - HaarDispAdaClassifier::HaarDispAdaClassifier() - { - classifier_filename_ = "UnNamedHaarDispAda.xml"; - HaarDispAdaPrior_ = exp(2); // default prior - loaded = false; - init(); - } - - HaarDispAdaClassifier::HaarDispAdaClassifier(string filename) - { - classifier_filename_ = filename; - loaded = false; - init(); - } - - void - HaarDispAdaClassifier::detect(vector &R_in, - vector &L_in, - Mat &D_in, - vector &R_out, - vector &L_out, - bool label_all) - { - int count =0; - Mat HF(1,num_filters_,CV_32F); - Mat MH(1,num_filters_,CV_8UC1); - - R_out.clear(); - L_out.clear(); - if(!loaded) return; - for(unsigned int i=0;i 2 && R_in[i].height > 2){ - setDImageROI_fast(R_in[i],D_in); // copy region of interest from disparity - int rtn = haar_features_fast(HF); // compute haar features - if(rtn== 1){ - // Compute classifier score: - //result = HDAC_.predict(HF); - cv::ml::StatModel::predict(HF,result,cv::ml::StatModel::RAW_OUTPUT); - } - else{ - ROS_ERROR("WHY O WHY"); - result = 0; - } - } - if(result>0 || label_all == true){ - // Insert in output detections: - R_out.push_back(R_in[i]); - if(label_all) L_out.push_back(result); // apply the label - if(!label_all)L_out.push_back(L_in[i]); // give the same label it came in with to allow eval - if(result>0) count++; - } - } - } - - void - HaarDispAdaClassifier::detect(vector &R_in, - vector &L_in, - Mat &D_in, - vector &R_out, - vector &L_out, - vector &C_out, - bool label_all) - { - int count =0; - Mat HF(1,num_filters_,CV_32F); - Mat MH(1,num_filters_,CV_8UC1); - - R_out.clear(); - L_out.clear(); - if(!loaded) return; - for(unsigned int i=0;i 2 && R_in[i].height > 2){ - setDImageROI_fast(R_in[i],D_in); // copy region of interest from disparity - int rtn = haar_features_fast(HF); // compute haar features - if(rtn== 1){ - // Compute classifier score: - result = HDAC_.predict(HF, cv::Mat(), cv::Range::all(), false, true); - } - else{ - ROS_ERROR("WHY O WHY"); - result = 0; - } - } - - if(result>min_confidence_ || label_all == true){ - // Insert in output detections: - R_out.push_back(R_in[i]); - C_out.push_back(result); // write classifier confidence - if(label_all) - { - if (result > min_confidence_) - L_out.push_back(1); // apply the label - else - L_out.push_back(0); - } - if(!label_all) - L_out.push_back(L_in[i]); // give the same label it came in with to allow evalì - if(result>min_confidence_) count++; - } - } - } - - int - HaarDispAdaClassifier::addToTraining(vector &R_in, vector &L_in, Mat &D_in) - { - Mat HF(1,num_filters_,CV_32F); - Mat MH(1,num_filters_,CV_8UC1); - for(unsigned int i = 0; i 0.0)){ - for(int j=0;j(numSamples_,j) = HF.at(0,j); - } - if(L_in[i] <= 0 ){ - trainingLabels_.at(numSamples_,0) = 0;//classes: 0,1 not -1,1 - } - else{ - trainingLabels_.at(numSamples_,0) = L_in[i]; - } - numSamples_++; - }// end if successful compute feature - }// end of if not too many samples already - }// end each roi - return(numSamples_); - }// end addToTraining - - void - HaarDispAdaClassifier::setDImageRoi(Rect &R_in, Mat &I_in) - { - int xmin = R_in.x; - 
int xmax = xmin+R_in.width; - int ymin = R_in.y; - int ymax = ymin+R_in.height; - Image4Haar = I_in(Range(ymin,ymax),Range(xmin,xmax)); - } - - void - HaarDispAdaClassifier::train(string filename) - { - CvBoostParams bparams = CvBoostParams(); - float priorFloat[] = { 1.0, HaarDispAdaPrior_ }; // preliminary priors based on ROC) - bparams.priors = &priorFloat[0]; - bparams.use_surrogates = false; - bparams.weak_count = 100; - - // copy sub matrix for training - Mat VarIdx; - Mat Features(numSamples_,num_filters_,CV_32F); - for(int i=0;i(i,j) = trainingSamples_.at(i,j); - } - } - Mat Responses(numSamples_,1,CV_32S); - for(int i=0; i(i,0) == -1) trainingLabels_.at(i,0) = 0;//classes: 0,1 - Responses.at(i,0) = trainingLabels_.at(i,0); - } - Mat vIdx=Mat::ones(Features.cols,1,CV_8UC1); // variables of interest - Mat sIdx=Mat::ones(Responses.rows,1,CV_8UC1); // samples of interest - Mat vtyp=Mat(Features.cols,1,CV_8UC1,CV_VAR_ORDERED); // could be VAR_CATAGORICAL(discrete) - Mat MDM; // no missing mask - HDAC_.train(Features, CV_ROW_SAMPLE, Responses,vIdx,sIdx,vtyp,MDM,bparams,false); - ROS_ERROR("saving trained classifier to %s",filename.c_str()); - loaded = true; - HDAC_.save(filename.c_str()); - // Determine Recall Statistics - int num_TP = 0; - int num_FP = 0; - int num_people = 0; - int num_neg = 0; - int num_TN = 0; - int num_FN = 0; - for(int i=0;i(i,0) == 1) num_people++; - if(Responses.at(i,0) != 1) num_neg++; - if(result==1 && Responses.at(i,0) == 1) num_TP++; // true pos - else if(result==1 && Responses.at(i,0) == 0) num_FP++; // false pos - else if(result==0 && Responses.at(i,0) == 0) num_TN++; // true neg - else if(result==0 && Responses.at(i,0) == 1) num_FN++; // false neg - } - float percent = (float)num_TP/(float)num_people*100.0; - ROS_ERROR("Recall = %6.2f%c",percent,'%'); - percent = (float)num_FP/(float)num_neg*100.0; - ROS_ERROR("False Positives = %6.2f%c",percent,'%'); - } - - void - HaarDispAdaClassifier::setMaxSamples(int n) - { - maxSamples_ = n; - numSamples_ = 0; - trainingSamples_.create(maxSamples_,num_filters_,CV_32FC1); - trainingLabels_.create(maxSamples_,1,CV_32SC1); - - } - - void - HaarDispAdaClassifier::setMinConfidence(float min_confidence) - { - min_confidence_ = min_confidence; - } - - float - HaarDispAdaClassifier::getMinConfidence() - { - return min_confidence_; - } - - void - HaarDispAdaClassifier::init() - { - num_filters_ = 174; - setMaxSamples(350); //use a small number to have a small memory footprint by default - - AvePosTrainingImg_.create(64,64,CV_64F); - - // images used by haar_features() set here since known size and type to avoid realloc - map.create(16,16,CV_16SC1); - - // faster haar - haar16x16.create(16, 16, CV_32F); - haar8x8.create(8, 8, CV_32F); - haar4x4.create(4, 4, CV_32F); - - } - - void - HaarDispAdaClassifier::load(string filename) - { - HDAC_.load(filename.c_str()); - loaded = true; - } - - int - HaarDispAdaClassifier::test() - { - return(1); - } - - // TODO find out what this function is for - void - HaarDispAdaClassifier::alpha_map(int idx) - { - int i, j, rowWidth, lPower, lPower2, type; - - type = idx % 3; // find type (horz, vert, diag) - - if (idx >= 174) - { // in 16x16, there are 225 2x2s - rowWidth = 15; - idx = idx - 174; - lPower = 2; - lPower2 = 1; - } - else if (idx >= 27) - { // in 16x16, there are 49 4x4s - rowWidth = 7; - idx = idx - 27; - lPower = 4; - lPower2 = 2; - } - else - { // in 16x16, there are 9 8x8s - rowWidth = 3; - lPower = 8; - lPower2 = 4; - } // if elseif else - - int i1 = (idx / 3) % rowWidth; - 
int j1 = (idx / 3) / rowWidth; - - - map = cv::Mat::zeros(map.rows,map.cols,CV_16SC1); - - for (i = i1 * lPower2; i < i1 * lPower2 + lPower; i++) - { // width/x - for (j = j1 * lPower2; j < j1 * lPower2 + lPower; j++) - { // height/y - if (type == 0) - { // horz type - if (i >= i1 * lPower2 + lPower2) - { - map.at(i,j) = 1; - } - else - { - map.at(i,j) = -1; - } - } - else if (type == 1) - { // vert type - if (j >= j1 * lPower2 + lPower2) - { - map.at(i,j) = 1; - } - else - { - map.at(i,j) = -1; - } - } - else - { // diag type (type == 2) - if (((i >= i1 * lPower2 + lPower2) && (j >= j1 * lPower2 + lPower2)) - || ((i < i1 * lPower2 + lPower2) && (j < j1 * lPower2 + lPower2))) - { - map.at(i,j) = 1; - } - else - { - map.at(i,j) = -1; - } - } // if - } // for j - } // for i - } // alpha map - - - void - HaarDispAdaClassifier::mask_scale(Mat & input, Mat & output) - { - assert(input.type() == CV_32F);// must be of type CV_8U - float hratio = (float) input.rows / (float) output.rows; - float wratio = (float) input.cols / (float) output.cols; - for (int i = 0; i < output.rows; i++) - { - float *outputRow = output.ptr(i); - for (int j = 0; j < output.cols; j++) - { - outputRow[j] = 0; - int n = 0; - for (int i1 = i * hratio; (i1 < (i + 1) * hratio) && (i1 < input.rows); i1++) - { - float *inputRow = input.ptr(i1); - for (int j1 = j * wratio; (j1 < (j + 1) * wratio) && (j1 < input.cols); j1++) - { - if (inputRow[j1] > 0) // if the pixel is nonzero, add it to the output pixel - { - outputRow[j] += inputRow[j1]; - n++; // count of non-zero pixels - } // if - } // for i1, j1 - } - if (n > 0) - outputRow[j] /= n; // new value is average of non-zero pixels - } // for i,j - } // i - } // mask_scale - - - void - HaarDispAdaClassifier::setDImageROI_fast(Rect & R_in, Mat & I_in) - { - Mat ROI = I_in(R_in); - mask_scale(ROI, haar16x16); - } - - int - HaarDispAdaClassifier::haar_features_fast(Mat &HF) - { - int jl, idx, i1, j1; - - cv::resize(haar16x16, haar8x8, cv::Size(8,8), 0, 0, CV_INTER_AREA); - cv::resize(haar8x8, haar4x4, cv::Size(4,4), 0, 0, CV_INTER_AREA); - - float* hf_ptr = HF.ptr(0); - - if(haar16x16.rows != 16 || haar16x16.cols !=16) - { - ROS_ERROR("Wrong sz Input4Haar haar_response %dX%d ",haar16x16.rows,haar16x16.cols); - return(0); - } - - - // Largest features are first (8x8s) - for (jl = 0; jl < 27; jl += 3) - { // in 16x16 template, there are 27 8x8s (9 positions, 3 types, windows may overlap by 50%) - idx = jl; - i1 = (idx / 3) % 3; // "x" value of our location in map - j1 = (idx / 3) / 3; // "y" value of our location in map - hf_ptr[jl] = 16 * (-haar4x4.at(j1,i1) + haar4x4.at(j1,i1+1) - -haar4x4.at(j1 +1 ,i1) + haar4x4.at(j1 + 1,i1 + 1)); - - hf_ptr[jl+1] = 16 * (-haar4x4.at(j1,i1) - haar4x4.at(j1,i1+1) - +haar4x4.at(j1 +1 ,i1) + haar4x4.at(j1 + 1,i1 + 1)); - - hf_ptr[jl+2] = 16 * (haar4x4.at(j1,i1) - haar4x4.at(j1,i1+1) - -haar4x4.at(j1 +1 ,i1) + haar4x4.at(j1 + 1,i1 + 1)); - } // for - - // Medium features are next (4x4s) - for (jl = 27; jl < 174; jl += 3) - { // in 16x16 template, there are 147 4x4s (49 positions, 3 types, windows may overlap by 50%) - idx = jl - 27; - i1 = (idx / 3) % 7; // "x" value of our location in map - j1 = (idx / 3) / 7; // "y" value of our location in map - hf_ptr[jl] = 4 * (-haar8x8.at(j1,i1) + haar8x8.at(j1,i1+1) - -haar8x8.at(j1+1 ,i1) + haar8x8.at(j1 + 1,i1 + 1)); - - hf_ptr[jl+1] = 4 * (-haar8x8.at(j1,i1) - haar8x8.at(j1,i1+1) - +haar8x8.at(j1 +1 ,i1) + haar8x8.at(j1 + 1,i1 + 1)); - - hf_ptr[jl+2] = 4 * (haar8x8.at(j1,i1) - haar8x8.at(j1,i1+1) - 
-haar8x8.at(j1 +1 ,i1) + haar8x8.at(j1 + 1,i1 + 1)); - } // for - return 1; - - } // haar_features_fast - - - int - HaarDispAdaClassifier::haar_features(Mat &HF, Mat &MH) - { - for (int k = 0; k < num_filters_; k++){ // do each feature - // generate haar map and scale to size of Image4Haar(roi) - alpha_map(k); - Mat ScaledMap(Image4Haar.rows,Image4Haar.cols,CV_16SC1); - resize(map, ScaledMap, Size(Image4Haar.cols, Image4Haar.rows), 0, 0, CV_INTER_NN ); - - // multiply, and sum, considering missing data in disparity map - float plus_sum = 0; - float minus_sum = 0; - int num_plus=0; - int num_minus=0; - for(int ii=0;ii(ii,0); - short int *map_ptr = (short int *) &ScaledMap.at(ii,0); - for(int jj=0;jj(0,k) =(uchar) 1; // set missing data mask to 1 - HF.at(0,k) = 0; // set feature value to zero - } - else{// not missing data - MH.at(0,k) = (uchar) 0; // set missing data mask to 0 - float plus_ave = plus_sum/num_plus; - float minus_ave = minus_sum/num_minus; - float ave_dif = fabs(plus_ave - minus_ave); - // determine scaling of feature to be consistent with haar features without missing data - float scaling=2.0; - if(k<27){ - scaling = 32.0; - } - else if(k<174){ - scaling = 8.0; - } - HF.at(0,k) = scaling*ave_dif; - } - } - return(1); - }// haar_features - void - HaarDispAdaClassifier::print_map(int k) - { - printf("Map %d :\n",k); - alpha_map(k); - for(int i=0;i(i,j)); - } - printf("\n"); - } - } - void - HaarDispAdaClassifier::print_scaled_map(cv::Mat &A) - { - printf("SMap = [\n"); - for(int i=0;i(i,j)); - } - printf("\n"); - } - printf("]\n"); - } - void - HaarDispAdaClassifier::print_Image4Haar() - { - printf("Image4Haar = [\n"); - for(int i=0;i(i,j)); - } - printf("\n"); - } - printf("]\n"); - } - - float - HaarDispAdaClassifier::find_central_disparity(int x, int y, int height, int width, Mat &D_in) - { - // Insure its a reasonable roi - if(x+width>D_in.cols || y+height>D_in.rows) return(0.0); - - float ad=0.0; - int j=width/2.0;// use central value for x - for(int i=0;i(y+i,x+j)>ad){ - ad = D_in.at(y+i,x+j); - } - } - - return(ad); - } - - - } // namespace detection -} // namespace open_ptrack From 00f5f5bf53d0caf2b0084b5eb82e630af2554b6f Mon Sep 17 00:00:00 2001 From: chanbrown007 Date: Mon, 12 Dec 2016 18:08:26 -0800 Subject: [PATCH 17/28] tracking ZED Update --- tracking/launch/tracking/CMakeLists.txt | 39 + .../apps/moving_average_filter_node.cpp | 501 +++++++++++ .../tracking/apps/tracker_filter_node.cpp | 500 +++++++++++ .../launch/tracking/apps/tracker_node.cpp | 810 ++++++++++++++++++ .../tracking/cfg/MovingAverageSmoother.cfg | 40 + tracking/launch/tracking/cfg/Tracker.cfg | 78 ++ .../launch/tracking/cfg/TrackerSmoother.cfg | 42 + .../tracking/conf/moving_average_filter.yaml | 26 + .../tracking/conf/multicamera_tracking.rviz | 180 ++++ tracking/launch/tracking/conf/tracker.yaml | 74 ++ .../launch/tracking/conf/tracker_filter.yaml | 28 + .../tracking/conf/tracker_multicamera.yaml | 78 ++ tracking/launch/tracking/conf/tracker_sr.yaml | 74 ++ tracking/launch/tracking/conf/tracking.rviz | 249 ++++++ .../open_ptrack/tracking/kalman_filter.h | 317 +++++++ .../include/open_ptrack/tracking/munkres.h | 133 +++ .../include/open_ptrack/tracking/track.h | 402 +++++++++ .../include/open_ptrack/tracking/tracker.h | 332 +++++++ .../launch/detection_and_tracking.launch | 15 + .../detection_and_tracking_kinect2.launch | 15 + .../launch/detection_and_tracking_sr.launch | 15 + .../detection_and_tracking_stereo.launch | 15 + .../launch/detection_and_tracking_zed.launch | 15 + 
.../launch/tracking/launch/tracker.launch | 21 + .../tracking/launch/tracker_network.launch | 24 + .../launch/tracking/launch/tracker_sr.launch | 22 + .../tracking/launch/tracking_node.launch | 15 + tracking/launch/tracking/package.xml | 46 + .../launch/tracking/src/kalman_filter.cpp | 367 ++++++++ tracking/launch/tracking/src/munkres.cpp | 410 +++++++++ tracking/launch/tracking/src/track.cpp | 637 ++++++++++++++ tracking/launch/tracking/src/tracker.cpp | 566 ++++++++++++ 32 files changed, 6086 insertions(+) create mode 100644 tracking/launch/tracking/CMakeLists.txt create mode 100644 tracking/launch/tracking/apps/moving_average_filter_node.cpp create mode 100644 tracking/launch/tracking/apps/tracker_filter_node.cpp create mode 100644 tracking/launch/tracking/apps/tracker_node.cpp create mode 100644 tracking/launch/tracking/cfg/MovingAverageSmoother.cfg create mode 100644 tracking/launch/tracking/cfg/Tracker.cfg create mode 100644 tracking/launch/tracking/cfg/TrackerSmoother.cfg create mode 100644 tracking/launch/tracking/conf/moving_average_filter.yaml create mode 100644 tracking/launch/tracking/conf/multicamera_tracking.rviz create mode 100644 tracking/launch/tracking/conf/tracker.yaml create mode 100644 tracking/launch/tracking/conf/tracker_filter.yaml create mode 100644 tracking/launch/tracking/conf/tracker_multicamera.yaml create mode 100644 tracking/launch/tracking/conf/tracker_sr.yaml create mode 100644 tracking/launch/tracking/conf/tracking.rviz create mode 100644 tracking/launch/tracking/include/open_ptrack/tracking/kalman_filter.h create mode 100644 tracking/launch/tracking/include/open_ptrack/tracking/munkres.h create mode 100644 tracking/launch/tracking/include/open_ptrack/tracking/track.h create mode 100644 tracking/launch/tracking/include/open_ptrack/tracking/tracker.h create mode 100644 tracking/launch/tracking/launch/detection_and_tracking.launch create mode 100644 tracking/launch/tracking/launch/detection_and_tracking_kinect2.launch create mode 100644 tracking/launch/tracking/launch/detection_and_tracking_sr.launch create mode 100644 tracking/launch/tracking/launch/detection_and_tracking_stereo.launch create mode 100644 tracking/launch/tracking/launch/detection_and_tracking_zed.launch create mode 100644 tracking/launch/tracking/launch/tracker.launch create mode 100644 tracking/launch/tracking/launch/tracker_network.launch create mode 100644 tracking/launch/tracking/launch/tracker_sr.launch create mode 100644 tracking/launch/tracking/launch/tracking_node.launch create mode 100644 tracking/launch/tracking/package.xml create mode 100644 tracking/launch/tracking/src/kalman_filter.cpp create mode 100644 tracking/launch/tracking/src/munkres.cpp create mode 100644 tracking/launch/tracking/src/track.cpp create mode 100644 tracking/launch/tracking/src/tracker.cpp diff --git a/tracking/launch/tracking/CMakeLists.txt b/tracking/launch/tracking/CMakeLists.txt new file mode 100644 index 0000000..496bda8 --- /dev/null +++ b/tracking/launch/tracking/CMakeLists.txt @@ -0,0 +1,39 @@ +cmake_minimum_required(VERSION 2.8.3) +project(tracking) +set(CMAKE_BUILD_TYPE RelWithDebInfo) +find_package(catkin REQUIRED COMPONENTS cmake_modules roscpp rosconsole image_transport tf tf_conversions bayes pcl_ros detection opt_msgs opt_utils dynamic_reconfigure) + +find_package(OpenCV 2.4 REQUIRED) +include_directories(${OpenCV_INCLUDE_DIRS}) +link_directories(${OpenCV_LIB_DIR}) + +find_package(Eigen REQUIRED) +include_directories(${Eigen_INCLUDE_DIRS} include ${catkin_INCLUDE_DIRS}) + +# Dynamic 
reconfigure support +generate_dynamic_reconfigure_options(cfg/Tracker.cfg + #cfg/TrackerSmoother.cfg + cfg/MovingAverageSmoother.cfg +) + +include_directories(cfg/cpp) + +catkin_package( + INCLUDE_DIRS + LIBRARIES ${PROJECT_NAME} + CATKIN_DEPENDS roscpp bayes opencv2 pcl_ros detection tf tf_conversions opt_msgs opt_utils +) + +add_library(${PROJECT_NAME} src/munkres.cpp src/kalman_filter.cpp src/track.cpp src/tracker.cpp) +target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES}) + +add_executable(tracker apps/tracker_node.cpp) +target_link_libraries(tracker ${PROJECT_NAME} ${catkin_LIBRARIES}) + +#add_executable(tracker_filter apps/tracker_filter_node.cpp) +#add_dependencies(tracker_filter ${PROJECT_NAME}_gencfg) +#target_link_libraries(tracker_filter ${PROJECT_NAME} ${catkin_LIBRARIES}) + +add_executable(moving_average_filter apps/moving_average_filter_node.cpp) +add_dependencies(moving_average_filter ${PROJECT_NAME}_gencfg) +target_link_libraries(moving_average_filter ${PROJECT_NAME} ${catkin_LIBRARIES}) diff --git a/tracking/launch/tracking/apps/moving_average_filter_node.cpp b/tracking/launch/tracking/apps/moving_average_filter_node.cpp new file mode 100644 index 0000000..1521260 --- /dev/null +++ b/tracking/launch/tracking/apps/moving_average_filter_node.cpp @@ -0,0 +1,501 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2015-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
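// ---------------------------------------------------------------------------
// Illustrative aside, not part of the original patch: the TrackFilter defined
// below computes its moving average in O(1) per sample by keeping a running
// sum alongside a bounded deque. Adding a position pushes it and adds it to
// the sum; once window_size is reached, the oldest entry is subtracted from
// the sum and popped first. A minimal standalone sketch of the same idea:
// ---------------------------------------------------------------------------
#include <cstddef>
#include <deque>

struct WindowMean
{
  explicit WindowMean(std::size_t n) : capacity(n), sum(0.0) {}

  void add(double x)
  {
    if (window.size() == capacity) { sum -= window.front(); window.pop_front(); }
    window.push_back(x);
    sum += x;   // running sum keeps mean() O(1)
  }

  double mean() const { return window.empty() ? 0.0 : sum / window.size(); }

  std::size_t capacity;
  double sum;
  std::deque<double> window;
};
// e.g. capacity 3 and samples 1, 2, 3, 4 give means 1, 1.5, 2 and then 3.
// ---------------------------------------------------------------------------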
+ * + * Author: Filippo Basso [bassofil@dei.unipd.it] + * Matteo Munaro [matteo.munaro@dei.unipd.it] + * + */ + +#include +#include +#include +#include +#include + +#include + +#include +#include + +namespace open_ptrack +{ +namespace tracking +{ + +typedef ::tracking::MovingAverageSmootherConfig Config; +typedef ::dynamic_reconfigure::Server ReconfigureServer; + + +struct TrackPositionNode +{ + typedef boost::shared_ptr Ptr; + + TrackPositionNode(const Eigen::Array2d & position, const ros::Time & time) : position_(position), time_(time) {} + + const Eigen::Array2d position_; + const ros::Time time_; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +class TrackFilter +{ +public: + + typedef boost::shared_ptr Ptr; + + TrackFilter(int window_size) : window_size_(window_size), last_positions_sum_(Eigen::Array2d::Zero()) {} + + Eigen::Array2d movingAveragePosition() + { + return last_positions_sum_ / last_positions_.size(); + } + + void addPosition(const Eigen::Array2d & position, const ros::Time & time) + { + TrackPositionNode::Ptr node = boost::make_shared(position, time); + if (last_positions_.size() < window_size_) + { + last_positions_.push_front(node); + last_positions_sum_ += node->position_; + } + else + { + const TrackPositionNode::Ptr & remove_node = last_positions_.back(); + last_positions_sum_ -= remove_node->position_; + last_positions_.pop_back(); + last_positions_.push_front(node); + last_positions_sum_ += node->position_; + } + } + + void setWindowSize(int new_size) + { + if (last_positions_.size() > new_size) + { + while (last_positions_.size() > new_size) + { + const TrackPositionNode::Ptr & remove_node = last_positions_.back(); + last_positions_sum_ -= remove_node->position_; + last_positions_.pop_back(); + } + } + window_size_ = new_size; + } + + void removeOldPositions(const ros::Time & min_allowed_time) + { + bool removed = false; + while (not removed) + { + const TrackPositionNode::Ptr & test_node = last_positions_.back(); + if (test_node->time_ < min_allowed_time and last_positions_.size() > 1) + { + last_positions_sum_ -= test_node->position_; + last_positions_.pop_back(); + } + else + { + removed = true; + } + } + } + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + +protected: + + std::list last_positions_; + int window_size_; + + Eigen::Array2d last_positions_sum_; + +}; + +struct TrackInfo +{ + TrackFilter::Ptr track_; + opt_msgs::Track::Ptr last_msg_; + ros::Time last_msg_time_; +}; + +class MovingAverageSmoother +{ + +public: + + MovingAverageSmoother(ros::NodeHandle & node_handle) + : node_handle_(node_handle), + reconfigure_server_(new ReconfigureServer(config_mutex_, node_handle_)), + rate_(ros::Rate(30.0)) + { + double rate_d, heartbeat_time; + + node_handle_.param("rate", rate_d, 30.0); + node_handle_.param("publish_empty", publish_empty_, true); + node_handle_.param("heartbeat_time", heartbeat_time, 5.0); + node_handle_.param("max_history_size", max_history_size_, 1000); + + node_handle_.param("window_size", window_size_, 5); + + double position_lifetime_d, track_lifetime_d; + node_handle_.param("track_lifetime_with_no_detections", track_lifetime_d, 5.0); + node_handle_.param("position_lifetime", position_lifetime_d, 0.5); + track_lifetime_ = ros::Duration(track_lifetime_d); + position_lifetime_ = ros::Duration(position_lifetime_d); + + generateColors(); + history_cloud_ = boost::make_shared >(); + history_cloud_index_ = 0; + history_cloud_->header.frame_id = "world"; + + heartbeat_time_duration_ = ros::Duration(heartbeat_time); + + tracking_sub_ = 
node_handle_.subscribe("input", 100, &MovingAverageSmoother::trackingCallback, this); + tracking_pub_ = node_handle_.advertise("output", 1); + marker_array_pub_ = node_handle_.advertise("markers_array", 1); + history_pub_ = node_handle_.advertise >("history", 1); + + rate_ = ros::Rate(rate_d); + + ReconfigureServer::CallbackType callback = boost::bind(&MovingAverageSmoother::configCallback, this, _1, _2); + reconfigure_server_->setCallback(callback); + + } + + void configCallback(Config & config, uint32_t level) + { + //rate_ = ros::Rate(config.rate); + + config.track_lifetime_with_no_detections = std::max(config.track_lifetime_with_no_detections, rate_.expectedCycleTime().toSec()); + track_lifetime_ = ros::Duration(config.track_lifetime_with_no_detections); + + heartbeat_time_duration_ = ros::Duration(config.heartbeat_time); + publish_empty_ = config.publish_empty; + + window_size_ = config.window_size; + for (std::map::iterator it = track_map_.begin(); it != track_map_.end(); ++it) + { + TrackInfo & track_info = it->second; + track_info.track_->setWindowSize(window_size_); + } + + config.position_lifetime = std::max(config.position_lifetime, rate_.expectedCycleTime().toSec()); + position_lifetime_ = ros::Duration(config.position_lifetime); + + max_history_size_ = config.max_history_size; + + } + + void trackingCallback(const opt_msgs::TrackArray::ConstPtr & msg) + { + for (size_t i = 0; i < msg->tracks.size(); ++i) + { + const opt_msgs::Track & track_msg = msg->tracks[i]; + + std::map::iterator it = track_map_.find(track_msg.id); + + if (it == track_map_.end()) // Track does not exist + { + TrackInfo track_info; + track_info.track_ = boost::make_shared(window_size_); + track_map_[track_msg.id] = track_info; + } + + TrackInfo & track_info = track_map_[track_msg.id]; + + track_info.track_->addPosition(Eigen::Array2d(track_msg.x, track_msg.y), msg->header.stamp); + track_info.last_msg_ = boost::make_shared(track_msg); + if (track_info.last_msg_->visibility < 2) + track_info.last_msg_time_ = msg->header.stamp; + } + } + + void spin() + { + ros::Time last_heartbeat_time = ros::Time::now(); + + while (ros::ok()) + { + ros::spinOnce(); + + opt_msgs::TrackArray track_msg; + visualization_msgs::MarkerArray marker_msg; + + int n = createMsg(track_msg, marker_msg); + ros::Time current_time = ros::Time::now(); + + config_mutex_.lock(); + if (publish_empty_ or n > 0) + { + tracking_pub_.publish(track_msg); + marker_array_pub_.publish(marker_msg); + history_pub_.publish(history_cloud_); + last_heartbeat_time = current_time; + } + else if (not publish_empty_) + { + // Publish a heartbeat message every 'heartbeat_time' seconds + if ((current_time - last_heartbeat_time) > heartbeat_time_duration_) + { + opt_msgs::TrackArray heartbeat_msg; + heartbeat_msg.header.stamp = current_time; + heartbeat_msg.header.frame_id = "heartbeat"; + tracking_pub_.publish(heartbeat_msg); + marker_array_pub_.publish(marker_msg); + history_pub_.publish(history_cloud_); + last_heartbeat_time = current_time; + } + } + config_mutex_.unlock(); + + rate_.sleep(); + } + } + +private: + + void generateColors() + { + for (size_t i = 0; i <= 4; ++i) + for (size_t j = 0; j <= 4; ++j) + for (size_t k = 0; k <= 4; ++k) + color_set_.push_back(Eigen::Vector3f(i * 0.25f, j * 0.25f, k * 0.25f)); + + std::random_shuffle(color_set_.begin(), color_set_.end()); + } + + void appendToHistory(const opt_msgs::Track & track_msg) + { +// if (data.last_msg.tracks[0].visibility == opt_msgs::Track::NOT_VISIBLE) +// return; + + config_mutex_.lock(); + if 
(history_cloud_->size() < max_history_size_) + { + pcl::PointXYZRGB point; + history_cloud_->push_back(point); + } + config_mutex_.unlock(); + + toPointXYZRGB(history_cloud_->points[history_cloud_index_], track_msg); + history_cloud_index_ = (history_cloud_index_ + 1) % max_history_size_; + } + + int createMsg(opt_msgs::TrackArray & track_msg, + visualization_msgs::MarkerArray & marker_msg) + { + int added = 0; + ros::Time now = ros::Time::now(); + + track_msg.header.stamp = now; + track_msg.header.frame_id = "world"; + + history_cloud_->header.stamp = now.toNSec() / 1000; + std::vector to_remove; + + for (std::map::iterator it = track_map_.begin(); it != track_map_.end(); ++it) + { + TrackInfo & track_info = it->second; + + config_mutex_.lock(); + bool ok = (now - track_info.last_msg_time_) < track_lifetime_; + config_mutex_.unlock(); + + if (ok) + { + track_info.track_->removeOldPositions(now - position_lifetime_); + + Eigen::Array2d position = track_info.track_->movingAveragePosition(); + track_info.last_msg_->x = position[0]; + track_info.last_msg_->y = position[1]; + + track_msg.tracks.push_back(*track_info.last_msg_); + createMarker(marker_msg, *track_info.last_msg_, track_msg.header); + appendToHistory(*track_info.last_msg_); + ++added; + + } + else + { + to_remove.push_back(it->first); + } + } + + for (size_t i = 0; i < to_remove.size(); ++i) + track_map_.erase(to_remove[i]); + + + return added; + } + + + void createMarker(visualization_msgs::MarkerArray & msg, + const opt_msgs::Track & track_msg, + const std_msgs::Header & header) + { +// if(track.visibility == Track::NOT_VISIBLE) +// return; + + visualization_msgs::Marker marker; + + marker.header.frame_id = header.frame_id; + marker.header.stamp = header.stamp; + + marker.ns = "people"; + marker.id = track_msg.id; + + marker.type = visualization_msgs::Marker::SPHERE; + marker.action = visualization_msgs::Marker::ADD; + + marker.pose.position.x = track_msg.x; + marker.pose.position.y = track_msg.y; + marker.pose.position.z = 3 * track_msg.height / 4; + marker.pose.orientation.x = 0.0; + marker.pose.orientation.y = 0.0; + marker.pose.orientation.z = 0.0; + marker.pose.orientation.w = 1.0; + + marker.scale.x = 0.1; + marker.scale.y = 0.1; + marker.scale.z = 0.1; + + marker.color.r = color_set_[track_msg.id % color_set_.size()](2); + marker.color.g = color_set_[track_msg.id % color_set_.size()](1); + marker.color.b = color_set_[track_msg.id % color_set_.size()](0); + marker.color.a = 1.0; + + marker.lifetime = ros::Duration(0.2); + + msg.markers.push_back(marker); + + //------------------------------------ + + visualization_msgs::Marker text_marker; + + text_marker.header.frame_id = header.frame_id; + text_marker.header.stamp = header.stamp; + + text_marker.ns = "numbers"; + text_marker.id = track_msg.id; + + text_marker.type = visualization_msgs::Marker::TEXT_VIEW_FACING; + text_marker.action = visualization_msgs::Marker::ADD; + + std::stringstream ss; + ss << track_msg.id; + text_marker.text = ss.str(); + + text_marker.pose.position.x = track_msg.x; + text_marker.pose.position.y = track_msg.y; + text_marker.pose.position.z = track_msg.height + 0.1; + text_marker.pose.orientation.x = 0.0; + text_marker.pose.orientation.y = 0.0; + text_marker.pose.orientation.z = 0.0; + text_marker.pose.orientation.w = 1.0; + + text_marker.scale.x = 0.2; + text_marker.scale.y = 0.2; + text_marker.scale.z = 0.2; + + text_marker.color.r = color_set_[track_msg.id % color_set_.size()](2); + text_marker.color.g = color_set_[track_msg.id % 
color_set_.size()](1); + text_marker.color.b = color_set_[track_msg.id % color_set_.size()](0); + text_marker.color.a = 1.0; + + text_marker.lifetime = ros::Duration(0.2); + + msg.markers.push_back(text_marker); + } + + void toPointXYZRGB(pcl::PointXYZRGB & p, + const opt_msgs::Track & track_msg) + { +// if(track.visibility == Track::NOT_VISIBLE) +// return; + + p.x = float(track_msg.x); + p.y = float(track_msg.y); + p.z = float(3 * track_msg.height / 4); + uchar * rgb_ptr = reinterpret_cast(&p.rgb); + *rgb_ptr++ = uchar(color_set_[track_msg.id % color_set_.size()](0) * 255.0f); + *rgb_ptr++ = uchar(color_set_[track_msg.id % color_set_.size()](1) * 255.0f); + *rgb_ptr++ = uchar(color_set_[track_msg.id % color_set_.size()](2) * 255.0f); + } + + ros::NodeHandle node_handle_; + + boost::recursive_mutex config_mutex_; + boost::shared_ptr reconfigure_server_; + + ros::Subscriber tracking_sub_; + ros::Publisher tracking_pub_; + ros::Publisher marker_array_pub_; + ros::Publisher history_pub_; + + bool publish_empty_; + ros::Duration heartbeat_time_duration_; + + std::vector > color_set_; + + pcl::PointCloud::Ptr history_cloud_; + int max_history_size_; + size_t history_cloud_index_; + + ros::Rate rate_; + int window_size_; + + ros::Duration track_lifetime_; + ros::Duration position_lifetime_; + + std::map track_map_; + +}; + +} // namespace tracking +} // namespace open_ptrack + +using open_ptrack::tracking::MovingAverageSmoother; + +int main(int argc, char **argv) +{ + // Initialization: + ros::init(argc, argv, "tracking_filter"); + ros::NodeHandle nh("~"); + + MovingAverageSmoother smoother(nh); + smoother.spin(); + + return 0; +} diff --git a/tracking/launch/tracking/apps/tracker_filter_node.cpp b/tracking/launch/tracking/apps/tracker_filter_node.cpp new file mode 100644 index 0000000..cfebfff --- /dev/null +++ b/tracking/launch/tracking/apps/tracker_filter_node.cpp @@ -0,0 +1,500 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2014-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * Author: Filippo Basso [bassofil@dei.unipd.it] + * Matteo Munaro [matteo.munaro@dei.unipd.it] + * + */ + +#include +#include +#include +#include +#include + +#include + +#include +#include + +namespace open_ptrack +{ +namespace tracking +{ + +typedef ::tracking::TrackerSmootherConfig Config; +typedef ::dynamic_reconfigure::Server ReconfigureServer; + +struct LastData +{ + ros::Time last_visible_time; + opt_msgs::TrackArray last_msg; +}; + +struct TrackState +{ + Eigen::Array2d position_, velocity_, acceleration_; + + void predict(double delta_t, Eigen::Array2d & position, Eigen::Array2d & velocity, Eigen::Array2d & acceleration) + { + position = position_ + delta_t * (velocity_ + delta_t * 0.5 * acceleration_); + velocity = velocity_ + delta_t * acceleration_; + acceleration = acceleration_; + } + + void predict(double delta_t) + { + position_ += delta_t * (velocity_ + delta_t * 0.5 * acceleration_); + velocity_ += delta_t * acceleration_; + } + + void update(const Eigen::Array2d & measured_position, double delta_t, double alpha, double beta, double phi) + { + Eigen::Array2d residual = measured_position - position_; + position_ += alpha * residual; + velocity_ += beta / delta_t * residual; + acceleration_ += phi * 0.5 / (delta_t * delta_t) * residual; + } + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + +}; + +class TrackSmoother +{ + +public: + + TrackSmoother(ros::NodeHandle & node_handle) + : node_handle_(node_handle), + reconfigure_server_(new ReconfigureServer(config_mutex_, node_handle_)), + rate_(ros::Rate(30.0)) + { + double rate_d, track_lifetime_with_no_detections, heartbeat_time; + + node_handle_.param("rate", rate_d, 30.0); + node_handle_.param("track_lifetime_with_no_detections", track_lifetime_with_no_detections, 1.0); + node_handle_.param("publish_empty", publish_empty_, true); + node_handle_.param("heartbeat_time", heartbeat_time, 5.0); + node_handle_.param("max_history_size", max_history_size_, 1000); + + double sigma_process, sigma_noise; + node_handle_.param("sigma_process", sigma_process, 1.0); + node_handle_.param("sigma_noise", sigma_noise, 0.0); + computeWeights(sigma_process, sigma_noise); + + double prediction_duration_d; + node_handle_.param("prediction_duration", prediction_duration_d, 0.1); + prediction_duration_ = ros::Duration(prediction_duration_d); + + generateColors(); + history_cloud_ = boost::make_shared >(); + history_cloud_index_ = 0; + history_cloud_->header.frame_id = "world"; + + time_alive_ = ros::Duration(track_lifetime_with_no_detections); + heartbeat_time_duration_ = ros::Duration(heartbeat_time); + + tracking_sub_ = node_handle_.subscribe("input", 100, &TrackSmoother::trackingCallback, this); + tracking_pub_ = node_handle_.advertise("output", 1); + marker_array_pub_ = node_handle_.advertise("markers_array", 1); + history_pub_ = node_handle_.advertise >("history", 1); + + rate_ = ros::Rate(rate_d); + + ReconfigureServer::CallbackType callback = boost::bind(&TrackSmoother::configCallback, this, _1, _2); + 
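// ---------------------------------------------------------------------------
// Illustrative aside, not part of the original patch: TrackState above is a
// per-track alpha-beta-gamma filter on (x, y). predict() integrates a
// constant-acceleration model over delta_t; update() corrects the state with
// weighted shares of the residual r = measured_position - position_:
//   position_     += alpha * r
//   velocity_     += (beta / dt) * r
//   acceleration_ += (phi * 0.5 / dt^2) * r
// For example, with alpha = 0.5, beta = 0.2, phi = 0.1, dt = 1/30 s and a
// residual of 0.1 m in x, the corrections are 0.05 m, 0.6 m/s and 4.5 m/s^2.
// computeWeights() below derives alpha_, beta_ and gamma_ from the
// sigma_process / sigma_noise parameters read in this constructor.
// ---------------------------------------------------------------------------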
reconfigure_server_->setCallback(callback); + + } + + void configCallback(Config & config, uint32_t level) + { + //rate_ = ros::Rate(config.rate); + + config.track_lifetime_with_no_detections = std::max(config.track_lifetime_with_no_detections, rate_.expectedCycleTime().toSec()); + time_alive_ = ros::Duration(config.track_lifetime_with_no_detections); + + heartbeat_time_duration_ = ros::Duration(config.heartbeat_time); + publish_empty_ = config.publish_empty; + + computeWeights(config.sigma_process, config.sigma_noise); + prediction_duration_ = ros::Duration(config.prediction_duration); + + max_history_size_ = config.max_history_size; + + } + + void trackingCallback(const opt_msgs::TrackArray::ConstPtr & tracking_msg) + { + for (size_t i = 0; i < tracking_msg->tracks.size(); ++i) + { + opt_msgs::Track msg_track = tracking_msg->tracks[i]; + opt_msgs::TrackArray msg; + msg.header = tracking_msg->header; + + std::map::iterator it = last_data_map_.find(msg_track.id); + + if (it != last_data_map_.end()) // Track exists + { + LastData & last_data = it->second; + double delta_t = (msg.header.stamp - last_data.last_msg.header.stamp).toSec(); + if (delta_t <= 0) + break; + + if (state_map_.find(msg_track.id) != state_map_.end()) + { + TrackState & state = *state_map_[msg_track.id]; + state.predict(delta_t); + config_mutex_.lock(); + state.update(Eigen::Array2d(msg_track.x, msg_track.y), delta_t, alpha_, beta_, gamma_); + config_mutex_.unlock(); + } + else + { + boost::shared_ptr state = boost::make_shared(); + state->position_ = Eigen::Array2d(msg_track.x, msg_track.y); + state->velocity_ = Eigen::Array2d(0, 0);//(state->position_ - Eigen::Array2d(last_data.last_msg.tracks[0].x, last_data.last_msg.tracks[0].y)) / delta_t; + state->acceleration_ = Eigen::Array2d(0, 0); + state_map_[msg_track.id] = state; + } + + TrackState & state = *state_map_[msg_track.id]; + msg_track.x = state.position_[0]; + msg_track.y = state.position_[1]; + msg.tracks.push_back(msg_track); + + last_data.last_msg = msg; + if (msg_track.visibility != opt_msgs::Track::NOT_VISIBLE) + last_data.last_visible_time = msg.header.stamp; + + } + else// if (msg_track.visibility != opt_msgs::Track::NOT_VISIBLE) + { + LastData data; + msg.tracks.push_back(msg_track); + data.last_msg = msg; + data.last_visible_time = msg.header.stamp; + last_data_map_[msg_track.id] = data; + } + } + } + + void spin() + { + ros::Time last_heartbeat_time = ros::Time::now(); + + while (ros::ok()) + { + ros::spinOnce(); + + opt_msgs::TrackArray track_msg; + visualization_msgs::MarkerArray marker_msg; + + int n = createMsg(track_msg, marker_msg); + ros::Time current_time = ros::Time::now(); + + config_mutex_.lock(); + if (publish_empty_ or n > 0) + { + tracking_pub_.publish(track_msg); + marker_array_pub_.publish(marker_msg); + history_pub_.publish(history_cloud_); + last_heartbeat_time = current_time; + } + else if (not publish_empty_) + { + // Publish a heartbeat message every 'heartbeat_time' seconds + if ((current_time - last_heartbeat_time) > heartbeat_time_duration_) + { + opt_msgs::TrackArray heartbeat_msg; + heartbeat_msg.header.stamp = current_time; + heartbeat_msg.header.frame_id = "heartbeat"; + tracking_pub_.publish(heartbeat_msg); + marker_array_pub_.publish(marker_msg); + history_pub_.publish(history_cloud_); + last_heartbeat_time = current_time; + } + } + config_mutex_.unlock(); + + rate_.sleep(); + } + } + +private: + + void computeWeights(double sigma_process, double sigma_noise) + { + if (sigma_noise < 1e-5) + { + alpha_ = 1.0; + beta_ = 
gamma_ = 0.0; + } + else + { + double t = rate_.expectedCycleTime().toSec(); + double lambda = sigma_process * t * t / sigma_noise; + double b = lambda / 2.0 - 3.0; + double c = lambda / 2.0 + 3.0; + double p = c - b * b / 3.0; + double q = 2.0 * b * b * b / 27.0 - b * c / 3.0 - 1.0; + double v = std::sqrt(q * q + 4 * p * p * p / 27.0); + double z = -std::pow(q + v / 2.0, 1.0 / 3.0); + double s = z - p / (3.0 * z) - b / 3.0; + alpha_ = 1.0 - s * s; + beta_ = 2.0 * (1.0 - s) * (1.0 - s); + gamma_ = beta_ * beta_ / (2 * alpha_); + } + } + + void generateColors() + { + for (size_t i = 0; i <= 4; ++i) + for (size_t j = 0; j <= 4; ++j) + for (size_t k = 0; k <= 4; ++k) + color_set_.push_back(Eigen::Vector3f(i * 0.25f, j * 0.25f, k * 0.25f)); + + std::random_shuffle(color_set_.begin(), color_set_.end()); + } + + void appendToHistory(const LastData & data) + { +// if (data.last_msg.tracks[0].visibility == opt_msgs::Track::NOT_VISIBLE) +// return; + + config_mutex_.lock(); + if (history_cloud_->size() < max_history_size_) + { + pcl::PointXYZRGB point; + history_cloud_->push_back(point); + } + config_mutex_.unlock(); + + toPointXYZRGB(history_cloud_->points[history_cloud_index_], data); + history_cloud_index_ = (history_cloud_index_ + 1) % max_history_size_; + } + + int createMsg(opt_msgs::TrackArray & track_msg, + visualization_msgs::MarkerArray & marker_msg) + { + int added = 0; + ros::Time now = ros::Time::now(); + + track_msg.header.stamp = now; + history_cloud_->header.stamp = now.toNSec() / 1000; + std::vector to_remove; + + for (std::map::iterator it = last_data_map_.begin(); it != last_data_map_.end(); ++it) + { + opt_msgs::TrackArray & saved_msg = it->second.last_msg; + const ros::Time & time = it->second.last_visible_time; + + config_mutex_.lock(); + bool ok = (now - time) < time_alive_; + bool prediction = (now - time) < prediction_duration_; + config_mutex_.unlock(); + + if (ok) + { + if (state_map_.find(it->first) != state_map_.end() and prediction) + { + Eigen::Array2d position, velocity, acceleration; + state_map_[it->first]->predict((now - time).toSec(), position, velocity, acceleration); + saved_msg.tracks[0].x = position[0]; + saved_msg.tracks[0].y = position[1]; + track_msg.tracks.push_back(saved_msg.tracks[0]); + track_msg.header.frame_id = "world"; + createMarker(marker_msg, it->second); + appendToHistory(it->second); + ++added; + } + } + else + { + to_remove.push_back(saved_msg.tracks[0].id); + } + } + + for (size_t i = 0; i < to_remove.size(); ++i) + { + last_data_map_.erase(to_remove[i]); + state_map_.erase(to_remove[i]); + } + + return added; + } + + + void createMarker(visualization_msgs::MarkerArray & msg, + const LastData & data) + { + const opt_msgs::Track & track = data.last_msg.tracks[0]; + +// if(track.visibility == Track::NOT_VISIBLE) +// return; + + visualization_msgs::Marker marker; + + marker.header.frame_id = data.last_msg.header.frame_id; + marker.header.stamp = data.last_msg.header.stamp; + + marker.ns = "people"; + marker.id = track.id; + + marker.type = visualization_msgs::Marker::SPHERE; + marker.action = visualization_msgs::Marker::ADD; + + marker.pose.position.x = track.x; + marker.pose.position.y = track.y; + marker.pose.position.z = track.height / 2; + marker.pose.orientation.x = 0.0; + marker.pose.orientation.y = 0.0; + marker.pose.orientation.z = 0.0; + marker.pose.orientation.w = 1.0; + + marker.scale.x = 0.1; + marker.scale.y = 0.1; + marker.scale.z = 0.1; + + marker.color.r = color_set_[track.id % color_set_.size()](2); + marker.color.g = 
color_set_[track.id % color_set_.size()](1); + marker.color.b = color_set_[track.id % color_set_.size()](0); + marker.color.a = 1.0; + + marker.lifetime = ros::Duration(0.2); + + msg.markers.push_back(marker); + + //------------------------------------ + + visualization_msgs::Marker text_marker; + + text_marker.header.frame_id = data.last_msg.header.frame_id; + text_marker.header.stamp = data.last_msg.header.stamp; + + text_marker.ns = "numbers"; + text_marker.id = track.id; + + text_marker.type = visualization_msgs::Marker::TEXT_VIEW_FACING; + text_marker.action = visualization_msgs::Marker::ADD; + + std::stringstream ss; + ss << track.id; + text_marker.text = ss.str(); + + text_marker.pose.position.x = track.x; + text_marker.pose.position.y = track.y; + text_marker.pose.position.z = track.height + 0.1; + text_marker.pose.orientation.x = 0.0; + text_marker.pose.orientation.y = 0.0; + text_marker.pose.orientation.z = 0.0; + text_marker.pose.orientation.w = 1.0; + + text_marker.scale.x = 0.2; + text_marker.scale.y = 0.2; + text_marker.scale.z = 0.2; + + text_marker.color.r = color_set_[track.id % color_set_.size()](2); + text_marker.color.g = color_set_[track.id % color_set_.size()](1); + text_marker.color.b = color_set_[track.id % color_set_.size()](0); + text_marker.color.a = 1.0; + + text_marker.lifetime = ros::Duration(0.2); + + msg.markers.push_back(text_marker); + } + + void toPointXYZRGB(pcl::PointXYZRGB & p, + const LastData & data) + { + const opt_msgs::Track & track = data.last_msg.tracks[0]; + +// if(track.visibility == Track::NOT_VISIBLE) +// return; + + p.x = float(track.x); + p.y = float(track.y); + p.z = float(track.height / 2); + uchar * rgb_ptr = reinterpret_cast(&p.rgb); + *rgb_ptr++ = uchar(color_set_[track.id % color_set_.size()](0) * 255.0f); + *rgb_ptr++ = uchar(color_set_[track.id % color_set_.size()](1) * 255.0f); + *rgb_ptr++ = uchar(color_set_[track.id % color_set_.size()](2) * 255.0f); + } + + ros::NodeHandle node_handle_; + + boost::recursive_mutex config_mutex_; + boost::shared_ptr reconfigure_server_; + + ros::Subscriber tracking_sub_; + ros::Publisher tracking_pub_; + ros::Publisher marker_array_pub_; + ros::Publisher history_pub_; + + ros::Duration heartbeat_time_duration_; + + bool publish_empty_; + + std::map last_data_map_; + ros::Duration time_alive_; + std::vector > color_set_; + + pcl::PointCloud::Ptr history_cloud_; + int max_history_size_; + size_t history_cloud_index_; + + ros::Rate rate_; + double alpha_, beta_, gamma_; + ros::Duration prediction_duration_; + + std::map > state_map_; + +}; + +} // namespace tracking +} // namespace open_ptrack + +using open_ptrack::tracking::TrackSmoother; + +int main(int argc, char **argv) +{ + // Initialization: + ros::init(argc, argv, "tracking_filter"); + ros::NodeHandle nh("~"); + + TrackSmoother smoother(nh); + smoother.spin(); + + return 0; +} diff --git a/tracking/launch/tracking/apps/tracker_node.cpp b/tracking/launch/tracking/apps/tracker_node.cpp new file mode 100644 index 0000000..0111bd8 --- /dev/null +++ b/tracking/launch/tracking/apps/tracker_node.cpp @@ -0,0 +1,810 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +//#include + +// Dynamic reconfigure: +#include +#include + +typedef tracking::TrackerConfig Config; +typedef dynamic_reconfigure::Server ReconfigureServer; + +// Global variables: +std::map detection_sources_map; +tf::TransformListener* tf_listener; +std::string world_frame_id; +bool output_history_pointcloud; +int output_history_size; +int detection_history_size; +bool output_markers; +bool output_image_rgb; +bool output_tracking_results; +bool output_detection_results; // Enables/disables the publishing of detection positions to be visualized in RViz +bool vertical; +ros::Publisher results_pub; +ros::Publisher marker_pub_tmp; +ros::Publisher marker_pub; +ros::Publisher pointcloud_pub; +ros::Publisher detection_marker_pub; +ros::Publisher detection_trajectory_pub; +ros::Publisher alive_ids_pub; +size_t starting_index; +size_t detection_insert_index; +tf::Transform camera_frame_to_world_transform; +tf::Transform world_to_camera_frame_transform; +bool extrinsic_calibration; +double period; +open_ptrack::tracking::Tracker* tracker; +pcl::PointCloud::Ptr history_pointcloud(new pcl::PointCloud); +pcl::PointCloud::Ptr detection_history_pointcloud(new pcl::PointCloud); +bool swissranger; +double min_confidence; +double min_confidence_sr; +double min_confidence_detections; +double min_confidence_detections_sr; +std::vector camera_colors; // vector containing colors to use to identify cameras in the network +std::map color_map; // map between camera frame_id and color +// Chi square distribution +std::map chi_map; +bool velocity_in_motion_term; +double acceleration_variance; +double position_variance_weight; +double voxel_size; +double gate_distance; 
+bool calibration_refinement; +std::map registration_matrices; +double max_detection_delay; +ros::Time latest_time; + +std::map last_received_detection_; +ros::Duration max_time_between_detections_; + +std::map > number_messages_delay_map_; + +/** + * \brief Create marker to be visualized in RViz + * + * \param[in] id The marker ID. + * \param[in] frame_id The marker reference frame. + * \param[in] position The marker position. + * \param[in] color The marker color. + */ +visualization_msgs::Marker +createMarker (int id, std::string frame_id, ros::Time stamp, Eigen::Vector3d position, cv::Vec3f color) +{ + visualization_msgs::Marker marker; + + marker.header.frame_id = world_frame_id; + marker.header.stamp = stamp; + marker.ns = frame_id; + marker.id = id; + marker.type = visualization_msgs::Marker::SPHERE; + marker.action = visualization_msgs::Marker::ADD; + marker.pose.position.x = position(0); + marker.pose.position.y = position(1); + marker.pose.position.z = position(2); + marker.scale.x = 0.1; + marker.scale.y = 0.1; + marker.scale.z = 0.1; + marker.color.r = color(0); + marker.color.g = color(1); + marker.color.b = color(2); + marker.color.a = 1.0; + marker.lifetime = ros::Duration(0.2); + + return marker; +} + +void +plotCameraLegend (std::map curr_color_map) +{ + // Compose camera legend: + cv::Mat legend_image = cv::Mat::zeros(500, 500, CV_8UC3); + for(std::map::iterator colormap_iterator = curr_color_map.begin(); colormap_iterator != curr_color_map.end(); colormap_iterator++) + { + int color_index = colormap_iterator->second; + cv::Vec3f color = camera_colors[color_index]; + int y_coord = color_index * legend_image.rows / (curr_color_map.size()+1) + 0.5 * legend_image.rows / (curr_color_map.size()+1); + cv::line(legend_image, cv::Point(0,y_coord), cv::Point(100,y_coord), cv::Scalar(255*color(2), 255*color(1), 255*color(0)), 8); + cv::putText(legend_image, colormap_iterator->first, cv::Point(110,y_coord), 1, 1, cv::Scalar(255, 255, 255), 1); + } + + // Display the cv image + cv::imshow("Camera legend", legend_image); + cv::waitKey(1); +} + +Eigen::Matrix4d +readMatrixFromFile (std::string filename) +{ + Eigen::Matrix4d matrix; + std::string line; + std::ifstream myfile (filename.c_str()); + if (myfile.is_open()) + { + int k = 0; + std::string number; + while (myfile >> number) + { + matrix(int(k/4), int(k%4)) = std::atof(number.c_str()); + k++; + } + myfile.close(); + } + + std::cout << matrix << std::endl; + + return matrix; +} + +/** + * \brief Read the DetectionArray message and use the detections for creating/updating/deleting tracks + * + * \param[in] msg the DetectionArray message. 
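// ---------------------------------------------------------------------------
// Illustrative aside, not part of the original patch: the chi_map and
// gate_distance globals above suggest chi-square gating of the detection-to-
// track association, i.e. a detection is only considered for a track when the
// squared Mahalanobis distance of its innovation stays below a chi-square
// quantile. A hedged sketch of such a gate (names are placeholders, not the
// OpenPTrack API):
// ---------------------------------------------------------------------------
#include <Eigen/Dense>

bool insideGate(const Eigen::Vector2d& innovation,     // z - H * x
                const Eigen::Matrix2d& innovation_cov, // innovation covariance S
                double gate)                           // e.g. 5.99 for 95%, 2 DOF
{
  double d2 = innovation.dot(innovation_cov.inverse() * innovation);
  return d2 <= gate;
}
// ---------------------------------------------------------------------------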
+ */ +void +detection_cb(const opt_msgs::DetectionArray::ConstPtr& msg) +{ + // Read message header information: + std::string frame_id = msg->header.frame_id; + ros::Time frame_time = msg->header.stamp; + + std::string frame_id_tmp = frame_id; + int pos = frame_id_tmp.find("_rgb_optical_frame"); + if (pos != std::string::npos) + frame_id_tmp.replace(pos, std::string("_rgb_optical_frame").size(), ""); + pos = frame_id_tmp.find("_depth_optical_frame"); + if (pos != std::string::npos) + frame_id_tmp.replace(pos, std::string("_depth_optical_frame").size(), ""); + last_received_detection_[frame_id_tmp] = frame_time; + + // Compute delay of detection message, if any: + double time_delay = 0.0; + if (frame_time > latest_time) + { + latest_time = frame_time; + time_delay = 0.0; + } + else + { + time_delay = (latest_time - frame_time).toSec(); + } + + tf::StampedTransform transform; + tf::StampedTransform inverse_transform; + // cv_bridge::CvImage::Ptr cvPtr; + + try + { + // Read transforms between camera frame and world frame: + if (!extrinsic_calibration) + { + static tf::TransformBroadcaster world_to_camera_tf_publisher; +// world_to_camera_tf_publisher.sendTransform(tf::StampedTransform(camera_frame_to_world_transform, ros::Time::now(), world_frame_id, frame_id)); + world_to_camera_tf_publisher.sendTransform(tf::StampedTransform(world_to_camera_frame_transform, ros::Time::now(), frame_id, world_frame_id)); + } + + //Calculate direct and inverse transforms between camera and world frame: + tf_listener->lookupTransform(world_frame_id, frame_id, ros::Time(0), transform); + tf_listener->lookupTransform(frame_id, world_frame_id, ros::Time(0), inverse_transform); + + // cvPtr = cv_bridge::toCvCopy(msg->image, sensor_msgs::image_encodings::BGR8); + + // Read camera intrinsic parameters: + Eigen::Matrix3d intrinsic_matrix; + for(int i = 0; i < 3; i++) + for(int j = 0; j < 3; j++) + intrinsic_matrix(i, j) = msg->intrinsic_matrix[i * 3 + j]; + + // Add a new DetectionSource or update an existing one: + if(detection_sources_map.find(frame_id) == detection_sources_map.end()) + { + detection_sources_map[frame_id] = new open_ptrack::detection::DetectionSource(cv::Mat(0, 0, CV_8UC3), + transform, inverse_transform, intrinsic_matrix, frame_time, frame_id); + } + else + { + detection_sources_map[frame_id]->update(cv::Mat(0, 0, CV_8UC3), transform, inverse_transform, + intrinsic_matrix, frame_time, frame_id); + double d = detection_sources_map[frame_id]->getDuration().toSec() / period; + int lostFrames = int(round(d)) - 1; + } + open_ptrack::detection::DetectionSource* source = detection_sources_map[frame_id]; + + // Create a Detection object for every detection in the detection message: + std::vector detections_vector; + for(std::vector::const_iterator it = msg->detections.begin(); + it != msg->detections.end(); it++) + { + detections_vector.push_back(open_ptrack::detection::Detection(*it, source)); + } + + // Convert HOG+SVM confidences to HAAR+ADABOOST-like people detection confidences: + if (not std::strcmp(msg->confidence_type.c_str(), "hog+svm")) + { + for(unsigned int i = 0; i < detections_vector.size(); i++) + { +// double new_confidence = detections_vector[i].getConfidence(); +// new_confidence = (new_confidence - min_confidence_detections_sr) / (min_confidence_sr - min_confidence_detections_sr) * +// (min_confidence - min_confidence_detections) + min_confidence_detections; +// detections_vector[i].setConfidence(new_confidence+2); + + double new_confidence = detections_vector[i].getConfidence(); + 
+
+    // Detection correction by means of calibration refinement:
+    if (calibration_refinement)
+    {
+      if (strcmp(frame_id.substr(0,1).c_str(), "/") == 0)
+      {
+        frame_id = frame_id.substr(1, frame_id.size() - 1);
+      }
+
+      Eigen::Matrix4d registration_matrix;
+      std::map<std::string, Eigen::Matrix4d>::iterator registration_matrices_iterator = registration_matrices.find(frame_id);
+      if (registration_matrices_iterator != registration_matrices.end())
+      { // camera already present
+        registration_matrix = registration_matrices_iterator->second;
+      }
+      else
+      { // camera not present
+        std::cout << "Reading refinement matrix of " << frame_id << " from file." << std::endl;
+        std::string refinement_filename = ros::package::getPath("opt_calibration") + "/conf/registration_" + frame_id + ".txt";
+        std::ifstream f(refinement_filename.c_str());
+        if (f.good()) // if the file exists
+        {
+          f.close();
+          registration_matrix = readMatrixFromFile (refinement_filename);
+          registration_matrices.insert(std::pair<std::string, Eigen::Matrix4d> (frame_id, registration_matrix));
+        }
+        else // if the file does not exist
+        {
+          // insert the identity matrix
+          std::cout << "Refinement file not found! Not doing refinement for this sensor." << std::endl;
+          registration_matrices.insert(std::pair<std::string, Eigen::Matrix4d> (frame_id, Eigen::Matrix4d::Identity()));
+        }
+      }
+
+      if(detections_vector.size() > 0)
+      {
+        // Apply detection refinement:
+        for(unsigned int i = 0; i < detections_vector.size(); i++)
+        {
+          Eigen::Vector3d old_centroid = detections_vector[i].getWorldCentroid();
+
+//          std::cout << frame_id << std::endl;
+//          std::cout << registration_matrix << std::endl;
+//          std::cout << "old_centroid: " << old_centroid.transpose() << std::endl;
+          Eigen::Vector4d old_centroid_homogeneous(old_centroid(0), old_centroid(1), old_centroid(2), 1.0);
+          Eigen::Vector4d refined_centroid = registration_matrix * old_centroid_homogeneous;
+          detections_vector[i].setWorldCentroid(Eigen::Vector3d(refined_centroid(0), refined_centroid(1), refined_centroid(2)));
+
+          Eigen::Vector3d refined_centroid2 = detections_vector[i].getWorldCentroid();
+//          std::cout << "refined_centroid2: " << refined_centroid2.transpose() << std::endl;
+//          std::cout << "difference: " << (refined_centroid2 - old_centroid).transpose() << std::endl << std::endl;
+        }
+      }
+    }
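// In other words, each centroid c is corrected with the homogeneous transform
// c' = T * [c; 1], i.e. c' = R * c + t, where T = [R t; 0 1] is the 4x4
// per-sensor registration matrix loaded above.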
+
+    // If at least one detection has been received:
+    if((detections_vector.size() > 0) && (time_delay < max_detection_delay))
+    {
+      // Perform detection-track association:
+      tracker->newFrame(detections_vector);
+      tracker->updateTracks();
+
+      // Create a TrackingResult message with the output of the tracking process
+      if(output_tracking_results)
+      {
+        opt_msgs::TrackArray::Ptr tracking_results_msg(new opt_msgs::TrackArray);
+        tracking_results_msg->header.stamp = ros::Time::now();//frame_time;
+        tracking_results_msg->header.frame_id = world_frame_id;
+        tracker->toMsg(tracking_results_msg);
+        // Publish tracking message:
+        results_pub.publish(tracking_results_msg);
+      }
+
+//      //Show the tracking process' results as an image
+//      if(output_image_rgb)
+//      {
+//        tracker->drawRgb();
+//        for(std::map<std::string, open_ptrack::detection::DetectionSource*>::iterator
+//            it = detection_sources_map.begin(); it != detection_sources_map.end(); it++)
+//        {
+//          cv::Mat image_to_show = it->second->getImage();
+//          if (not vertical)
+//          {
+//            //cv::imshow("TRACKER " + it->first, image_to_show);
+//            cv::imshow("TRACKER ", image_to_show); // TODO: use the above row if using multiple cameras
+//          }
+//          else
+//          {
+//            cv::flip(image_to_show.t(), image_to_show, -1);
+//            cv::flip(image_to_show, image_to_show, 1);
+//            //cv::imshow("TRACKER " + it->first, image_to_show);
+//            cv::imshow("TRACKER ", image_to_show); // TODO: use the above row if using multiple cameras
+//          }
+//          cv::waitKey(2);
+//        }
+//      }
+
+      // Publish IDs of active tracks:
+      opt_msgs::IDArray::Ptr alive_ids_msg(new opt_msgs::IDArray);
+      alive_ids_msg->header.stamp = ros::Time::now();
+      alive_ids_msg->header.frame_id = world_frame_id;
+      tracker->getAliveIDs (alive_ids_msg);
+      alive_ids_pub.publish (alive_ids_msg);
+
+      // Show the pose of each tracked object with a 3D marker (to be visualized with ROS RViz)
+      if(output_markers)
+      {
+        visualization_msgs::MarkerArray::Ptr marker_msg(new visualization_msgs::MarkerArray);
+        tracker->toMarkerArray(marker_msg);
+        marker_pub.publish(marker_msg);
+      }
+
+      // Show the history of the movements in 3D (3D trajectory) of each tracked object as a PointCloud (which can be visualized in RViz)
+      if(output_history_pointcloud)
+      {
+        history_pointcloud->header.stamp = frame_time.toNSec() / 1e3; // Convert from ns to us
+        history_pointcloud->header.frame_id = world_frame_id;
+        starting_index = tracker->appendToPointCloud(history_pointcloud, starting_index,
+            output_history_size);
+        pointcloud_pub.publish(history_pointcloud);
+      }
+
+      // Create message for showing detection positions in RViz:
+      if (output_detection_results)
+      {
+        visualization_msgs::MarkerArray::Ptr marker_msg(new visualization_msgs::MarkerArray);
+        detection_history_pointcloud->header.stamp = frame_time.toNSec() / 1e3; // Convert from ns to us
+        detection_history_pointcloud->header.frame_id = world_frame_id;
+        std::string frame_id = detections_vector[0].getSource()->getFrameId();
+
+        // Define color:
+        int color_index;
+        std::map<std::string, int>::iterator colormap_iterator = color_map.find(frame_id);
+        if (colormap_iterator != color_map.end())
+        { // camera already present
+          color_index = colormap_iterator->second;
+        }
+        else
+        { // camera not present
+          color_index = color_map.size();
+          color_map.insert(std::pair<std::string, int> (frame_id, color_index));
+
+          // Plot legend with camera names and colors:
+          plotCameraLegend (color_map);
+        }
+        for (unsigned int i = 0; i < detections_vector.size(); i++)
+        {
+          // Create marker and add it to message:
+          Eigen::Vector3d centroid = detections_vector[i].getWorldCentroid();
+          visualization_msgs::Marker marker = createMarker (i, frame_id, frame_time, centroid, camera_colors[color_index]);
+          marker_msg->markers.push_back(marker);
+
+          // Point cloud:
+          pcl::PointXYZRGB point;
+          point.x = marker.pose.position.x;
+          point.y = marker.pose.position.y;
+          point.z = marker.pose.position.z;
+          point.r = marker.color.r * 255.0f;
+          point.g = marker.color.g * 255.0f;
+          point.b = marker.color.b * 255.0f;
+          detection_insert_index = (detection_insert_index + 1) % detection_history_size;
+          detection_history_pointcloud->points[detection_insert_index] = point;
+        }
+        detection_marker_pub.publish(marker_msg); // publish marker message
+        detection_trajectory_pub.publish(detection_history_pointcloud); // publish trajectory message
+      }
+    }
+    else // if no detections have been received or detection_delay is above max_detection_delay
+    {
+      if(output_tracking_results)
+      { // Publish an empty tracking message
+        opt_msgs::TrackArray::Ptr tracking_results_msg(new opt_msgs::TrackArray);
+        tracking_results_msg->header.stamp = frame_time;
+        tracking_results_msg->header.frame_id = world_frame_id;
+        results_pub.publish(tracking_results_msg);
+      }
+
+      if((detections_vector.size() > 0) && (time_delay >= max_detection_delay))
+      {
+        if (number_messages_delay_map_.find(msg->header.frame_id) == number_messages_delay_map_.end())
+          number_messages_delay_map_[msg->header.frame_id] = std::pair<double, int>(0.0, 0);
+
+        number_messages_delay_map_[msg->header.frame_id].first += time_delay;
+        number_messages_delay_map_[msg->header.frame_id].second++;
+
+        if (number_messages_delay_map_[msg->header.frame_id].second == 100)
+        {
+          double avg = number_messages_delay_map_[msg->header.frame_id].first / number_messages_delay_map_[msg->header.frame_id].second;
+          ROS_WARN_STREAM("[" << msg->header.frame_id << "] received 100 detections with average delay " << avg << " > " << max_detection_delay);
+          number_messages_delay_map_[msg->header.frame_id] = std::pair<double, int>(0.0, 0);
+        }
+      }
+    }
+  }
+//  catch(cv_bridge::Exception& ex)
+//  {
+//    ROS_ERROR("cv_bridge exception: %s", ex.what());
+//  }
+  catch(tf::TransformException& ex)
+  {
+    ROS_ERROR("transform exception: %s", ex.what());
+  }
+}
+
+void
+generateColors(int colors_number, std::vector<cv::Vec3f>& colors)
+{
+  for (unsigned int i = 0; i < colors_number; i++)
+  {
+    colors.push_back(cv::Vec3f(
+        float(rand() % 256) / 255,
+        float(rand() % 256) / 255,
+        float(rand() % 256) / 255));
+  }
+}
+
+void
+fillChiMap(std::map<double, double>& chi_map, bool velocity_in_motion_term)
+{
+  if (velocity_in_motion_term) // chi square values with state dimension = 4
+  {
+    chi_map[0.5] = 3.357;
+    chi_map[0.75] = 5.385;
+    chi_map[0.8] = 5.989;
+    chi_map[0.9] = 7.779;
+    chi_map[0.95] = 9.488;
+    chi_map[0.98] = 11.668;
+    chi_map[0.99] = 13.277;
+    chi_map[0.995] = 14.860;
+    chi_map[0.998] = 16.924;
+    chi_map[0.999] = 18.467;
+  }
+  else // chi square values with state dimension = 2
+  {
+    chi_map[0.5] = 1.386;
+    chi_map[0.75] = 2.773;
+    chi_map[0.8] = 3.219;
+    chi_map[0.9] = 4.605;
+    chi_map[0.95] = 5.991;
+    chi_map[0.98] = 7.824;
+    chi_map[0.99] = 9.210;
+    chi_map[0.995] = 10.597;
+    chi_map[0.998] = 12.429;
+    chi_map[0.999] = 13.816;
+  }
+}
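// The values filled in above are the standard chi-square quantiles for 4 and
// 2 degrees of freedom. They act as association gates: a detection can be
// assigned to a track only if its squared Mahalanobis distance from the
// predicted track state satisfies d^2 <= chi2(p, dof); e.g. with the
// 4-dimensional state (velocity_in_motion_term enabled) and p = 0.999 the
// gate is 18.467.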
+
+void
+configCb(Config &config, uint32_t level)
+{
+  tracker->setMinConfidenceForTrackInitialization (config.min_confidence_initialization);
+  max_detection_delay = config.max_detection_delay;
+  calibration_refinement = config.calibration_refinement;
+  tracker->setSecBeforeOld (config.sec_before_old);
+  tracker->setSecBeforeFake (config.sec_before_fake);
+  tracker->setSecRemainNew (config.sec_remain_new);
+  tracker->setDetectionsToValidate (config.detections_to_validate);
+  tracker->setDetectorLikelihood (config.detector_likelihood);
+  tracker->setLikelihoodWeights (config.detector_weight*chi_map[0.999]/18.467, config.motion_weight);
+
+//  if (config.velocity_in_motion_term != velocity_in_motion_term)
+//  {
+//    // Take chi square values with regard to the state dimension:
+//    fillChiMap(chi_map, config.velocity_in_motion_term);
+//
+//    double position_variance = config.position_variance_weight*std::pow(2 * voxel_size, 2) / 12.0;
+//    tracker->setVelocityInMotionTerm (config.velocity_in_motion_term, config.acceleration_variance, position_variance);
+//  }
+//  else
+//  {
+  if (config.acceleration_variance != acceleration_variance)
+  {
+    tracker->setAccelerationVariance (config.acceleration_variance);
+  }
+
+  if (config.position_variance_weight != position_variance_weight)
+  {
+    double position_variance = config.position_variance_weight*std::pow(2 * voxel_size, 2) / 12.0;
+    tracker->setPositionVariance (position_variance);
+  }
+//  }
+
+  gate_distance = chi_map.find(config.gate_distance_probability) != chi_map.end() ? chi_map[config.gate_distance_probability] : chi_map[0.999];
+  tracker->setGateDistance (gate_distance);
+}
+
+int
+main(int argc, char** argv)
+{
+  ros::init(argc, argv, "tracker");
+  ros::NodeHandle nh("~");
+
+  // Subscribers/Publishers:
+  ros::Subscriber input_sub = nh.subscribe("input", 5, detection_cb);
+  marker_pub_tmp = nh.advertise<visualization_msgs::Marker>("/tracker/markers", 1);
+  marker_pub = nh.advertise<visualization_msgs::MarkerArray>("/tracker/markers_array", 1);
+  pointcloud_pub = nh.advertise<pcl::PointCloud<pcl::PointXYZRGB> >("/tracker/history", 1);
+  results_pub = nh.advertise<opt_msgs::TrackArray>("/tracker/tracks", 100);
+  detection_marker_pub = nh.advertise<visualization_msgs::MarkerArray>("/detector/markers_array", 1);
+  detection_trajectory_pub = nh.advertise<pcl::PointCloud<pcl::PointXYZRGB> >("/detector/history", 1);
+  alive_ids_pub = nh.advertise<opt_msgs::IDArray>("/tracker/alive_ids", 1);
+
+  // Dynamic reconfigure
+  boost::recursive_mutex config_mutex_;
+  boost::shared_ptr<ReconfigureServer> reconfigure_server_;
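// The parameters registered in configCb above can then be tuned at runtime
// with the standard dynamic_reconfigure tools, e.g. (assuming the node runs
// under the name /tracker):
//
//   rosrun dynamic_reconfigure dynparam set /tracker max_detection_delay 1.5
//
// or interactively via rqt_reconfigure.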
+
+  tf_listener = new tf::TransformListener();
+
+  // Read tracking parameters:
+  nh.param("world_frame_id", world_frame_id, std::string("/odom"));
+
+  nh.param("orientation/vertical", vertical, false);
+  nh.param("extrinsic_calibration", extrinsic_calibration, false);
+
+  nh.param("voxel_size", voxel_size, 0.06);
+
+  double rate;
+  nh.param("rate", rate, 30.0);
+
+//  double min_confidence;
+  nh.param("min_confidence_initialization", min_confidence, -2.5); //0.0);
+
+  double chi_value;
+  nh.param("gate_distance_probability", chi_value, 0.9);
+
+  nh.param("acceleration_variance", acceleration_variance, 1.0);
+
+  nh.param("position_variance_weight", position_variance_weight, 1.0);
+
+  bool detector_likelihood;
+  nh.param("detector_likelihood", detector_likelihood, false);
+
+  nh.param("velocity_in_motion_term", velocity_in_motion_term, false);
+
+  double detector_weight;
+  nh.param("detector_weight", detector_weight, -1.0);
+
+  double motion_weight;
+  nh.param("motion_weight", motion_weight, 0.5);
+
+  double sec_before_old;
+  nh.param("sec_before_old", sec_before_old, 3.6);
+
+  double sec_before_fake;
+  nh.param("sec_before_fake", sec_before_fake, 2.4);
+
+  double sec_remain_new;
+  nh.param("sec_remain_new", sec_remain_new, 1.2);
+
+  int detections_to_validate;
+  nh.param("detections_to_validate", detections_to_validate, 5);
+
+  double haar_disp_ada_min_confidence, ground_based_people_detection_min_confidence;
+  nh.param("haar_disp_ada_min_confidence", haar_disp_ada_min_confidence, -2.5); //0.0);
+  nh.param("ground_based_people_detection_min_confidence", ground_based_people_detection_min_confidence, -2.5); //0.0);
+
+  nh.param("swissranger", swissranger, false);
+
+  nh.param("ground_based_people_detection_min_confidence_sr", min_confidence_detections_sr, -1.5);
+  nh.param("min_confidence_initialization_sr", min_confidence_sr, -1.1);
+
+  nh.param("history_pointcloud", output_history_pointcloud, false);
+  nh.param("history_size", output_history_size, 1000);
+  nh.param("markers", output_markers, true);
+  nh.param("image_rgb", output_image_rgb, true);
+  nh.param("tracking_results", output_tracking_results, true);
+
+  nh.param("detection_debug", output_detection_results, true);
+  nh.param("detection_history_size", detection_history_size, 1000);
+
+  bool debug_mode;
+  nh.param("debug_active", debug_mode, false);
+
+  nh.param("calibration_refinement", calibration_refinement, false);
+  nh.param("max_detection_delay", max_detection_delay, 3.0);
+
+  double max_time_between_detections_d;
+  nh.param("max_time_between_detections", max_time_between_detections_d, 10.0);
+  max_time_between_detections_ = ros::Duration(max_time_between_detections_d);
+
+  // Read number of sensors in the network:
+  int num_cameras = 1;
+  if (extrinsic_calibration)
+  {
+    num_cameras = 0;
+    XmlRpc::XmlRpcValue network;
+    nh.getParam("network", network);
+    for (unsigned i = 0; i < network.size(); i++)
+    {
+      num_cameras += network[i]["sensors"].size();
+      for (unsigned j = 0; j < network[i]["sensors"].size(); j++)
+      {
+        std::string frame_id = network[i]["sensors"][j]["id"];
+        last_received_detection_["/" + frame_id] = ros::Time(0);
+      }
+    }
+  }
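// Only the per-sensor "id" fields are read here; judging from these accesses,
// the "network" parameter is expected to look roughly like this on the
// parameter server (the exact layout is an assumption):
//
//   network:
//     - pc: "pc-01"
//       sensors:
//         - id: "kinect01"
//         - id: "kinect02"
//     - pc: "pc-02"
//       sensors:
//         - id: "kinect03"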
+
+  // Set min_confidence_detections variable based on sensor type:
+  if (swissranger)
+    min_confidence_detections = ground_based_people_detection_min_confidence;
+  else
+    min_confidence_detections = haar_disp_ada_min_confidence;
+
+  // Take chi square values with regard to the state dimension:
+  fillChiMap(chi_map, velocity_in_motion_term);
+
+  // Compute additional parameters:
+  period = 1.0 / rate;
+  gate_distance = chi_map.find(chi_value) != chi_map.end() ? chi_map[chi_value] : chi_map[0.999];
+
+  double position_variance;
+//  position_variance = 3*std::pow(2 * voxel_size, 2) / 12.0; // DEFAULT
+  position_variance = position_variance_weight*std::pow(2 * voxel_size, 2) / 12.0;
+  std::vector<double> likelihood_weights;
+  likelihood_weights.push_back(detector_weight*chi_map[0.999]/18.467);
+  likelihood_weights.push_back(motion_weight);
+
+  // Generate colors used to identify different cameras:
+  generateColors(num_cameras, camera_colors);
+
+  // Initialize point cloud containing detections trajectory:
+  pcl::PointXYZRGB nan_point;
+  nan_point.x = std::numeric_limits<float>::quiet_NaN();
+  nan_point.y = std::numeric_limits<float>::quiet_NaN();
+  nan_point.z = std::numeric_limits<float>::quiet_NaN();
+  detection_history_pointcloud->points.resize(detection_history_size, nan_point);
+
+  ros::Rate hz(num_cameras*rate);
+
+//  cv::namedWindow("TRACKER ", CV_WINDOW_NORMAL);
+
+  // Initialize an instance of the Tracker object:
+  tracker = new open_ptrack::tracking::Tracker(
+      gate_distance,
+      detector_likelihood,
+      likelihood_weights,
+      velocity_in_motion_term,
+      min_confidence,
+      min_confidence_detections,
+      sec_before_old,
+      sec_before_fake,
+      sec_remain_new,
+      detections_to_validate,
+      period,
+      position_variance,
+      acceleration_variance,
+      world_frame_id,
+      debug_mode,
+      vertical);
+
+  starting_index = 0;
+
+  // Set up dynamic reconfiguration
+  ReconfigureServer::CallbackType f = boost::bind(&configCb, _1, _2);
+  reconfigure_server_.reset(new ReconfigureServer(config_mutex_, nh));
+  reconfigure_server_->setCallback(f);
+
+  // If extrinsic calibration is not available:
+  if (!extrinsic_calibration)
+  { // Set fixed transformation from rgb frame and base_link
+    tf::Vector3 fixed_translation(0, 0, 0); // camera_rgb_optical_frame -> world
+    tf::Quaternion fixed_rotation(-0.5, 0.5, -0.5, -0.5); // camera_rgb_optical_frame -> world
+    tf::Vector3 inv_fixed_translation(0.0, 0.0, 0); // world -> camera_rgb_optical_frame
+    tf::Quaternion inv_fixed_rotation(-0.5, 0.5, -0.5, 0.5); // world -> camera_rgb_optical_frame
+    world_to_camera_frame_transform = tf::Transform(fixed_rotation, fixed_translation);
+    camera_frame_to_world_transform = tf::Transform(inv_fixed_rotation, inv_fixed_translation);
+  }
+
+  // Spin and execute callbacks:
+//  ros::spin();
+
+  std::map<std::string, ros::Time> last_message;
+  for (std::map<std::string, ros::Time>::const_iterator it = last_received_detection_.begin(); it != last_received_detection_.end(); ++it)
+    last_message[it->first] = ros::Time::now();
+
+  ros::Time last_camera_legend_update = ros::Time::now(); // last time when the camera legend has been updated
+
+  while (ros::ok())
+  {
+    ros::spinOnce();
+    ros::Time now = ros::Time::now();
+    for (std::map<std::string, ros::Time>::const_iterator it = last_received_detection_.begin(); it != last_received_detection_.end(); ++it)
+    {
+      ros::Duration duration(now - it->second);
+      if (duration > max_time_between_detections_)
+      {
+        if (it->second > ros::Time(0) and now - last_message[it->first] > max_time_between_detections_)
+        {
+          ROS_WARN_STREAM("[" << it->first << "] last detection was " << duration.toSec() << " seconds ago");
+          last_message[it->first] = now;
+        }
+        else if (now - last_message[it->first] > max_time_between_detections_)
+        {
+          ROS_WARN_STREAM("[" << it->first << "] still waiting for detection messages...");
+          last_message[it->first] = now;
+        }
+      }
+
+      // Update camera legend every second:
+      if ((now - last_camera_legend_update) > ros::Duration(1.0)) // if more than one second passed since last update
+      { // update OpenCV image with a waitKey:
+        cv::waitKey(1);
+        last_camera_legend_update = now;
+      }
+    }
+    hz.sleep();
+  }
+
+  return 0;
+}
diff --git a/tracking/launch/tracking/cfg/MovingAverageSmoother.cfg b/tracking/launch/tracking/cfg/MovingAverageSmoother.cfg
new file mode 100644
index 0000000..8e9739a
--- /dev/null
+++ b/tracking/launch/tracking/cfg/MovingAverageSmoother.cfg
@@ -0,0 +1,40 @@
+#! /usr/bin/env python
+
+# Declare parameters that control the track smoother
+
+PACKAGE='tracking'
+
+from dynamic_reconfigure.parameter_generator_catkin import *
+
+gen = ParameterGenerator()
+
+#########################
+## Tracking parameters ##
+#########################
+# Rate at which messages are published:
+#gen.add("rate", double_t, 0, "Rate at which messages are published", 30.0, 0.0, 100.0)
+# Time lapse (after last detection) before a track is removed:
+gen.add("track_lifetime_with_no_detections", double_t, 0, "Time lapse (after last detection) before a track is removed", 0.5, 0.0, 5.0)
+# Interval between two heartbeat messages:
+gen.add("heartbeat_time", double_t, 0, "Interval between two heartbeat messages", 5.0, 1.0, 20.0)
+# Flag stating if empty tracking messages should be published or not (if False, a heartbeat message is published):
+gen.add("publish_empty", bool_t, 0, "Flag stating if empty tracking messages should be published or not (if False, a heartbeat message is published)", True)
+
+##########################
+## Smoothing parameters ##
+##########################
+# Number of positions to account when calculating the average position:
+gen.add("window_size", int_t, 0, "Number of positions to account when calculating the average position", 10, 1, 100)
+# Time lapse before a position is removed from the position list. This option removes old data even if they are within the averaging window:
+gen.add("position_lifetime", double_t, 0, "Time lapse before a position is removed from the position list", 1.0, 0.0, 5.0)
+
+###########
+## Debug ##
+###########
+# Dimension of the point cloud containing track trajectories:
+gen.add("max_history_size", int_t, 0, "Dimension of the point cloud containing track trajectories", 500, 1, 10000)
+
+# First string value is node name, used only for generating documentation
+# Second string value ("MovingAverageSmoother") is name of class and generated
+# .h file, with "Config" added, so class MovingAverageSmootherConfig
+exit(gen.generate(PACKAGE, "moving_average_smoother", "MovingAverageSmoother"))
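As the parameter descriptions above suggest, this smoother presumably replaces
each track position with the arithmetic mean of its most recent samples,
x_smoothed = (1/N) * (x_1 + ... + x_N), where N is at most window_size and
samples older than position_lifetime are dropped first; larger windows trade
responsiveness for smoothness.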
diff --git a/tracking/launch/tracking/cfg/Tracker.cfg b/tracking/launch/tracking/cfg/Tracker.cfg
new file mode 100644
index 0000000..b515734
--- /dev/null
+++ b/tracking/launch/tracking/cfg/Tracker.cfg
@@ -0,0 +1,78 @@
+#! /usr/bin/env python
+
+# Declare parameters that control people tracking
+
+PACKAGE='tracking'
+
+from dynamic_reconfigure.parameter_generator_catkin import *
+
+gen = ParameterGenerator()
+
+#########################
+## Tracking parameters ##
+#########################
+# Minimum confidence for track initialization:
+gen.add("min_confidence_initialization", double_t, 0, "Minimum confidence for track initialization", 4.0, -10.0, 10.0)
+# Maximum delay that a detection message can have in order to be considered for tracking:
+gen.add("max_detection_delay", double_t, 0, "Maximum delay that a detection message can have in order to be considered for tracking", 2.0, 0.0, 3.0)
+# Flag stating if the results of a calibration refinement procedure should be used to correct detection positions:
+gen.add("calibration_refinement", bool_t, 0, "Flag stating if the results of a calibration refinement procedure should be used to correct detection positions", False)
+
+#######################
+## Motion parameters ##
+#######################
+# The higher this is, the more motion variance is allowed.
+# It can assume these values: 0.5 0.75 0.8 0.9 0.95 0.98 0.99 0.995 0.998 0.999
+gate_distance_enum = gen.enum([ gen.const("0", double_t, 0.5, "0.5"),
+                                gen.const("1", double_t, 0.75, "0.75"),
+                                gen.const("2", double_t, 0.8, "0.8"),
+                                gen.const("3", double_t, 0.9, "0.9"),
+                                gen.const("4", double_t, 0.95, "0.95"),
+                                gen.const("5", double_t, 0.98, "0.98"),
+                                gen.const("6", double_t, 0.99, "0.99"),
+                                gen.const("7", double_t, 0.995, "0.995"),
+                                gen.const("8", double_t, 0.998, "0.998"),
+                                gen.const("9", double_t, 0.999, "0.999")],
+                              "An enum to set gate_distance_probability")
+gen.add("gate_distance_probability", double_t, 0, "The higher this is, the more motion variance is allowed", 0.99, 0.5, 0.999, edit_method=gate_distance_enum)
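# With velocity_in_motion_term enabled the tracker uses a 4-dimensional state,
# so the probabilities above map to chi-square gates of, e.g., 0.9 -> 7.779,
# 0.99 -> 13.277 and 0.999 -> 18.467 (see fillChiMap in the tracker node).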
+# Acceleration variance in people motion model:
+gen.add("acceleration_variance", double_t, 0, "Acceleration variance in people motion model", 100, 0.1, 1000)
+# Additional weight for position variance in people motion model:
+gen.add("position_variance_weight", double_t, 0, "Additional weight for position variance in people motion model", 30, 1, 100)
+
+#################################
+## Data association parameters ##
+#################################
+# Flag stating if detection likelihood should be used in data association:
+gen.add("detector_likelihood", bool_t, 0, "Flag stating if detection likelihood should be used in data association", True)
+# Weight of detection likelihood in data association:
+gen.add("detector_weight", double_t, 0, "Weight of detection likelihood in data association", -0.25, -10.0, 0.0)
+# Flag stating if track velocity should be considered in data association:
+#gen.add("velocity_in_motion_term", bool_t, 0, "Flag stating if track velocity should be considered in data association", True)
+# Weight of motion likelihood in data association:
+gen.add("motion_weight", double_t, 0, "Weight of motion likelihood in data association", 0.25, 0.0, 10.0)
+
+################################
+## Tracking policy parameters ##
+################################
+# Track duration (seconds) after last valid detection:
+gen.add("sec_before_old", double_t, 0, "Track duration (seconds) after last valid detection", 8.0, 0.0, 100.0)
+# Seconds within which a track should be validated (otherwise it is discarded):
+gen.add("sec_before_fake", double_t, 0, "Seconds within which a track should be validated (otherwise it is discarded)", 2.4, 0.0, 10.0)
+# Seconds after which a visible track obtains NORMAL status:
+gen.add("sec_remain_new", double_t, 0, "Seconds after which a visible track obtains NORMAL status", 1.2, 0.0, 10.0)
+# Minimum number of detection<->track associations needed for validating a track:
+gen.add("detections_to_validate", int_t, 0, "Minimum number of detection<->track associations needed for validating a track", 3, 1, 20)
+
+###########
+## Debug ##
+###########
+# Dimension of the point cloud containing track trajectories:
+#gen.add("history_size", int_t, 0, "Dimension of the point cloud containing track trajectories", 500, 1, 10000)
+# Dimension of the point cloud containing detection trajectories:
+#gen.add("detection_history_size", int_t, 0, "Dimension of the point cloud containing detection trajectories", 500, 1, 10000)
+
+# First string value is node name, used only for generating documentation
+# Second string value ("Tracker") is name of class and generated
+# .h file, with "Config" added, so class TrackerConfig
+exit(gen.generate(PACKAGE, "tracking", "Tracker"))
diff --git a/tracking/launch/tracking/cfg/TrackerSmoother.cfg b/tracking/launch/tracking/cfg/TrackerSmoother.cfg
new file mode 100644
index 0000000..0344c49
--- /dev/null
+++ b/tracking/launch/tracking/cfg/TrackerSmoother.cfg
@@ -0,0 +1,42 @@
+#! /usr/bin/env python
+
+# Declare parameters that control the track smoother
+
+PACKAGE='tracking'
+
+from dynamic_reconfigure.parameter_generator_catkin import *
+
+gen = ParameterGenerator()
+
+#########################
+## Tracking parameters ##
+#########################
+# Rate at which messages are published:
+#gen.add("rate", double_t, 0, "Rate at which messages are published", 30.0, 0.0, 100.0)
+# Time lapse (after last detection) before a track is removed:
+gen.add("track_lifetime_with_no_detections", double_t, 0, "Time lapse (after last detection) before a track is removed", 0.5, 0.0, 5.0)
+# Interval between two heartbeat messages:
+gen.add("heartbeat_time", double_t, 0, "Interval between two heartbeat messages", 5.0, 1.0, 20.0)
+# Flag stating if empty tracking messages should be published or not (if False, a heartbeat message is published):
+gen.add("publish_empty", bool_t, 0, "Flag stating if empty tracking messages should be published or not (if False, a heartbeat message is published)", True)
+
+##########################
+## Smoothing parameters ##
+##########################
+# Error of the measurements provided by the tracker (0.0 means no error and therefore no smoothing will be applied):
+gen.add("sigma_noise", double_t, 0, "Error of the measurements provided by the tracker (0.0 = no error: no smoothing will be applied)", 0.20, 0.0, 1.0)
+# How much the acceleration of a track can vary (small values = high smoothing):
+gen.add("sigma_process", double_t, 0, "How much the acceleration of a track can vary (small values = high smoothing)", 0.15, 0.0001, 1.0)
+# Time lapse (after last detection) during which the position of a track is predicted and published:
+gen.add("prediction_duration", double_t, 0, "Time lapse (after last detection) during which the position of a track is predicted and published", 0.1, 0.0, 10.0)
+
+###########
+## Debug ##
+###########
+# Dimension of the point cloud containing track trajectories:
+gen.add("max_history_size", int_t, 0, "Dimension of the point cloud containing track trajectories", 500, 1, 10000)
+
+# First string value is node name, used only for generating documentation
+# Second string value ("TrackerSmoother") is name of class and generated
+# .h file, with "Config" added, so class TrackerSmootherConfig
+exit(gen.generate(PACKAGE, "tracker_smoother", "TrackerSmoother"))
diff --git a/tracking/launch/tracking/conf/moving_average_filter.yaml b/tracking/launch/tracking/conf/moving_average_filter.yaml
new file mode 100644
index 0000000..7c0e001
--- /dev/null
+++ b/tracking/launch/tracking/conf/moving_average_filter.yaml
@@ -0,0 +1,26 @@
+#####################
+## Node parameters ##
+#####################
+# Tracking output frame rate:
+rate: 30.0
+# Time lapse (after last detection) before a track is removed:
+track_lifetime_with_no_detections: 1.0
+# Interval between two heartbeat messages:
+heartbeat_time: 5.0
+# Flag stating if empty tracking messages should be published or not:
+publish_empty: true
+
+##########################
+## Smoothing parameters ##
+##########################
+# Number of positions to account when calculating the average position:
+window_size: 10
+# Time lapse before a position is removed from the position list.
This option removes old data even if they are within the averaging window: +position_lifetime: 1.0 + +########### +## Debug ## +########### +# Max size of the history pointcloud: +max_history_size: 1000 + diff --git a/tracking/launch/tracking/conf/multicamera_tracking.rviz b/tracking/launch/tracking/conf/multicamera_tracking.rviz new file mode 100644 index 0000000..868f31a --- /dev/null +++ b/tracking/launch/tracking/conf/multicamera_tracking.rviz @@ -0,0 +1,180 @@ +Panels: + - Class: rviz/Displays + Help Height: 78 + Name: Displays + Property Tree Widget: + Expanded: + - /Global Options1 + - /Status1 + - /PointCloud21 + - /MarkerArray1 + Splitter Ratio: 0.526846 + Tree Height: 775 + - Class: rviz/Selection + Name: Selection + - Class: rviz/Tool Properties + Expanded: + - /2D Pose Estimate1 + - /2D Nav Goal1 + - /Publish Point1 + Name: Tool Properties + Splitter Ratio: 0.588679 + - Class: rviz/Views + Expanded: + - /Current View1 + Name: Views + Splitter Ratio: 0.5 + - Class: rviz/Time + Experimental: false + Name: Time + SyncMode: 0 + SyncSource: "" +Visualization Manager: + Class: "" + Displays: + - Alpha: 0.5 + Cell Size: 1 + Class: rviz/Grid + Color: 160; 160; 164 + Enabled: true + Line Style: + Line Width: 0.03 + Value: Lines + Name: Grid + Normal Cell Count: 0 + Offset: + X: 0 + Y: 0 + Z: 0 + Plane: XY + Plane Cell Count: 10 + Reference Frame: + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: false + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 5 + Size (m): 0.01 + Style: Points + Topic: /tracker/history_smoothed + Use Fixed Frame: true + Use rainbow: true + Value: false + - Class: rviz/MarkerArray + Enabled: true + Marker Topic: /tracker/markers_array_smoothed + Name: MarkerArray + Namespaces: + numbers: true + people: true + Queue Size: 100 + Value: true + - Class: rviz/TF + Enabled: true + Frame Timeout: 15 + Frames: + All Enabled: false + Kinect: + Value: true + Kinect_depth_frame: + Value: true + Kinect_depth_optical_frame: + Value: true + Kinect_link: + Value: true + Kinect_rgb_frame: + Value: true + Kinect_rgb_optical_frame: + Value: true + world: + Value: true + Marker Scale: 1 + Name: TF + Show Arrows: true + Show Axes: true + Show Names: true + Tree: + world: + Kinect: + Kinect_link: + Kinect_depth_frame: + Kinect_depth_optical_frame: + {} + Kinect_rgb_frame: + Kinect_rgb_optical_frame: + {} + Update Interval: 0 + Value: true + Enabled: true + Global Options: + Background Color: 48; 48; 48 + Fixed Frame: world + Frame Rate: 60 + Name: root + Tools: + - Class: rviz/Interact + Hide Inactive Objects: true + - Class: rviz/MoveCamera + - Class: rviz/Select + - Class: rviz/FocusCamera + - Class: rviz/Measure + - Class: rviz/SetInitialPose + Topic: /initialpose + - Class: rviz/SetGoal + Topic: /move_base_simple/goal + - Class: rviz/PublishPoint + Single click: true + Topic: /clicked_point + Value: true + Views: + Current: + Class: rviz/Orbit + Distance: 7.62407 + Focal Point: + X: -0.326729 + Y: 0.467386 + Z: 1.2193 + Name: Current View + Near Clip Distance: 0.01 + Pitch: 1.5698 + Target Frame: + Value: Orbit (rviz) + Yaw: 1.5704 + Saved: ~ +Window Geometry: + Displays: + collapsed: false + 
Height: 1056 + Hide Left Dock: false + Hide Right Dock: true + QMainWindow State: 000000ff00000000fd00000004000000000000013c00000396fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000002800000396000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f0000037afc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000280000037a000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000007800000003efc0100000002fb0000000800540069006d0065010000000000000780000002f600fffffffb0000000800540069006d006501000000000000045000000000000000000000063e0000039600000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + Selection: + collapsed: false + Time: + collapsed: false + Tool Properties: + collapsed: false + Views: + collapsed: true + Width: 1920 + X: 0 + Y: 24 diff --git a/tracking/launch/tracking/conf/tracker.yaml b/tracking/launch/tracking/conf/tracker.yaml new file mode 100644 index 0000000..c637686 --- /dev/null +++ b/tracking/launch/tracking/conf/tracker.yaml @@ -0,0 +1,74 @@ +######################### +## Tracking parameters ## +######################### +# Mininum confidence for track initialization: +min_confidence_initialization: 4.0 +# Tracking maximum frame rate per sensor: +rate: 30 +# Tracking reference frame: +world_frame_id: "/world" +# Voxel size used for people detection: +voxel_size: 0.06 +# Flag stating if extrinsic (multicamera) calibration has been performed or not: +extrinsic_calibration: false + +######################## +## Sensor orientation ## +######################## +# Flag stating if the sensor is vertically placed, or not: +orientation: + vertical: false + +####################### +## Motion parameters ## +####################### +# The higher is this, the higher motion variance is allowed +# it can assume these values: 0.5 0.75 0.8 0.9 0.95 0.98 0.99 0.995 0.998 0.999 +gate_distance_probability: 0.999 +# Acceleration variance in people motion model: +acceleration_variance: 100 +# Additional weight for position variance in people motion model: +position_variance_weight: 30 + +################################# +## Data association parameters ## +################################# +# Flag stating if detection likelihood should be used in data association: +detector_likelihood: true +# Flag stating if track velocity should be considered in data association: +velocity_in_motion_term: true +# Weight of detection likelihood in data association: +detector_weight: -0.25 +# Weight of motion likelihood in data association: +motion_weight: 0.25 + +################################ +## Tracking policy parameters ## +################################ +# Track duration (seconds) 
after last valid detection: +sec_before_old: 8 +# Seconds within which a track should be validated (otherwise it is discarded): +sec_before_fake: 2.4 +# Seconds after which a visible track obtain NORMAL status: +sec_remain_new: 1.2 +# Minimum number of detection<->track associations needed for validating a track: +detections_to_validate: 3 + +########### +## Debug ## +########### +# Flag activating debug/visualization options: +debug_active: true +# Flag stating if a point cloud containing track trajectories (visible in RViz) should be created: +history_pointcloud: true +# Dimension of the point cloud containing track trajectories: +history_size: 500 +# Flag stating if a spheric marker (with number) for every track (visible in RViz) should be created: +markers: true +# Flag stating if tracking results should be sent over the network: +tracking_results: true +# Flag stating if markers and a point cloud containing detection trajectories (visible in RViz) should be created: +detection_debug: true +# Dimension of the point cloud containing detection trajectories: +detection_history_size: 500 + diff --git a/tracking/launch/tracking/conf/tracker_filter.yaml b/tracking/launch/tracking/conf/tracker_filter.yaml new file mode 100644 index 0000000..54415e0 --- /dev/null +++ b/tracking/launch/tracking/conf/tracker_filter.yaml @@ -0,0 +1,28 @@ +##################### +## Node parameters ## +##################### +# Tracking output frame rate: +rate: 30.0 +# Time lapse (after last detection) before a track is removed: +track_lifetime_with_no_detections: 1.0 +# Interval between two heartbeat messages: +heartbeat_time: 5.0 +# Flag stating if empty tracking messages should be published or not: +publish_empty: true + +########################## +## Smoothing parameters ## +########################## +# Error of the measurements provided by the tracker (0.0 means no error, i.e. 
no smoothing will be applied): +sigma_noise: 0.0 #0.30 +# How much the acceleration of a track can vary (small values = high smoothing): +sigma_process: 0.05 +# Time lapse (after last detection) that a the position of a track is predicted and published: +prediction_duration: 0.1 + +########### +## Debug ## +########### +# Max size of the history pointcloud: +max_history_size: 1000 + diff --git a/tracking/launch/tracking/conf/tracker_multicamera.yaml b/tracking/launch/tracking/conf/tracker_multicamera.yaml new file mode 100644 index 0000000..2cb5a99 --- /dev/null +++ b/tracking/launch/tracking/conf/tracker_multicamera.yaml @@ -0,0 +1,78 @@ +######################### +## Tracking parameters ## +######################### +# Mininum confidence for track initialization: +min_confidence_initialization: 4.0 +# Tracking maximum frame rate per sensor: +rate: 30 +# Tracking reference frame: +world_frame_id: "/world" +# Voxel size used for people detection: +voxel_size: 0.06 +# Flag stating if extrinsic (multicamera) calibration has been performed or not: +extrinsic_calibration: true +# Maximum delay that a detection message can have in order to be considered for tracking: +max_detection_delay: 2.0 +# Flag stating if the results of a calibration refinement procedure should be used to correct detection positions: +calibration_refinement: true + +######################## +## Sensor orientation ## +######################## +# Flag stating if the sensor is vertically placed, or not: +orientation: + vertical: false + +####################### +## Motion parameters ## +####################### +# The higher is this, the higher motion variance is allowed +# it can assume these values: 0.5 0.75 0.8 0.9 0.95 0.98 0.99 0.995 0.998 0.999 +gate_distance_probability: 0.99 +# Acceleration variance in people motion model: +acceleration_variance: 100 +# Additional weight for position variance in people motion model: +position_variance_weight: 30 + +################################# +## Data association parameters ## +################################# +# Flag stating if detection likelihood should be used in data association: +detector_likelihood: true +# Flag stating if track velocity should be considered in data association: +velocity_in_motion_term: true +# Weight of detection likelihood in data association: +detector_weight: -0.25 +# Weight of motion likelihood in data association: +motion_weight: 0.25 + +################################ +## Tracking policy parameters ## +################################ +# Track duration (seconds) after last valid detection: +sec_before_old: 8 +# Seconds within which a track should be validated (otherwise it is discarded): +sec_before_fake: 2.4 +# Seconds after which a visible track obtain NORMAL status: +sec_remain_new: 1.2 +# Minimum number of detection<->track associations needed for validating a track: +detections_to_validate: 3 + +########### +## Debug ## +########### +# Flag activating debug/visualization options: +debug_active: true +# Flag stating if a point cloud containing track trajectories (visible in RViz) should be created: +history_pointcloud: true +# Dimension of the point cloud containing track trajectories: +history_size: 500 +# Flag stating if a spheric marker (with number) for every track (visible in RViz) should be created: +markers: true +# Flag stating if tracking results should be sent over the network: +tracking_results: true +# Flag stating if markers and a point cloud containing detection trajectories (visible in RViz) should be created: 
+detection_debug: true +# Dimension of the point cloud containing detection trajectories: +detection_history_size: 500 + diff --git a/tracking/launch/tracking/conf/tracker_sr.yaml b/tracking/launch/tracking/conf/tracker_sr.yaml new file mode 100644 index 0000000..c637686 --- /dev/null +++ b/tracking/launch/tracking/conf/tracker_sr.yaml @@ -0,0 +1,74 @@ +######################### +## Tracking parameters ## +######################### +# Mininum confidence for track initialization: +min_confidence_initialization: 4.0 +# Tracking maximum frame rate per sensor: +rate: 30 +# Tracking reference frame: +world_frame_id: "/world" +# Voxel size used for people detection: +voxel_size: 0.06 +# Flag stating if extrinsic (multicamera) calibration has been performed or not: +extrinsic_calibration: false + +######################## +## Sensor orientation ## +######################## +# Flag stating if the sensor is vertically placed, or not: +orientation: + vertical: false + +####################### +## Motion parameters ## +####################### +# The higher is this, the higher motion variance is allowed +# it can assume these values: 0.5 0.75 0.8 0.9 0.95 0.98 0.99 0.995 0.998 0.999 +gate_distance_probability: 0.999 +# Acceleration variance in people motion model: +acceleration_variance: 100 +# Additional weight for position variance in people motion model: +position_variance_weight: 30 + +################################# +## Data association parameters ## +################################# +# Flag stating if detection likelihood should be used in data association: +detector_likelihood: true +# Flag stating if track velocity should be considered in data association: +velocity_in_motion_term: true +# Weight of detection likelihood in data association: +detector_weight: -0.25 +# Weight of motion likelihood in data association: +motion_weight: 0.25 + +################################ +## Tracking policy parameters ## +################################ +# Track duration (seconds) after last valid detection: +sec_before_old: 8 +# Seconds within which a track should be validated (otherwise it is discarded): +sec_before_fake: 2.4 +# Seconds after which a visible track obtain NORMAL status: +sec_remain_new: 1.2 +# Minimum number of detection<->track associations needed for validating a track: +detections_to_validate: 3 + +########### +## Debug ## +########### +# Flag activating debug/visualization options: +debug_active: true +# Flag stating if a point cloud containing track trajectories (visible in RViz) should be created: +history_pointcloud: true +# Dimension of the point cloud containing track trajectories: +history_size: 500 +# Flag stating if a spheric marker (with number) for every track (visible in RViz) should be created: +markers: true +# Flag stating if tracking results should be sent over the network: +tracking_results: true +# Flag stating if markers and a point cloud containing detection trajectories (visible in RViz) should be created: +detection_debug: true +# Dimension of the point cloud containing detection trajectories: +detection_history_size: 500 + diff --git a/tracking/launch/tracking/conf/tracking.rviz b/tracking/launch/tracking/conf/tracking.rviz new file mode 100644 index 0000000..72ff906 --- /dev/null +++ b/tracking/launch/tracking/conf/tracking.rviz @@ -0,0 +1,249 @@ +Panels: + - Class: rviz/Displays + Help Height: 78 + Name: Displays + Property Tree Widget: + Expanded: + - /Global Options1 + - /Status1 + - /PointCloud21 + - /Marker1 + - /MarkerArray1 + - /DepthCloud1/Auto Size1 + 
Splitter Ratio: 0.526846 + Tree Height: 405 + - Class: rviz/Selection + Name: Selection + - Class: rviz/Tool Properties + Expanded: + - /2D Pose Estimate1 + - /2D Nav Goal1 + - /Publish Point1 + Name: Tool Properties + Splitter Ratio: 0.588679 + - Class: rviz/Views + Expanded: + - /Current View1 + Name: Views + Splitter Ratio: 0.5 + - Class: rviz/Time + Experimental: false + Name: Time + SyncMode: 0 + SyncSource: Image +Visualization Manager: + Class: "" + Displays: + - Alpha: 0.5 + Cell Size: 1 + Class: rviz/Grid + Color: 160; 160; 164 + Enabled: true + Line Style: + Line Width: 0.03 + Value: Lines + Name: Grid + Normal Cell Count: 0 + Offset: + X: 0 + Y: 0 + Z: 0 + Plane: XY + Plane Cell Count: 10 + Reference Frame: + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 5 + Size (m): 0.01 + Style: Points + Topic: /tracker/history_smoothed + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + - Class: rviz/Marker + Enabled: true + Marker Topic: /tracker/markers + Name: Marker + Namespaces: + {} + Queue Size: 100 + Value: true + - Class: rviz/MarkerArray + Enabled: true + Marker Topic: /tracker/markers_array + Name: MarkerArray + Namespaces: + {} + Queue Size: 100 + Value: true + - Class: rviz/TF + Enabled: true + Frame Timeout: 15 + Frames: + All Enabled: false + map: + Value: true + world: + Value: true + zed_initial_frame: + Value: true + zed_tracked_frame: + Value: true + Marker Scale: 1 + Name: TF + Show Arrows: true + Show Axes: true + Show Names: true + Tree: + map: + zed_initial_frame: + zed_tracked_frame: + world: + {} + Update Interval: 0 + Value: true + - Alpha: 1 + Auto Size: + Auto Size Factor: 1 + Value: true + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/DepthCloud + Color: 255; 255; 255 + Color Image Topic: "" + Color Transformer: RGB8 + Color Transport Hint: raw + Decay Time: 0 + Depth Map Topic: /camera/depth/image_rect_color + Depth Map Transport Hint: raw + Enabled: false + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: DepthCloud + Occlusion Compensation: + Occlusion Time-Out: 30 + Value: false + Position Transformer: XYZ + Queue Size: 5 + Selectable: true + Size (Pixels): 3 + Style: Flat Squares + Topic Filter: true + Use Fixed Frame: true + Use rainbow: true + Value: false + - Class: rviz/Image + Enabled: true + Image Topic: /camera/rgb/image_rect_color + Max Value: 1 + Median window: 5 + Min Value: 0 + Name: Image + Normalize Range: true + Queue Size: 2 + Transport Hint: raw + Unreliable: false + Value: true + - Class: rviz/Image + Enabled: true + Image Topic: /camera/depth/image_rect_color + Max Value: 1 + Median window: 5 + Min Value: 0 + Name: Image + Normalize Range: true + Queue Size: 2 + Transport Hint: raw + Unreliable: false + Value: true + Enabled: true + Global Options: + Background Color: 48; 48; 48 + Fixed Frame: world + Frame Rate: 60 + Name: root + Tools: + - Class: rviz/Interact + Hide Inactive Objects: true + 
- Class: rviz/MoveCamera + - Class: rviz/Select + - Class: rviz/FocusCamera + - Class: rviz/Measure + - Class: rviz/SetInitialPose + Topic: /initialpose + - Class: rviz/SetGoal + Topic: /move_base_simple/goal + - Class: rviz/PublishPoint + Single click: true + Topic: /clicked_point + Value: true + Views: + Current: + Class: rviz/Orbit + Distance: 11.4858 + Enable Stereo Rendering: + Stereo Eye Separation: 0.06 + Stereo Focal Distance: 1 + Swap Stereo Eyes: false + Value: false + Focal Point: + X: -0.222038 + Y: -0.00615923 + Z: 2.37807 + Name: Current View + Near Clip Distance: 0.01 + Pitch: 0.709796 + Target Frame: + Value: Orbit (rviz) + Yaw: 3.0004 + Saved: ~ +Window Geometry: + Displays: + collapsed: false + Height: 1056 + Hide Left Dock: false + Hide Right Dock: true + Image: + collapsed: false + QMainWindow State: 000000ff00000000fd00000004000000000000016a00000396fc020000000afb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000002800000224000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d00610067006501000002520000009c0000001600fffffffb0000000a0049006d00610067006501000002f4000000ca0000001600ffffff000000010000010f0000037afc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000280000037a000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000073f0000003efc0100000002fb0000000800540069006d006501000000000000073f000002f600fffffffb0000000800540069006d00650100000000000004500000000000000000000005cf0000039600000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + Selection: + collapsed: false + Time: + collapsed: false + Tool Properties: + collapsed: false + Views: + collapsed: true + Width: 1855 + X: 65 + Y: 24 diff --git a/tracking/launch/tracking/include/open_ptrack/tracking/kalman_filter.h b/tracking/launch/tracking/include/open_ptrack/tracking/kalman_filter.h new file mode 100644 index 0000000..f143ff8 --- /dev/null +++ b/tracking/launch/tracking/include/open_ptrack/tracking/kalman_filter.h @@ -0,0 +1,317 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ *   * Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following
+ *     disclaimer in the documentation and/or other materials provided
+ *     with the distribution.
+ *   * Neither the name of the copyright holder(s) nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it]
+ *
+ */
+
+#ifndef OPEN_PTRACK_TRACKING_KALMAN_FILTER_H_
+#define OPEN_PTRACK_TRACKING_KALMAN_FILTER_H_
+
+// NOTE: the original include targets were lost between angle brackets when this
+// patch was converted to text; the headers below are an assumption, based on
+// the Bayes++ (Bayesian_filter) types used in this file:
+#include <cmath>
+#include <Eigen/Eigen>
+#include <BayesFilter/bayesFlt.hpp>
+#include <BayesFilter/unsFlt.hpp>
+#include <BayesFilter/models.hpp>
+
+namespace open_ptrack
+{
+  namespace tracking
+  {
+    /** \brief PredictModel Prediction model (linear state predict model) */
+    class PredictModel : public Bayesian_filter::Linear_predict_model
+    {
+      protected:
+        /* \brief time step */
+        const double dt_;
+
+      public:
+        /** \brief Constructor. */
+        PredictModel(double dt, double acceleration_variance);
+
+        /** \brief Destructor. */
+        virtual ~PredictModel();
+    };
+
+    /** \brief ObserveModel Observation model (linear observation is additive uncorrelated model) */
+    class ObserveModel : public Bayesian_filter::Linear_uncorrelated_observe_model
+    {
+      protected:
+        /** \brief Position variance. */
+        double position_variance_;
+
+      public:
+        /** \brief Constructor. */
+        ObserveModel(double position_variance, int output_dimension);
+
+        /** \brief Destructor. */
+        virtual ~ObserveModel();
+    };
+
+    /** \brief MahalanobisParameters2d Contains variables for bayesian estimation with state dimension = 2. */
+    class MahalanobisParameters2d
+    {
+      public:
+
+        MahalanobisParameters2d() : SI(Bayesian_filter_matrix::Empty), x(0), y(0)
+        {
+          SI.resize(2, 2, false);
+        }
+
+        /** \brief Innovation covariance matrix. */
+        Bayesian_filter::FM::SymMatrix SI;
+
+        /** \brief Position x component. */
+        double x;
+
+        /** \brief Position y component. */
+        double y;
+    };
+
+    /** \brief MahalanobisParameters4d Contains variables for bayesian estimation with state dimension = 4. */
+    class MahalanobisParameters4d
+    {
+      public:
+
+        MahalanobisParameters4d() : SI(Bayesian_filter_matrix::Empty), x(0), y(0), vx(0), vy(0)
+        {
+          SI.resize(4, 4, false);
+        }
+
+        /** \brief Innovation covariance matrix. */
+        Bayesian_filter::FM::SymMatrix SI;
+
+        /** \brief Position x component. */
+        double x;
+
+        /** \brief Position y component. */
+        double y;
+
+        /** \brief Velocity x component. */
+        double vx;
+
+        /** \brief Velocity y component. */
+        double vy;
+    };
+
+    /** \brief KalmanFilter provides methods for bayesian estimation with Kalman Filter.
*/ + class KalmanFilter + { + protected: + + /** \brief Time interval.*/ + double dt_; + + /** \brief Scale factor for computing depth noise variance.*/ + double depth_multiplier_; + + /** \brief Position variance. */ + double position_variance_; + + /** \brief Acceleration variance.*/ + double acceleration_variance_; + + /** \brief State/output dimension.*/ + int output_dimension_; + + Bayesian_filter::Unscented_scheme* filter_; + + /** \brief Prediction model. */ + PredictModel* predict_model_; + + /** \brief Observation model. */ + ObserveModel* observe_model_; + + public: + + /** \brief Constructor. */ + KalmanFilter(double dt, double position_variance, double acceleration_variance, int output_dimension); + + /** \brief Constructor initializing a new KalmanFilter with another one. */ + KalmanFilter(const KalmanFilter& orig); + + /** \brief Overload of = operator for copying KalmanFilter objects. */ + KalmanFilter& operator=(const KalmanFilter& orig); + + /** \brief Destructor. */ + virtual ~KalmanFilter(); + + /** + * \brief Filter initialization procedure. + * + * \param[in] x Position x component. + * \param[in] y Position y component. + * \param[in] distance Distance from the sensor. + * \param[in] velocity_in_motion_term If true, both target position and velocity constitute the output vector. + */ + void + init(double x, double y, double distance, bool velocity_in_motion_term); + + /** + * \brief Prediction step. + */ + void + predict(); + + /** + * \brief Prediction step. + * + * \param[out] x Position x component. + * \param[out] y Position y component. + * \param[out] vx Velocity x component. + * \param[out] vy Velocity y component. + */ + void + predict(double& x, double& y, double& vx, double& vy); + + /** + * \brief Update step. + */ + void + update(); + + /** + * \brief Update step. + * + * \param[in] x Position x component. + * \param[in] y Position y component. + * \param[in] distance Distance from the sensor. + */ + void + update(double x, double y, double distance); + + /** + * \brief Update step. + * + * \param[in] x Position x component. + * \param[in] y Position y component. + * \param[in] vx Velocity x component. + * \param[in] vy Velocity y component. + * \param[in] distance Distance from the sensor. + */ + void + update(double x, double y, double vx, double vy, double distance); + + /** + * \brief Get filter state. + * + * \param[out] x Position x component. + * \param[out] y Position y component. + * \param[out] vx Velocity x component. + * \param[out] vy Velocity y component. + */ + void + getState(double& x, double& y, double& vx, double& vy); + + /** + * \brief Get filter state. + * + * \param[out] x Position x component. + * \param[out] y Position y component. + */ + void + getState(double& x, double& y); + + /** + * \brief Obtain variables for bayesian estimation with output dimension = 2. + * + * \param[out] mp Object of class MahalanobisParameters2d. + */ + void + getMahalanobisParameters(MahalanobisParameters2d& mp); + + /** + * \brief Obtain variables for bayesian estimation with output dimension = 4. + * + * \param[out] mp Object of class MahalanobisParameters4d. + */ + void + getMahalanobisParameters(MahalanobisParameters4d& mp); + + /** + * \brief Compute Mahalanobis distance between measurement and target predicted state. + * + * \param[in] x Input x position. + * \param[in] y Input y position. + * \param[in] mp Object of class MahalanobisParameters2d. + * + * \return Mahalanobis distance between measurement (x,y) and target predicted state. 
+ */ + static double + performMahalanobisDistance(double x, double y, const MahalanobisParameters2d& mp); + + /** + * \brief Compute Mahalanobis distance between measurement and target predicted state. + * + * \param[in] x Input x position. + * \param[in] y Input y position. + * \param[in] vx Input x velocity. + * \param[in] vy Input y velocity. + * \param[in] mp Object of class MahalanobisParameters4d. + * + * \return Mahalanobis distance between measurement (x,y,vx,vy) and target predicted state. + */ + static double + performMahalanobisDistance(double x, double y, double vx, double vy, const MahalanobisParameters4d& mp); + + /** + * \brief Get filter innovation covariance. + * + * \return innovation covariance matrix. + */ + Bayesian_filter::FM::SymMatrix + getInnovationCovariance(); + + /** + * \brief Set prediction model. + * + * \param[in] acceleration_variance Acceleration variance. + */ + void + setPredictModel (double acceleration_variance); + + /** + * \brief Set observation model. + * + * \param[in] position_variance Position variance. + */ + void + setObserveModel (double position_variance); + + }; + + } /* namespace tracking */ +} /* namespace open_ptrack */ +#endif /* OPEN_PTRACK_TRACKING_KALMAN_FILTER_H_ */ diff --git a/tracking/launch/tracking/include/open_ptrack/tracking/munkres.h b/tracking/launch/tracking/include/open_ptrack/tracking/munkres.h new file mode 100644 index 0000000..241de3c --- /dev/null +++ b/tracking/launch/tracking/include/open_ptrack/tracking/munkres.h @@ -0,0 +1,133 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2013-, Matteo Munaro [matteo.munaro@dei.unipd.it] + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ *
+ */
+
+#ifndef OPEN_PTRACK_TRACKING_MUNKRES_H_
+#define OPEN_PTRACK_TRACKING_MUNKRES_H_
+
+// The original include targets were stripped from this patch; reconstructed
+// from usage (cv::Mat here; INFINITY and MAX in munkres.cpp) and assumed:
+#include <opencv2/opencv.hpp>
+#include <cmath>
+
+namespace open_ptrack
+{
+  namespace tracking
+  {
+    /** \brief Munkres solves the Global Nearest Neighbor problem with the Hungarian (Munkres) algorithm
+     * Implementation of the algorithm described here: http://csclab.murraystate.edu/bob.pilgrim/445/munkres.html
+     **/
+    class Munkres {
+
+      public:
+
+        /**
+         * \brief Main method for solving GNN problems.
+         *
+         * \param[in] matrix Cost matrix for the GNN problem (rows are workers, columns are jobs)
+         * \param[in] max If true, higher values are considered to be better. If false, lower values are better (lower cost).
+         */
+        cv::Mat solve(cv::Mat& matrix, bool max);
+
+      private:
+
+        /* \brief Preprocessing for applying the Munkres algorithm. */
+        double**
+        preprocess(cv::Mat& matrix_in, bool max);
+
+        /* \brief First step of the Munkres algorithm. */
+        void
+        step_one(double** matrix, int rows, int cols, int& step);
+
+        /* \brief Second step of the Hungarian algorithm. */
+        void
+        step_two(double** matrix, int rows, int cols, int* rowCover, int* colCover, double** m, int& step);
+
+        /* \brief Third step of the Hungarian algorithm. */
+        void
+        step_three(int rows, int cols, int* colCover, double** m, int& step);
+
+        /* \brief Utility function for step 4. */
+        void
+        find_a_zero(double** matrix, int rows, int cols, int* rowCover, int* colCover, int& row, int& col);
+
+        /* \brief Utility function for step 4. */
+        bool
+        star_in_row(double** m, int rows, int cols, int row);
+
+        /* \brief Utility function for step 4. */
+        void
+        find_star_in_row(double** m, int rows, int cols, int row, int& col);
+
+        /* \brief Fourth step of the Hungarian algorithm. */
+        void
+        step_four(double** matrix, int rows, int cols, int* rowCover, int* colCover, double** m, int& path_row_0, int& path_col_0, int& step);
+
+        /* \brief Utility function for step 5. */
+        void
+        find_star_in_col(int rows, int cols, double** m, int c, int& r);
+
+        /* \brief Utility function for step 5. */
+        void
+        find_prime_in_row(int rows, int cols, double** m, int r, int& c);
+
+        /* \brief Utility function for step 5. */
+        void
+        augment_path(double** m, int path_count, int** path);
+
+        /* \brief Utility function for step 5. */
+        void
+        clear_covers(int rows, int cols, int* rowCover, int* colCover);
+
+        /* \brief Utility function for step 5. */
+        void
+        erase_primes(int rows, int cols, double** m);
+
+        /* \brief Fifth step of the Hungarian algorithm. */
+        void
+        step_five(int rows, int cols, int* rowCover, int* colCover, double** m, int& path_row_0, int& path_col_0, int& path_count, int** path, int& step);
+
+        /* \brief Utility function for step 6. */
+        void
+        find_smallest(double** matrix, int rows, int cols, int* rowCover, int* colCover, double& minval);
+
+        /* \brief Sixth step of the Hungarian algorithm.
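+         * Adds the smallest uncovered value to every element of each covered
+         * row and subtracts it from every element of each uncovered column,
+         * then returns to step 4 (matching the implementation in munkres.cpp).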
*/ + void + step_six(double** matrix, int rows, int cols, int* rowCover, int* colCover, int& step); + + }; + } /* namespace tracking */ +} /* namespace open_ptrack */ +#endif /* !defined(OPEN_PTRACK_TRACKING_MUNKRES_H_) */ diff --git a/tracking/launch/tracking/include/open_ptrack/tracking/track.h b/tracking/launch/tracking/include/open_ptrack/tracking/track.h new file mode 100644 index 0000000..6d790eb --- /dev/null +++ b/tracking/launch/tracking/include/open_ptrack/tracking/track.h @@ -0,0 +1,402 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ *
+ * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it]
+ *
+ */
+
+#ifndef OPEN_PTRACK_TRACKING_TRACK_H_
+#define OPEN_PTRACK_TRACKING_TRACK_H_
+
+// The original include targets (and the template arguments marked below) were
+// stripped from this patch; they are reconstructed from usage and should be
+// treated as assumptions, not as the original text.
+#include <ros/ros.h>
+#include <Eigen/Eigen>
+#include <opencv2/opencv.hpp>
+#include <pcl/point_types.h>
+#include <pcl/point_cloud.h>
+#include <visualization_msgs/MarkerArray.h>
+#include <opt_msgs/Track.h>
+#include <open_ptrack/detection/detection_source.h>
+#include <open_ptrack/tracking/kalman_filter.h>
+
+namespace open_ptrack
+{
+  namespace tracking
+  {
+    /** \brief Track represents information about a track (or target) */
+    class Track
+    {
+      public:
+
+        /** \brief A track has Status NEW if it has been recently created, otherwise it has NORMAL Status */
+        enum Status
+        {
+          NEW,
+          NORMAL
+        };
+
+        /** \brief Visibility states if the track is currently visible to the sensor, partially occluded or totally occluded */
+        enum Visibility
+        {
+          VISIBLE = 0,      // no occlusion
+          OCCLUDED = 1,     // partial occlusion
+          NOT_VISIBLE = 2   // total occlusion
+        };
+
+      protected:
+
+        /** \brief Dimension of a circular buffer which keeps track of filter parameters along time */
+        int MAX_SIZE;
+
+        /** \brief Track ID */
+        const int id_;
+
+        /** \brief Track frame id (frame id of the last detection associated to the track) */
+        const std::string frame_id_;
+
+        /** \brief Inverse of the frame rate */
+        const double period_;
+
+        /** \brief If true, the track is validated, meaning that it has been associated with a certain number of high confidence detections */
+        bool validated_;
+
+        /** \brief Kalman filter associated to the track */
+        open_ptrack::tracking::KalmanFilter* filter_;
+
+        /** \brief Temporary copy of the Kalman filter associated to the track (used to recover filter information when a track is re-found) */
+        open_ptrack::tracking::KalmanFilter* tmp_filter_;
+
+        /** \brief First time a detection is associated to the track */
+        ros::Time first_time_detected_;
+
+        /** \brief Last time a detection is associated to the track */
+        ros::Time last_time_detected_;
+
+        /** \brief Last time a detection with high detection confidence is associated to the track */
+        ros::Time last_time_detected_with_high_confidence_;
+
+        /** \brief Last time a prediction has been performed for the track */
+        ros::Time last_time_predicted_;
+
+        /** \brief Index in the circular buffer corresponding to the last time a prediction has been performed */
+        int last_time_predicted_index_;
+
+        /** Variables used for computing the detection/track Mahalanobis distance */
+        std::vector<MahalanobisParameters2d> mahalanobis_map2d_;   // template argument assumed
+
+        /** Variables used for computing the detection/track Mahalanobis distance */
+        std::vector<MahalanobisParameters4d> mahalanobis_map4d_;   // template argument assumed
+
+        /** \brief Track Status */
+        Status status_;
+
+        /** \brief Track Visibility */
+        Visibility visibility_;
+
+        /** \brief Number of high confidence detections associated to the track */
+        int updates_with_enough_confidence_;
+
+        /** \brief Track centroid z coordinate */
+        double z_;
+
+        /** \brief Track height */
+        double height_;
+
+        /** \brief Track distance from the camera */
+        double distance_;
+
+        /** \brief Track age (in seconds) */
+        double age_;
+
+        /** \brief Confidence of the last detection associated to the track */
+        double last_detector_confidence_;
+
+        /** \brief Last data association score obtained by this track */
+        double data_association_score_;
+
+        /** \brief Color associated to the track */
+        Eigen::Vector3f color_;
+
+        /** \brief DetectionSource which provided the last detection associated to the track */
+        open_ptrack::detection::DetectionSource* detection_source_;
+
+        /** \brief If true, both position and velocity are considered in computing detection<->track Mahalanobis distance */
+        bool velocity_in_motion_term_;
+
+        /** \brief Counts the number of consecutive updates with low confidence detections */
+        int low_confidence_consecutive_frames_;
+
+      public:
+
+        /** \brief Constructor. */
+        Track(
+            int id,
+            std::string frame_id,
+            double position_variance,
+            double acceleration_variance,
+            double period,
+            bool velocity_in_motion_term);
+
+        /** \brief Destructor. */
+        virtual ~Track();
+
+        /** \brief Track initialization with an old track. */
+        void
+        init(const Track& old_track);
+
+        /**
+         * \brief Track initialization.
+         *
+         * \param[in] x Track centroid x coordinate
+         * \param[in] y Track centroid y coordinate
+         * \param[in] z Track centroid z coordinate
+         * \param[in] height Track height
+         * \param[in] distance Track distance from the sensor
+         * \param[in] detection_source DetectionSource which provided the last detection associated to the track
+         */
+        void
+        init(
+            double x,
+            double y,
+            double z,
+            double height,
+            double distance,
+            open_ptrack::detection::DetectionSource* detection_source);
+
+        /**
+         * \brief Update track with new detection information.
+         *
+         * \param[in] x Detection centroid x coordinate
+         * \param[in] y Detection centroid y coordinate
+         * \param[in] z Detection centroid z coordinate
+         * \param[in] height Detection height
+         * \param[in] distance Detection distance from the sensor
+         * \param[in] data_association_score Data association score which produced this update
+         * \param[in] confidence Detection confidence
+         * \param[in] min_confidence Minimum confidence for track initialization
+         * \param[in] min_confidence_detections Minimum confidence for detection
+         * \param[in] detection_source DetectionSource which provided the detection
+         * \param[in] first_update If true, this is the first update of the track
+         */
+        void
+        update(
+            double x,
+            double y,
+            double z,
+            double height,
+            double distance,
+            double data_association_score,
+            double confidence,
+            double min_confidence,
+            double min_confidence_detections,
+            open_ptrack::detection::DetectionSource* detection_source,
+            bool first_update = false);
+
+        /**
+         * \brief Compute Mahalanobis distance between detection with position (x,y) and track.
+         *
+         * \param[in] x Detection centroid x coordinate.
+         * \param[in] y Detection centroid y coordinate.
+         * \param[in] when Time instant.
+         *
+         * \return the Mahalanobis distance.
+         */
+        double
+        getMahalanobisDistance(double x, double y, const ros::Time& when);
+
+        /** \brief Validate a track. */
+        void
+        validate();
+
+        /**
+         * \brief Get track validation flag
+         *
+         * \return true if the track has been validated, false otherwise.
+         */
+        bool
+        isValidated();
+
+        /**
+         * \brief Get track ID
+         *
+         * \return track ID
+         */
+        int
+        getId();
+
+        /**
+         * \brief Set track status to s
+         *
+         * \param[in] s status
+         */
+        void
+        setStatus(Status s);
+
+        /**
+         * \brief Get track status
+         *
+         * \return track status
+         */
+        Status
+        getStatus();
+
+        /**
+         * \brief Set track Visibility.
+         *
+         * \param[in] v Visibility status.
+         */
+        void
+        setVisibility(Visibility v);
+
+        /**
+         * \brief Get track Visibility.
+         *
+         * \return track Visibility.
+         */
+        Visibility
+        getVisibility();
+
+        /**
+         * \brief Get time passed from first detection-track association.
+         *
+         * \return time passed from first detection-track association.
+         */
+        float
+        getSecFromFirstDetection(ros::Time current_time);
+
+        /**
+         * \brief Get time passed from last detection-track association.
+         *
+         * \return time passed from last detection-track association.
+         */
+        float
+        getSecFromLastDetection(ros::Time current_time);
+
+        /**
+         * \brief Get time passed from last detection-track association with a high confidence detection.
+ * + * \return time passed from last detection-track association with a high confidence detection. + */ + float + getSecFromLastHighConfidenceDetection(ros::Time current_time); + + /** + * \brief Get the number of consecutive updates with low confidence detections. + * + * \return the number of consecutive updates with low confidence detections. + */ + float + getLowConfidenceConsecutiveFrames(); + + /** + * \brief Get the number of updates with enough confidence detections. + * + * \return the number of updates with enough confidence detections. + */ + int + getUpdatesWithEnoughConfidence(); + + /** + * \brief Draw track bounding box in the image. + * + * \param[in] vertical States if the camera is vertically oriented (true) or not (false). + */ + void + draw(bool vertical); + + /** + * \brief Create RViz visualization marker with the track position. + * + * \param[in/out] msg Array containing markers of every track. + */ + void + createMarker(visualization_msgs::MarkerArray::Ptr& msg); + + /** + * \brief Get a XYZRGB point from a point cloud. + * + * \param[in/out] p Point containing position information and to be filled with color. + * + * \return true if track is visible, false if not visible. + */ + bool + getPointXYZRGB(pcl::PointXYZRGB& p); + + /** + * \brief Create track ROS message. + * + * \param[in/out] track_msg Track ROS message. + * \param[in] vertical States if the camera is vertically oriented (true) or not (false). + */ + void + toMsg(opt_msgs::Track& track_msg, bool vertical); + + /** + * \brief Get the DetectionSource corresponding to the last associated detection. + * + * \return the DetectionSource corresponding to the last associated detection. + */ + open_ptrack::detection::DetectionSource* + getDetectionSource(); + + /** + * \brief Set flag stating if people velocity should be used in motion term for data association + * + * \param[in] velocity_in_motion_term If true, people velocity is also used in motion term for data association + * \param[in] acceleration_variance Acceleration variance (for Kalman Filter) + * \param[in] position_variance Position variance (for Kalman Filter) + */ + void + setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance); + + /** + * \brief Set acceleration variance (for Kalman Filter) + * + * \param[in] acceleration_variance Acceleration variance (for Kalman Filter) + */ + void + setAccelerationVariance (double acceleration_variance); + + /** + * \brief Set position variance (for Kalman Filter) + * + * \param[in] position_variance Position variance (for Kalman Filter) + */ + void + setPositionVariance (double position_variance); + }; + + } /* namespace tracking */ +} /* namespace open_ptrack */ +#endif /* OPEN_PTRACK_TRACKING_TRACK_H_ */ diff --git a/tracking/launch/tracking/include/open_ptrack/tracking/tracker.h b/tracking/launch/tracking/include/open_ptrack/tracking/tracker.h new file mode 100644 index 0000000..a275acd --- /dev/null +++ b/tracking/launch/tracking/include/open_ptrack/tracking/tracker.h @@ -0,0 +1,332 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials provided
+ *   with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ *   contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it]
+ *
+ */
+
+#ifndef OPEN_PTRACK_TRACKING_TRACKER_H_
+#define OPEN_PTRACK_TRACKING_TRACKER_H_
+
+// The original include targets (and the template arguments marked below) were
+// stripped from this patch; they are reconstructed from usage and should be
+// treated as assumptions, not as the original text.
+#include <list>
+#include <vector>
+#include <opencv2/opencv.hpp>
+#include <opt_msgs/TrackArray.h>
+#include <opt_msgs/IDArray.h>
+#include <open_ptrack/detection/detection.h>
+#include <open_ptrack/tracking/track.h>
+
+namespace open_ptrack
+{
+  namespace tracking
+  {
+    /** \brief Tracker performs tracking-by-detection */
+    class Tracker
+    {
+      protected:
+        /** \brief List of all active tracks */
+        std::list<open_ptrack::tracking::Track*> tracks_;   // element type assumed
+
+        /** \brief List of lost tracks */
+        std::list<open_ptrack::tracking::Track*> lost_tracks_;
+
+        /** \brief List of tracks with Status = NEW */
+        std::list<open_ptrack::tracking::Track*> new_tracks_;
+
+        /** \brief List of current detections */
+        std::vector<open_ptrack::detection::Detection> detections_;
+
+        /** \brief List of current detections not associated to any track */
+        std::list<open_ptrack::detection::Detection> unassociated_detections_;
+
+        /** \brief Track ID counter */
+        int tracks_counter_;
+
+        /** \brief World reference frame used for tracking */
+        std::string world_frame_id_;
+
+        /** \brief Minimum confidence for track initialization */
+        double min_confidence_;
+
+        /** \brief Minimum confidence of detections sent to tracking */
+        const double min_confidence_detections_;
+
+        /** \brief Minimum number of detection<->track associations needed for validating a track */
+        int detections_to_validate_;
+
+        /** \brief Time after which a track that is not visible becomes old */
+        double sec_before_old_;
+
+        /** \brief Time after which a visible track obtains NORMAL status */
+        double sec_remain_new_;
+
+        /** \brief Time within which a track should be validated (otherwise it is discarded) */
+        double sec_before_fake_;
+
+        /** \brief Gate distance for joint likelihood in data association */
+        double gate_distance_;
+
+        /** \brief Flag stating if people detection confidence should be used in data association (true) or not (false) */
+        bool detector_likelihood_;
+
+        /** \brief Weights for the single terms of the joint likelihood */
+        std::vector<double> likelihood_weights_;
+
+        /** \brief If true, people velocity is also used in motion term for data association */
+        bool velocity_in_motion_term_;
+
+        /** \brief Minimum time period between two detection messages */
+        const double period_;
+
+        /** \brief Position variance (for Kalman Filter) */
+        double position_variance_;
+
+        /** \brief Acceleration variance (for Kalman Filter) */
+        double acceleration_variance_;
+
+        /** \brief Flag enabling debug mode */
+        const bool debug_mode_;
+
+        /** \brief Detections<->tracks distance matrix for data association */
+        cv::Mat_<double> distance_matrix_;   // element type assumed (the Munkres solver works on CV_64F)
+
+        /** \brief Detections<->tracks cost matrix to be used to solve the Global Nearest Neighbor problem */
+        cv::Mat_<double> cost_matrix_;
+
+        /** \brief If true, the sensor is considered to be vertically placed (portrait mode) */
+        bool vertical_;
+
+        /** \brief Create detections<->tracks distance matrix for data association */
+        void
+        createDistanceMatrix();
+
+        /** \brief Create detections<->tracks cost matrix to be used to solve the Global Nearest Neighbor problem */
+        void
+        createCostMatrix();
+
+        /** \brief Update tracks associated to a detection in the current frame */
+        void
+        updateDetectedTracks();
+
+        /** \brief Fill list containing unassociated detections */
+        void
+        fillUnassociatedDetections();
+
+        /** \brief Create new tracks with high confidence unassociated detections */
+        void
+        createNewTracks();
+
+        /** \brief Create a new track with detection information */
+        int
+        createNewTrack(open_ptrack::detection::Detection& detection);
+
+        /** \brief Update lost tracks */
+        void
+        updateLostTracks();
+
+      public:
+        /** \brief Constructor */
+        Tracker(double gate_distance, bool detector_likelihood, std::vector<double> likelihood_weights, bool velocity_in_motion_term,
+            double min_confidence, double min_confidence_detections, double sec_before_old, double sec_before_fake,
+            double sec_remain_new, int detections_to_validate, double period, double position_variance,
+            double acceleration_variance, std::string world_frame_id, bool debug_mode, bool vertical);
+
+        /** \brief Destructor */
+        virtual ~Tracker();
+
+        /**
+         * \brief Initialization when a new set of detections arrives.
+         *
+         * \param[in] detections Vector of current detections.
+         *
+         */
+        void
+        newFrame(const std::vector<open_ptrack::detection::Detection>& detections);
+
+        /**
+         * \brief Update the list of tracks according to the current set of detections.
+         */
+        void
+        updateTracks();
+
+//        /**
+//         * \brief Draw the tracks into the RGB image given by its sensor.
+//         */
+//        void
+//        drawRgb();
+
+        /**
+         * \brief Fills the MarkerArray message with a marker for each visible track (in correspondence
+         * of its centroid) and its number.
+         *
+         * \param[in] msg The MarkerArray message to fill.
+         */
+        void
+        toMarkerArray(visualization_msgs::MarkerArray::Ptr& msg);
+
+        /**
+         * \brief Writes the state of each track into a TrackArray message.
+         *
+         * \param[in] msg The TrackArray message to fill.
+         */
+        void
+        toMsg(opt_msgs::TrackArray::Ptr& msg);
+
+        /**
+         * \brief Writes the state of tracks with a given frame id into a TrackArray message.
+         *
+         * \param[in] msg The TrackArray message to fill.
+         * \param[in] source_frame_id Frame id of tracks that have to be written to msg.
+         */
+        void
+        toMsg(opt_msgs::TrackArray::Ptr& msg, std::string& source_frame_id);
+
+        /**
+         * \brief Writes the ID of each alive track into an IDArray message.
+         *
+         * \param[in] msg The IDArray message to fill.
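+         *
+         * Typical per-frame sequence (a sketch assumed from this header, not
+         * prescribed by the original documentation; `detections` and `ids_msg`
+         * are hypothetical locals):
+         * \code
+         *   tracker.newFrame(detections);  // feed the current detections
+         *   tracker.updateTracks();        // data association + filter updates
+         *   tracker.getAliveIDs(ids_msg);  // collect IDs of the alive tracks
+         * \endcode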
+         */
+        void
+        getAliveIDs (opt_msgs::IDArray::Ptr& msg);
+
+        /**
+         * \brief Appends the location of each track to a point cloud starting from starting_index (using
+         * a circular array)
+         *
+         * \param[in] pointcloud The point cloud where to append the locations.
+         * \param[in] starting_index The starting index of the array.
+         * \param[in] max_size The maximum size of the point cloud (when reached, the points overwrite the initial ones)
+         *
+         * \return the new starting_index.
+         */
+        size_t
+        appendToPointCloud(pcl::PointCloud<pcl::PointXYZRGB>::Ptr& pointcloud,   // point type assumed
+            size_t starting_index, size_t max_size);
+
+        /**
+         * \brief Set minimum confidence for track initialization
+         *
+         * \param[in] min_confidence Minimum confidence for track initialization
+         */
+        void
+        setMinConfidenceForTrackInitialization (double min_confidence);
+
+        /**
+         * \brief Set time after which a track that is not visible becomes old
+         *
+         * \param[in] sec_before_old Time after which a track that is not visible becomes old
+         */
+        void
+        setSecBeforeOld (double sec_before_old);
+
+        /**
+         * \brief Set time within which a track should be validated (otherwise it is discarded)
+         *
+         * \param[in] sec_before_fake Time within which a track should be validated (otherwise it is discarded)
+         */
+        void
+        setSecBeforeFake (double sec_before_fake);
+
+        /**
+         * \brief Set time after which a visible track obtains NORMAL status
+         *
+         * \param[in] sec_remain_new Time after which a visible track obtains NORMAL status
+         */
+        void
+        setSecRemainNew (double sec_remain_new);
+
+        /**
+         * \brief Set minimum number of detection<->track associations needed for validating a track
+         *
+         * \param[in] detections_to_validate Minimum number of detection<->track associations needed for validating a track
+         */
+        void
+        setDetectionsToValidate (int detections_to_validate);
+
+        /**
+         * \brief Set flag stating if people detection confidence should be used in data association (true) or not (false)
+         *
+         * \param[in] detector_likelihood Flag stating if people detection confidence should be used in data association (true) or not (false)
+         */
+        void
+        setDetectorLikelihood (bool detector_likelihood);
+
+        /**
+         * \brief Set likelihood weights for data association
+         *
+         * \param[in] detector_weight Weight for detector likelihood
+         * \param[in] motion_weight Weight for motion likelihood
+         */
+        void
+        setLikelihoodWeights (double detector_weight, double motion_weight);
+
+        /**
+         * \brief Set flag stating if people velocity should be used in motion term for data association
+         *
+         * \param[in] velocity_in_motion_term If true, people velocity is also used in motion term for data association
+         * \param[in] acceleration_variance Acceleration variance (for Kalman Filter)
+         * \param[in] position_variance Position variance (for Kalman Filter)
+         */
+        void
+        setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance);
+
+        /**
+         * \brief Set acceleration variance (for Kalman Filter)
+         *
+         * \param[in] acceleration_variance Acceleration variance (for Kalman Filter)
+         */
+        void
+        setAccelerationVariance (double acceleration_variance);
+
+        /**
+         * \brief Set position variance (for Kalman Filter)
+         *
+         * \param[in] position_variance Position variance (for Kalman Filter)
+         */
+        void
+        setPositionVariance (double position_variance);
+
+        /**
+         * \brief Set gate distance for joint likelihood in data association
+         *
+         * \param[in] gate_distance Gate distance for joint likelihood in data association.
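+         *
+         * Detections whose Mahalanobis distance from a track prediction exceeds
+         * this gate are assumed not to be considered as candidate associations
+         * for that track (see createDistanceMatrix()/createCostMatrix()).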
+ */ + void + setGateDistance (double gate_distance); + }; + + } /* namespace tracking */ +} /* namespace open_ptrack */ + +#endif /* OPEN_PTRACK_TRACKING_TRACKER_H_ */ diff --git a/tracking/launch/tracking/launch/detection_and_tracking.launch b/tracking/launch/tracking/launch/detection_and_tracking.launch new file mode 100644 index 0000000..39f7fae --- /dev/null +++ b/tracking/launch/tracking/launch/detection_and_tracking.launch @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/tracking/launch/tracking/launch/detection_and_tracking_kinect2.launch b/tracking/launch/tracking/launch/detection_and_tracking_kinect2.launch new file mode 100644 index 0000000..317f7a8 --- /dev/null +++ b/tracking/launch/tracking/launch/detection_and_tracking_kinect2.launch @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/tracking/launch/tracking/launch/detection_and_tracking_sr.launch b/tracking/launch/tracking/launch/detection_and_tracking_sr.launch new file mode 100644 index 0000000..b755a9c --- /dev/null +++ b/tracking/launch/tracking/launch/detection_and_tracking_sr.launch @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/tracking/launch/tracking/launch/detection_and_tracking_stereo.launch b/tracking/launch/tracking/launch/detection_and_tracking_stereo.launch new file mode 100644 index 0000000..449f827 --- /dev/null +++ b/tracking/launch/tracking/launch/detection_and_tracking_stereo.launch @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/tracking/launch/tracking/launch/detection_and_tracking_zed.launch b/tracking/launch/tracking/launch/detection_and_tracking_zed.launch new file mode 100644 index 0000000..e5be647 --- /dev/null +++ b/tracking/launch/tracking/launch/detection_and_tracking_zed.launch @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/tracking/launch/tracking/launch/tracker.launch b/tracking/launch/tracking/launch/tracker.launch new file mode 100644 index 0000000..57e1ccc --- /dev/null +++ b/tracking/launch/tracking/launch/tracker.launch @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/tracking/launch/tracking/launch/tracker_network.launch b/tracking/launch/tracking/launch/tracker_network.launch new file mode 100644 index 0000000..e8427d2 --- /dev/null +++ b/tracking/launch/tracking/launch/tracker_network.launch @@ -0,0 +1,24 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tracking/launch/tracking/launch/tracker_sr.launch b/tracking/launch/tracking/launch/tracker_sr.launch new file mode 100644 index 0000000..88d56ca --- /dev/null +++ b/tracking/launch/tracking/launch/tracker_sr.launch @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + diff --git a/tracking/launch/tracking/launch/tracking_node.launch b/tracking/launch/tracking/launch/tracking_node.launch new file mode 100644 index 0000000..a7af869 --- /dev/null +++ b/tracking/launch/tracking/launch/tracking_node.launch @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/tracking/launch/tracking/package.xml b/tracking/launch/tracking/package.xml new file mode 100644 index 0000000..feb082f --- /dev/null +++ b/tracking/launch/tracking/package.xml @@ -0,0 +1,46 @@ + + + tracking + 0.0.0 + Module containing object/people tracking algorithms. 
+ + Matteo Munaro + + BSD + + Matteo Munaro > + + catkin + + cmake_modules + roscpp + sensor_msgs + std_msgs + opencv2 + opt_msgs + rospy + tf + tf_conversions + visualization_msgs + bayes + detection + pcl_ros + opt_utils + dynamic_reconfigure + + roscpp + sensor_msgs + std_msgs + opencv2 + opt_msgs + rospy + tf + tf_conversions + visualization_msgs + bayes + detection + pcl_ros + opt_utils + dynamic_reconfigure + + diff --git a/tracking/launch/tracking/src/kalman_filter.cpp b/tracking/launch/tracking/src/kalman_filter.cpp new file mode 100644 index 0000000..9efd8d1 --- /dev/null +++ b/tracking/launch/tracking/src/kalman_filter.cpp @@ -0,0 +1,367 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ *
+ * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it]
+ *
+ */
+
+// The original include targets were stripped from this patch; reconstructed (assumed):
+#include <cmath>
+
+#include <open_ptrack/tracking/kalman_filter.h>
+
+namespace open_ptrack
+{
+  namespace tracking
+  {
+
+    PredictModel::PredictModel(double dt, double acceleration_variance) :
+      Bayesian_filter::Linear_predict_model(4, 2), dt_(dt)
+    {
+      // State transition: constant-velocity model over the state [x y vx vy]'
+      for(size_t i = 0; i < 4; i++)
+        for(size_t j = 0; j < 4; j++)
+          Fx(i, j) = 0.0;
+
+      Fx(0, 0) = 1.0;
+      Fx(1, 1) = 1.0;
+      Fx(2, 2) = 1.0;
+      Fx(3, 3) = 1.0;
+      Fx(0, 2) = dt;
+      Fx(1, 3) = dt;
+
+      // Process noise: white acceleration, coupled into the state via G
+      q[0] = acceleration_variance;
+      q[1] = acceleration_variance;
+
+      for(size_t i = 0; i < 4; i++)
+        for(size_t j = 0; j < 2; j++)
+          G(i, j) = 0.0;
+
+      G(0, 0) = dt * dt / 2.0;
+      G(1, 1) = dt * dt / 2.0;
+      G(2, 0) = dt;
+      G(3, 1) = dt;
+    }
+
+    PredictModel::~PredictModel()
+    {
+
+    }
+
+    ObserveModel::ObserveModel(double position_variance, int output_dimension) :
+      Bayesian_filter::Linear_uncorrelated_observe_model(4, output_dimension), position_variance_(position_variance)
+    {
+
+      for(int i = 0; i < output_dimension; i++)
+        for(size_t j = 0; j < 4; j++)
+          Hx(i, j) = 0.0;
+
+      Hx(0, 0) = 1.0;
+      Hx(1, 1) = 1.0;
+
+      Zv[0] = position_variance_;
+      Zv[1] = position_variance_;
+      if (output_dimension == 4)
+      {
+        Hx(2, 2) = 1.0;
+        Hx(3, 3) = 1.0;
+        Zv[2] = 16 * position_variance_;
+        Zv[3] = 16 * position_variance_;
+      }
+
+    }
+
+    ObserveModel::~ObserveModel()
+    {
+
+    }
+
+    KalmanFilter::KalmanFilter(double dt, double position_variance, double acceleration_variance, int output_dimension) :
+      dt_(dt), position_variance_(position_variance), depth_multiplier_(std::pow(0.005 / 1.96, 2)),
+      acceleration_variance_(acceleration_variance), output_dimension_(output_dimension)
+    {
+      predict_model_ = new PredictModel(dt, acceleration_variance);
+      observe_model_ = new ObserveModel(position_variance, output_dimension);
+      filter_ = new Bayesian_filter::Unscented_scheme(4);
+    }
+
+    KalmanFilter::KalmanFilter(const KalmanFilter& orig) :
+      // Pointers must start out null: operator= deletes them before copying.
+      filter_(NULL), predict_model_(NULL), observe_model_(NULL)
+    {
+      *this = orig;
+    }
+
+    KalmanFilter&
+    KalmanFilter::operator=(const KalmanFilter& orig)
+    {
+      this->dt_ = orig.dt_;
+      this->position_variance_ = orig.position_variance_;
+      this->depth_multiplier_ = orig.depth_multiplier_;
+      this->acceleration_variance_ = orig.acceleration_variance_;
+      this->output_dimension_ = orig.output_dimension_;   // observe_model_ below depends on it
+      delete this->predict_model_;
+      delete this->observe_model_;
+      delete this->filter_;
+      this->predict_model_ = new PredictModel(dt_, acceleration_variance_);
+      this->observe_model_ = new ObserveModel(position_variance_, output_dimension_);
+      this->filter_ = new Bayesian_filter::Unscented_scheme(4, 2);
+
+      this->filter_->s.resize(orig.filter_->s.size(), false);
+      for(size_t i = 0; i < orig.filter_->s.size(); i++)
+      {
+        this->filter_->s(i) = orig.filter_->s(i);
+      }
+
+      this->filter_->S.resize(orig.filter_->S.size1(), orig.filter_->S.size2(), false);
+      for(size_t i = 0; i < orig.filter_->S.size1(); i++)
+      {
+        for(size_t j = 0; j < orig.filter_->S.size2(); j++)
+        {
+          this->filter_->S(i, j) = orig.filter_->S(i, j);
+        }
+      }
+
+      this->filter_->SI.resize(orig.filter_->SI.size1(), orig.filter_->SI.size2(), false);
+      for(size_t i = 0; i < orig.filter_->SI.size1(); i++)
+      {
+        for(size_t j = 0; j < orig.filter_->SI.size2(); j++)
+        {
+          this->filter_->SI(i, j) = orig.filter_->SI(i, j);
+        }
+      }
+
+      this->filter_->x.resize(orig.filter_->x.size(), false);
+      for(size_t i = 0; i < orig.filter_->x.size(); i++)
+      {
+        this->filter_->x(i) = orig.filter_->x(i);
+      }
+
+      this->filter_->X.resize(orig.filter_->X.size1(), orig.filter_->X.size2(), false);
+      for(size_t i = 0; i < orig.filter_->X.size1(); i++)
+      {
+        for(size_t
j = 0; j < orig.filter_->X.size2(); j++) + { + this->filter_->X(i, j) = orig.filter_->X(i, j); + } + } + + this->filter_->XX.resize(orig.filter_->XX.size1(), orig.filter_->XX.size2(), false); + for(size_t i = 0; i < orig.filter_->XX.size1(); i++) + { + for(size_t j = 0; j < orig.filter_->XX.size2(); j++) + { + this->filter_->XX(i, j) = orig.filter_->XX(i, j); + } + } + + return *this; + } + + KalmanFilter::~KalmanFilter() + { + delete predict_model_; + delete observe_model_; + delete filter_; + } + + void + KalmanFilter::init(double x, double y, double distance, bool velocity_in_motion_term) + { + + Bayesian_filter_matrix::Vec state(4); + Bayesian_filter_matrix::SymMatrix cov(4, 4); + + state[0] = x; + state[1] = y; + state[2] = 0.0; + state[3] = 0.0; + + for(size_t i = 0; i < 4; i++) + for(size_t j = 0; j < 4; j++) + cov(i, j) = 0.0; + + cov(2, 2) = 100; //1000.0; + cov(3, 3) = 100; //1000.0; + + // Filter initialization: + filter_->init_kalman(state, cov); + + // First update: + if (velocity_in_motion_term) + update(x, y, 0, 0, distance); + else + update(x, y, distance); + + } + + void + KalmanFilter::predict() + { + filter_->predict(*predict_model_); + } + + void + KalmanFilter::predict(double& x, double& y, double& vx, double& vy) + { + predict(); + + x = filter_->x[0]; + y = filter_->x[1]; + vx = filter_->x[2]; + vy = filter_->x[3]; + } + + void + KalmanFilter::update() + { + filter_->update(); + //filter_->update_XX(2.0); + } + + void + KalmanFilter::update(double x, double y, double distance) + { + + Bayesian_filter_matrix::Vec observation(2); + observation[0] = x; + observation[1] = y; + + //printf("%d %f %f %f ", _id, x, y, height); + + observe_model_->Zv[0] = position_variance_ + std::pow(distance, 4) * depth_multiplier_; + observe_model_->Zv[1] = position_variance_ + std::pow(distance, 4) * depth_multiplier_; + + filter_->observe(*observe_model_, observation); + filter_->update(); + //filter_->update_XX(2.0); + + } + + void + KalmanFilter::update(double x, double y, double vx, double vy, double distance) + { + + Bayesian_filter_matrix::Vec observation(4); + observation[0] = x; + observation[1] = y; + observation[2] = vx; + observation[3] = vy; + + //printf("%d %f %f %f ", _id, x, y, height); + + // observe_model_->Zv[0] = position_variance_ + std::pow(distance, 4) * depth_multiplier_; + // observe_model_->Zv[1] = position_variance_ + std::pow(distance, 4) * depth_multiplier_; + // observe_model_->Zv[2] = 16 * position_variance_ + std::pow(distance, 4) * depth_multiplier_; + // observe_model_->Zv[3] = 16 * position_variance_ + std::pow(distance, 4) * depth_multiplier_; + + filter_->observe(*observe_model_, observation); + filter_->update(); + //filter_->update_XX(2.0); + + } + + void + KalmanFilter::getMahalanobisParameters(MahalanobisParameters2d& mp) + { + mp.SI = filter_->SI; + mp.x = filter_->x[0]; + mp.y = filter_->x[1]; + } + + void + KalmanFilter::getMahalanobisParameters(MahalanobisParameters4d& mp) + { + mp.SI = filter_->SI; + mp.x = filter_->x[0]; + mp.y = filter_->x[1]; + mp.vx = filter_->x[2]; + mp.vy = filter_->x[3]; + } + + double + KalmanFilter::performMahalanobisDistance(double x, double y, const MahalanobisParameters2d& mp) + { + Bayesian_filter_matrix::Vec v(2); + v[0] = x - mp.x; + v[1] = y - mp.y; + return Bayesian_filter_matrix::prod_SPDT(v, mp.SI); + } + + double + KalmanFilter::performMahalanobisDistance(double x, double y, double vx, double vy, const MahalanobisParameters4d& mp) + { + Bayesian_filter_matrix::Vec v(4); + v[0] = x - mp.x; + v[1] = y - mp.y; 
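+      // Velocity components of the innovation (measurement minus prediction):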
+      v[2] = vx - mp.vx;
+      v[3] = vy - mp.vy;
+
+      // Symmetric Positive (Semi) Definite multiply: p = v'*(mp.SI)*v
+      return Bayesian_filter_matrix::prod_SPDT(v, mp.SI);
+    }
+
+    Bayesian_filter::FM::SymMatrix
+    KalmanFilter::getInnovationCovariance()
+    {
+      return filter_->SI;
+    }
+
+    void
+    KalmanFilter::getState(double& x, double& y, double& vx, double& vy)
+    {
+      x = filter_->x[0];
+      y = filter_->x[1];
+      vx = filter_->x[2];
+      vy = filter_->x[3];
+    }
+
+    void
+    KalmanFilter::getState(double& x, double& y)
+    {
+      x = filter_->x[0];
+      y = filter_->x[1];
+    }
+
+    void
+    KalmanFilter::setPredictModel (double acceleration_variance)
+    {
+      acceleration_variance_ = acceleration_variance;
+      delete predict_model_;   // release the previous model before replacing it
+      predict_model_ = new PredictModel(dt_, acceleration_variance_);
+    }
+
+    void
+    KalmanFilter::setObserveModel (double position_variance)
+    {
+      position_variance_ = position_variance;
+      delete observe_model_;   // release the previous model before replacing it
+      observe_model_ = new ObserveModel(position_variance_, output_dimension_);
+    }
+  } /* namespace tracking */
+} /* namespace open_ptrack */
diff --git a/tracking/launch/tracking/src/munkres.cpp b/tracking/launch/tracking/src/munkres.cpp new file mode 100644 index 0000000..5445ad6 --- /dev/null +++ b/tracking/launch/tracking/src/munkres.cpp @@ -0,0 +1,410 @@
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it]
+ * Copyright (c) 2013-, Open Perception, Inc.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials provided
+ *   with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ *   contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ * + * Author: Matteo Munaro [matteo.munaro@dei.unipd.it] + * + */ + +#include "open_ptrack/tracking/munkres.h" + +namespace open_ptrack +{ + namespace tracking + { + using namespace std; + + void + Munkres::step_one(double** matrix, int rows, int cols, int& step) { + double min_in_row; + + for (int r = 0; r < rows; r++) { + min_in_row = matrix[r][0]; + for (int c = 0; c < cols; c++) + if (matrix[r][c] < min_in_row) + min_in_row = matrix[r][c]; + for (int c = 0; c < cols; c++) + matrix[r][c] -= min_in_row; + } + step = 2; + } + + void + Munkres::step_two(double** matrix, int rows, int cols, int* rowCover, int* colCover, double** m, + int& step) { + for (int r = 0; r < rows; r++) + for (int c = 0; c < cols; c++) { + if (matrix[r][c] == 0 && rowCover[r] == 0 && colCover[c] == 0) { + m[r][c] = 1; + rowCover[r] = 1; + colCover[c] = 1; + } + } + for (int r = 0; r < rows; r++) + rowCover[r] = 0; + for (int c = 0; c < cols; c++) + colCover[c] = 0; + step = 3; + } + + void + Munkres::step_three(int rows, int cols, int* colCover, double** m, int& step) { + int colcount; + for (int r = 0; r < rows; r++) + for (int c = 0; c < cols; c++) + if (m[r][c] == 1) + colCover[c] = 1; + + colcount = 0; + for (int c = 0; c < cols; c++) + if (colCover[c] == 1) + colcount += 1; + if (colcount >= rows || colcount >= cols) + step = 7; + else + step = 4; + } + + void + Munkres::find_a_zero(double** matrix, int rows, int cols, int* rowCover, int* colCover, int& row, + int& col) { + int r = 0; + int c; + bool done; + row = -1; + col = -1; + done = false; + while (!done) { + c = 0; + while (true) { + if (matrix[r][c] == 0 && rowCover[r] == 0 && colCover[c] == 0) { + row = r; + col = c; + done = true; + } + c += 1; + if (c >= cols || done) + break; + } + r += 1; + if (r >= rows) + done = true; + } + } + + bool + Munkres::star_in_row(double** m, int rows, int cols, int row) { + bool tmp = false; + for (int c = 0; c < cols; c++) + if (m[row][c] == 1) + tmp = true; + return tmp; + } + + void + Munkres::find_star_in_row(double** m, int rows, int cols, int row, int& col) { + col = -1; + for (int c = 0; c < cols; c++) + if (m[row][c] == 1) + col = c; + } + + void + Munkres::step_four(double** matrix, int rows, int cols, int* rowCover, int* colCover, double** m, + int& path_row_0, int& path_col_0, int& step) { + int row = -1; + int col = -1; + bool done; + + done = false; + while (!done) { + find_a_zero(matrix, rows, cols, rowCover, colCover, row, col); + if (row == -1) { + done = true; + step = 6; + } else { + m[row][col] = 2; + if (star_in_row(m, rows, cols, row)) { + find_star_in_row(m, rows, cols, row, col); + rowCover[row] = 1; + colCover[col] = 0; + } else { + done = true; + step = 5; + path_row_0 = row; + path_col_0 = col; + } + } + } + } + + void + Munkres::find_star_in_col(int rows, int cols, double** m, int c, int& r) { + r = -1; + for (int i = 0; i < rows; i++) + if (m[i][c] == 1) + r = i; + } + + void + Munkres::find_prime_in_row(int rows, int cols, double** m, int r, int& c) { + for (int j = 0; j < cols; j++) + if (m[r][j] == 2) + c = j; + } + + void + Munkres::augment_path(double** m, int path_count, int** path) { + for (int p = 0; p < path_count; p++) + if (m[path[p][0]][path[p][1]] == 1) + m[path[p][0]][path[p][1]] = 0; + else + m[path[p][0]][path[p][1]] = 1; + } + + void + Munkres::clear_covers(int rows, int cols, int* rowCover, int* colCover) { + for (int r = 0; r < rows; r++) + rowCover[r] = 0; + for (int c = 0; c < cols; c++) + colCover[c] = 0; + } + + void + Munkres::erase_primes(int rows, int cols, 
double** m) { + for (int r = 0; r < rows; r++) + for (int c = 0; c < cols; c++) + if (m[r][c] == 2) + m[r][c] = 0; + } + + void + Munkres::step_five(int rows, int cols, int* rowCover, int* colCover, double** m, int& path_row_0, + int& path_col_0, int& path_count, int** path, int& step) { + bool done; + int r = -1; + int c = -1; + + path_count = 1; + path[path_count - 1][0] = path_row_0; + path[path_count - 1][1] = path_col_0; + done = false; + while (!done) { + find_star_in_col(rows, cols, m, path[path_count - 1][1], r); + if (r > -1) { + path_count += 1; + path[path_count - 1][0] = r; + path[path_count - 1][1] = path[path_count - 2][1]; + } else + done = true; + if (!done) { + find_prime_in_row(rows, cols, m, path[path_count - 1][0], c); + path_count += 1; + path[path_count - 1][0] = path[path_count - 2][0]; + path[path_count - 1][1] = c; + } + } + augment_path(m, path_count, path); + clear_covers(rows, cols, rowCover, colCover); + erase_primes(rows, cols, m); + step = 3; + } + + void + Munkres::find_smallest(double** matrix, int rows, int cols, int* rowCover, int* colCover, + double& minval) { + for (int r = 0; r < rows; r++) + for (int c = 0; c < cols; c++) + if (rowCover[r] == 0 && colCover[c] == 0) + if (minval > matrix[r][c]) + minval = matrix[r][c]; + } + + void + Munkres::step_six(double** matrix, int rows, int cols, int* rowCover, int* colCover, int& step) { + double minval = INFINITY; + find_smallest(matrix, rows, cols, rowCover, colCover, minval); + for (int r = 0; r < rows; r++) + for (int c = 0; c < cols; c++) { + if (rowCover[r] == 1) + matrix[r][c] += minval; + if (colCover[c] == 0) + matrix[r][c] -= minval; + } + step = 4; + } + + double** + Munkres::preprocess(cv::Mat& matrix_in, bool max) + { + // Pad input matrix in order to make it square: + int max_dim = MAX(matrix_in.rows, matrix_in.cols); + cv::Mat matrix_in_padded(max_dim, max_dim, CV_64F, 1000000.0); // padding values are very high numbers (not valid for association) + for (int i = 0; i < matrix_in.rows; i++) + { + for (int j = 0; j < matrix_in.cols; j++) + { + matrix_in_padded.at(i,j) = matrix_in.at(i,j); + } + } + + // Conversion from cv::Mat to array: + int rows = matrix_in_padded.rows; + int cols = matrix_in_padded.cols; + double** matrix = new double*[rows]; + for (int i = 0; i < rows; i++) + { + matrix[i] = new double[cols]; + for (int j = 0; j < cols; j++) + { + matrix[i][j] = matrix_in_padded.at(i,j); + } + } + + // Change cost matrix according to the type of optimum to find (minimum or maximum): + if (max == true) { + double maxValue = matrix[0][0]; + for (int i = 0; i < rows; i++) { + for (int j = 0; j < cols; j++) { + if (matrix[i][j] > maxValue) { + maxValue = matrix[i][j]; + } + } + } + + for (int i = 0; i < rows; i++) { + for (int j = 0; j < cols; j++) { + matrix[i][j] = maxValue - matrix[i][j]; + } + } + } + + return matrix; + } + + cv::Mat + Munkres::solve(cv::Mat& matrix_in, bool max) + { + // Preprocessing: + double** matrix = preprocess(matrix_in, max); + + int max_dim = MAX(matrix_in.rows, matrix_in.cols); + int rows = max_dim; + int cols = max_dim; + + bool done = false; + int step = 1; + int* rowCover = new int[rows]; + int* colCover = new int[cols]; + double** m = new double*[rows]; + int path_row_0 = 0; + int path_col_0 = 0; + int path_count = 0; + for (int j = 0; j < cols; j++) { + colCover[j] = 0; + } + for (int i = 0; i < rows; i++) { + rowCover[i] = 0; + m[i] = new double[cols]; + for (int j = 0; j < cols; j++) { + m[i][j] = 0; + } + } + int** path = new int*[rows*cols]; + for (int i = 
0; i < rows*cols; i++) { + path[i] = new int[2]; + } + + // Main loop: + while (!done) { + switch (step) { + case 1: + step_one(matrix, rows, cols, step); + break; + case 2: + step_two(matrix, rows, cols, rowCover, colCover, m, step); + break; + case 3: + step_three(rows, cols, colCover, m, step); + break; + case 4: + step_four(matrix, rows, cols, rowCover, colCover, m, path_row_0, path_col_0, + step); + break; + case 5: + step_five(rows, cols, rowCover, colCover, m, path_row_0, path_col_0, + path_count, path, step); + break; + case 6: + step_six(matrix, rows, cols, rowCover, colCover, step); + break; + case 7: + done = true; + break; + } + } + + // Conversion from array to cv::Mat: + cv::Mat matrix_out(matrix_in.rows, matrix_in.cols, CV_64F); + for (int i = 0; i < matrix_in.rows; i++) + { + for (int j = 0; j < matrix_in.cols; j++) + { + matrix_out.at(i,j) = m[i][j] - 1; + } + } + + // Releasing the memory + for(int r = 0; r < max_dim; ++r) + { + delete []matrix[r]; + } + delete []matrix; + delete []rowCover; + delete []colCover; + for (int i = 0; i < rows; i++) { + delete []m[i]; + } + delete []m; + for (int i = 0; i < rows*cols; i++) { + delete []path[i]; + } + delete []path; + + return matrix_out; + } + + } /* namespace tracking */ +} /* namespace open_ptrack */ + diff --git a/tracking/launch/tracking/src/track.cpp b/tracking/launch/tracking/src/track.cpp new file mode 100644 index 0000000..840c659 --- /dev/null +++ b/tracking/launch/tracking/src/track.cpp @@ -0,0 +1,637 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ *
+ * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it]
+ *
+ */
+
+// The original include targets were stripped from this patch; reconstructed (assumed):
+#include <ros/ros.h>
+
+#include <open_ptrack/tracking/track.h>
+
+namespace open_ptrack
+{
+  namespace tracking
+  {
+
+    Track::Track(
+        int id,
+        std::string frame_id,
+        double position_variance,
+        double acceleration_variance,
+        double period,
+        bool velocity_in_motion_term) :
+        id_(id),
+        frame_id_(frame_id),
+        period_(period),
+        velocity_in_motion_term_(velocity_in_motion_term)
+    {
+      // Random color used when visualizing this track:
+      color_ = Eigen::Vector3f(
+          float(rand() % 256) / 255,
+          float(rand() % 256) / 255,
+          float(rand() % 256) / 255);
+
+      MAX_SIZE = 90; //XXX create a parameter!!!
+      if (velocity_in_motion_term_)
+      {
+        filter_ = new open_ptrack::tracking::KalmanFilter(period, position_variance, acceleration_variance, 4);
+        tmp_filter_ = new open_ptrack::tracking::KalmanFilter(period, position_variance, acceleration_variance, 4);
+        mahalanobis_map4d_.resize(MAX_SIZE, MahalanobisParameters4d());
+      }
+      else
+      {
+        filter_ = new open_ptrack::tracking::KalmanFilter(period, position_variance, acceleration_variance, 2);
+        tmp_filter_ = new open_ptrack::tracking::KalmanFilter(period, position_variance, acceleration_variance, 2);
+        mahalanobis_map2d_.resize(MAX_SIZE, MahalanobisParameters2d());
+      }
+
+    }
+
+    Track::~Track()
+    {
+      delete filter_;
+      delete tmp_filter_;
+    }
+
+    void
+    Track::init(const Track& old_track)
+    {
+      double x, y;
+      old_track.filter_->getState(x, y);
+
+      filter_->init(x, y, 10, old_track.velocity_in_motion_term_);  // distance fixed at 10 m on re-initialization
+
+      *tmp_filter_ = *filter_;
+      visibility_ = old_track.visibility_;
+
+      ROS_INFO("%d -> %d", old_track.id_, id_);
+
+      z_ = old_track.z_;
+      height_ = old_track.height_;
+      distance_ = old_track.distance_;
+      age_ = old_track.age_;
+
+      detection_source_ = old_track.detection_source_;
+      velocity_in_motion_term_ = old_track.velocity_in_motion_term_;
+      validated_ = validated_ || old_track.validated_;
+      low_confidence_consecutive_frames_ = old_track.low_confidence_consecutive_frames_;
+
+      first_time_detected_ = old_track.first_time_detected_;
+      last_time_detected_ = old_track.last_time_detected_;
+      last_time_detected_with_high_confidence_ = old_track.last_time_detected_with_high_confidence_;
+      last_time_predicted_ = old_track.last_time_predicted_;
+      last_time_predicted_index_ = old_track.last_time_predicted_index_;
+
+      data_association_score_ = old_track.data_association_score_;
+    }
+
+    void
+    Track::init(double x, double y, double z, double height, double distance,
+        open_ptrack::detection::DetectionSource* detection_source)
+    {
+      // Init Kalman filter
+      filter_->init(x, y, distance, velocity_in_motion_term_);
+      z_ = z;
+      height_ = height;
+      distance_ = distance;
+      status_ = NEW;
+      visibility_ = VISIBLE;
+      validated_ = false;
+      updates_with_enough_confidence_ = low_confidence_consecutive_frames_ = 0;
+      detection_source_ = detection_source;
+      first_time_detected_ = detection_source->getTime();
+      last_time_predicted_ = last_time_detected_ = last_time_detected_with_high_confidence_ = detection_source->getTime();
+      last_time_predicted_index_ = 0;
+      age_ = 0.0;
+    }
+
+    void
+    Track::update(
+        double x,
+        double y,
+        double z,
+        double height,
+        double distance,
+        double data_association_score,
+        double confidence,
+        double min_confidence,
+        double min_confidence_detections,
+        open_ptrack::detection::DetectionSource* detection_source,
+        bool first_update)
+    {
+      // Update Kalman filter
+      int difference;
+      double vx, vy;
+      if (velocity_in_motion_term_)
+      {
+        ros::Duration d(1.0);
+        ros::Duration d2(2.0);
+
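+        // Choose a past reference time for the finite-difference velocity
+        // estimate: prefer ~1 s before this detection (but not before the
+        // first detection), never after the last associated detection, and
+        // at most 2 s back.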
std::max(first_time_detected_.toSec(), (detection_source->getTime() - d).toSec()); + t = std::min(t, last_time_detected_.toSec()); + t = std::max(t, (detection_source->getTime() - d2).toSec()); + double dt = t - last_time_predicted_.toSec(); + + difference = int(round(dt / period_)); + int vIndex = (MAX_SIZE + last_time_predicted_index_ + difference) % MAX_SIZE; + + if(difference != 0) + { + vx = - (x - mahalanobis_map4d_[vIndex].x) / dt; + vy = - (y - mahalanobis_map4d_[vIndex].y) / dt; + } + else + { + vx = mahalanobis_map4d_[vIndex].x; + vy = mahalanobis_map4d_[vIndex].x; + } + } + + // Update Kalman filter from the last time the track was visible: + int framesLost = int(round((detection_source->getTime() - last_time_detected_).toSec() / period_)) - 1; + + for(int i = 0; i < framesLost; i++) + { + filter_->predict(); + filter_->update(); + } + + filter_->predict(); + if (velocity_in_motion_term_) + { + filter_->update(x, y, vx, vy, distance); + } + else + { + filter_->update(x, y, distance); + } + + *tmp_filter_ = *filter_; + difference = int(round((detection_source->getTime() - last_time_predicted_).toSec() / period_)); + last_time_predicted_index_ = (MAX_SIZE + last_time_predicted_index_ + difference) % MAX_SIZE; + last_time_predicted_ = last_time_detected_ = detection_source->getTime(); + if (velocity_in_motion_term_) + filter_->getMahalanobisParameters(mahalanobis_map4d_[last_time_predicted_index_]); + else + filter_->getMahalanobisParameters(mahalanobis_map2d_[last_time_predicted_index_]); + + // Update z_ and height_ with a weighted combination of current and new values: + z_ = z_ * 0.9 + z * 0.1; + height_ = height_ * 0.9 + height * 0.1; + distance_ = distance; + + if(confidence > min_confidence) + { + updates_with_enough_confidence_++; + last_time_detected_with_high_confidence_ = last_time_detected_; + } + +// if (((confidence - 0.5) < min_confidence_detections) && ((last_detector_confidence_ - 0.5) < min_confidence_detections)) + if ((confidence < (min_confidence + min_confidence_detections)/2) && (last_detector_confidence_ < (min_confidence + min_confidence_detections)/2)) + { + low_confidence_consecutive_frames_++; + } + else + { + low_confidence_consecutive_frames_ = 0; + } + last_detector_confidence_ = confidence; + + data_association_score_ = data_assocation_score; + + // Compute track age: + age_ = (detection_source->getTime() - first_time_detected_).toSec(); + + detection_source_ = detection_source; + } + + void + Track::validate() + { + validated_ = true; + } + + bool + Track::isValidated() + { + return validated_; + } + + int + Track::getId() + { + return id_; + } + + void + Track::setStatus(Track::Status s) + { + status_ = s; + } + + Track::Status + Track::getStatus() + { + return status_; + } + + void + Track::setVisibility(Track::Visibility v) + { + visibility_ = v; + } + + Track::Visibility + Track::getVisibility() + { + return visibility_; + } + + float + Track::getSecFromFirstDetection(ros::Time current_time) + { + return (current_time - first_time_detected_).toSec(); + } + + float + Track::getSecFromLastDetection(ros::Time current_time) + { + return (current_time - last_time_detected_).toSec(); + } + + float + Track::getSecFromLastHighConfidenceDetection(ros::Time current_time) + { + return (current_time - last_time_detected_with_high_confidence_).toSec(); + } + + float + Track::getLowConfidenceConsecutiveFrames() + { + return low_confidence_consecutive_frames_; + } + + int + Track::getUpdatesWithEnoughConfidence() + { + return updates_with_enough_confidence_; + 
} + + double + Track::getMahalanobisDistance(double x, double y, const ros::Time& when) + { + int difference = int(round((when - last_time_predicted_).toSec() / period_)); +// std::cout << "time difference from last detection: " << difference << std::endl; + int index; + if(difference <= 0) + { + index = (MAX_SIZE + last_time_predicted_index_ + difference) % MAX_SIZE; + } + else + { + for(int i = 0; i < difference; i++) + { + tmp_filter_->predict(); + last_time_predicted_index_ = (last_time_predicted_index_ + 1) % MAX_SIZE; + if (velocity_in_motion_term_) + tmp_filter_->getMahalanobisParameters(mahalanobis_map4d_[last_time_predicted_index_]); + else + tmp_filter_->getMahalanobisParameters(mahalanobis_map2d_[last_time_predicted_index_]); + tmp_filter_->update(); + } + last_time_predicted_ = when; + index = last_time_predicted_index_; + } + + if (velocity_in_motion_term_) + { + ros::Duration d(1.0); + ros::Duration d2(2.0); + + double t = std::max(first_time_detected_.toSec(), (when - d).toSec()); + t = std::min(t, last_time_detected_.toSec()); + t = std::max(t, (last_time_predicted_ - d2).toSec()); + double dt = t - last_time_predicted_.toSec(); + + difference = int(round(dt / period_)); + int vIndex = (MAX_SIZE + last_time_predicted_index_ + difference) % MAX_SIZE; + +// std::cout << "dt: " << dt << std::endl; +// std::cout << "vIndex: " << vIndex << std::endl; + + double vx, vy; + if(difference != 0) + { + vx = - (x - mahalanobis_map4d_[vIndex].x) / dt; + vy = - (y - mahalanobis_map4d_[vIndex].y) / dt; + } + else + { + vx = mahalanobis_map4d_[vIndex].x; + vy = mahalanobis_map4d_[vIndex].x; + } + +// std::cout << "vx: " << vx << ", vy: " << vy<< std::endl; + + return open_ptrack::tracking::KalmanFilter::performMahalanobisDistance(x, y, vx, vy, mahalanobis_map4d_[index]); + } + else + { + return open_ptrack::tracking::KalmanFilter::performMahalanobisDistance(x, y, mahalanobis_map2d_[index]); + } + + } + + void + Track::draw(bool vertical) + { + cv::Scalar color(int(255.0 * color_(0)), int(255.0 * color_(1)), int(255.0 * color_(2))); + + double _x2, _y2; + tmp_filter_->getState(_x2, _y2); + Eigen::Vector3d centroid2(_x2, _y2, z_); + centroid2 = detection_source_->transformToCam(centroid2); + + if(visibility_ == Track::NOT_VISIBLE) + return; + + double _x, _y; + filter_->getState(_x, _y); + + cv::Scalar darkBlue(130,0,0); + cv::Scalar white(255,255,255); + Eigen::Vector3d centroid(_x, _y, z_ ); + Eigen::Vector3d top(_x, _y, z_ + (height_/2)); + Eigen::Vector3d bottom(_x, _y, z_ - (height_/2)); + + std::vector points; + double delta = height_ / 5.0; + points.push_back(Eigen::Vector3d(_x - delta, _y - delta, z_ - (height_/2))); + points.push_back(Eigen::Vector3d(_x + delta, _y - delta, z_ - (height_/2))); + points.push_back(Eigen::Vector3d(_x + delta, _y + delta, z_ - (height_/2))); + points.push_back(Eigen::Vector3d(_x - delta, _y + delta, z_ - (height_/2))); + points.push_back(Eigen::Vector3d(_x - delta, _y - delta, z_ + (height_/2))); + points.push_back(Eigen::Vector3d(_x + delta, _y - delta, z_ + (height_/2))); + points.push_back(Eigen::Vector3d(_x + delta, _y + delta, z_ + (height_/2))); + points.push_back(Eigen::Vector3d(_x - delta, _y + delta, z_ + (height_/2))); + + //TODO loop for each detection source + + centroid = detection_source_->transformToCam(centroid); + cv::circle(detection_source_->getImage(), cv::Point(centroid(0), centroid(1)), 5, color, 1); + top = detection_source_->transformToCam(top); + bottom = detection_source_->transformToCam(bottom); + for(std::vector::iterator it 
= points.begin(); it != points.end(); it++) + *it = detection_source_->transformToCam(*it); + + // Draw a paralellepiped around the person: + for(size_t i = 0; i < 4; i++) + { + cv::line(detection_source_->getImage(), cv::Point(points[i](0), points[i](1)), + cv::Point(points[(i + 1) % 4](0), points[(i + 1) % 4](1)), color, + visibility_ == VISIBLE ? 2 : 1, CV_AA); + cv::line(detection_source_->getImage(), cv::Point(points[i + 4](0), points[i + 4](1)), + cv::Point(points[(i + 1) % 4 + 4](0), points[(i + 1) % 4 + 4](1)), color, + visibility_ == VISIBLE ? 2 : 1, CV_AA); + cv::line(detection_source_->getImage(), cv::Point(points[i](0), points[i](1)), + cv::Point(points[i + 4](0), points[i + 4](1)), color, + visibility_ == VISIBLE ? 2 : 1, CV_AA); + } + + std::stringstream ss; + float distance_to_display = float(int(distance_*100))/100; + ss << id_ << ": " << distance_to_display; + + float id_half_number_of_digits = (float)(ss.str().size())/2; + + // Draw white id_ on a blue background: + if (not vertical) + { + cv::rectangle(detection_source_->getImage(), cv::Point(top(0)-8*id_half_number_of_digits, top(1)-12), + cv::Point(top(0)+12*id_half_number_of_digits, top(1) +2), darkBlue, CV_FILLED, + visibility_ == VISIBLE ? 8 : 1);//, 0); + cv::putText(detection_source_->getImage(), ss.str(), cv::Point(top(0)-8*id_half_number_of_digits, + top(1)), cv::FONT_HERSHEY_SIMPLEX, 0.5, white, 1.7, CV_AA); // white id_ + } + else + { + cv::Mat rotated_image = detection_source_->getImage(); + cv::flip(rotated_image.t(), rotated_image, -1); + cv::flip(rotated_image, rotated_image, 1); + cv::rectangle(rotated_image, cv::Point(top(1)-8*id_half_number_of_digits, (rotated_image.rows - top(0)+1)-12), + cv::Point(top(1) +12*id_half_number_of_digits, (rotated_image.rows - top(0)+1)+2), darkBlue, CV_FILLED, + visibility_ == VISIBLE ? 
8 : 1);//, 0); + cv::putText(rotated_image, ss.str(), cv::Point(top(1)-8*id_half_number_of_digits, rotated_image.rows - top(0)+1), + cv::FONT_HERSHEY_SIMPLEX, 0.5, white, 1.7, CV_AA); // white id_ + cv::flip(rotated_image, rotated_image, -1); + cv::flip(rotated_image, rotated_image, 1); + rotated_image = rotated_image.t(); + detection_source_->setImage(rotated_image); + } + //TODO end loop + } + + void + Track::createMarker(visualization_msgs::MarkerArray::Ptr& msg) + { + if(visibility_ == Track::NOT_VISIBLE) + return; + + double _x, _y; + filter_->getState(_x, _y); + + visualization_msgs::Marker marker; + + marker.header.frame_id = frame_id_; + marker.header.stamp = ros::Time::now(); + + marker.ns = "people"; + marker.id = id_; + + marker.type = visualization_msgs::Marker::SPHERE; + marker.action = visualization_msgs::Marker::ADD; + + marker.pose.position.x = _x; + marker.pose.position.y = _y; + marker.pose.position.z = z_; + marker.pose.orientation.x = 0.0; + marker.pose.orientation.y = 0.0; + marker.pose.orientation.z = 0.0; + marker.pose.orientation.w = 1.0; + + marker.scale.x = 0.1; + marker.scale.y = 0.1; + marker.scale.z = 0.1; + + marker.color.r = color_(2); + marker.color.g = color_(1); + marker.color.b = color_(0); + marker.color.a = 1.0; + + marker.lifetime = ros::Duration(0.2); + + msg->markers.push_back(marker); + + /////////////////////////////////// + + visualization_msgs::Marker text_marker; + + text_marker.header.frame_id = frame_id_; + text_marker.header.stamp = ros::Time::now(); + + text_marker.ns = "numbers"; + text_marker.id = id_; + + text_marker.type = visualization_msgs::Marker::TEXT_VIEW_FACING; + text_marker.action = visualization_msgs::Marker::ADD; + + std::stringstream ss; + ss << id_; + text_marker.text = ss.str(); + + text_marker.pose.position.x = _x; + text_marker.pose.position.y = _y; + text_marker.pose.position.z = z_ + (height_/2) + 0.1; + text_marker.pose.orientation.x = 0.0; + text_marker.pose.orientation.y = 0.0; + text_marker.pose.orientation.z = 0.0; + text_marker.pose.orientation.w = 1.0; + + text_marker.scale.x = 0.2; + text_marker.scale.y = 0.2; + text_marker.scale.z = 0.2; + + text_marker.color.r = color_(2); + text_marker.color.g = color_(1); + text_marker.color.b = color_(0); + text_marker.color.a = 1.0; + + text_marker.lifetime = ros::Duration(0.2); + + msg->markers.push_back(text_marker); + + } + + bool + Track::getPointXYZRGB(pcl::PointXYZRGB& p) + { + if(visibility_ == Track::NOT_VISIBLE) + return false; + + double _x, _y; + filter_->getState(_x, _y); + + p.x = float(_x); + p.y = float(_y); + p.z = float(z_); + uchar* rgb_ptr = (uchar*)&p.rgb; + *rgb_ptr++ = uchar(color_(0) * 255.0f); + *rgb_ptr++ = uchar(color_(1) * 255.0f); + *rgb_ptr++ = uchar(color_(2) * 255.0f); + return true; + } + + void + Track::toMsg(opt_msgs::Track& track_msg, bool vertical) + { + + double _x, _y; + filter_->getState(_x, _y); + + track_msg.id = id_; + track_msg.x = _x; + track_msg.y = _y; + track_msg.height = height_; + track_msg.distance = distance_; + track_msg.age = age_; + track_msg.confidence = - data_association_score_; // minus for transforming distance into a sort of confidence + track_msg.visibility = visibility_; + + Eigen::Vector3d top(_x, _y, z_ + (height_/2)); + Eigen::Vector3d bottom(_x, _y, z_ - (height_/2)); + top = detection_source_->transformToCam(top); + bottom = detection_source_->transformToCam(bottom); + if (not vertical) + { + track_msg.box_2D.height = int(std::abs((top - bottom)(1))); + track_msg.box_2D.width = track_msg.box_2D.height / 2; + 
track_msg.box_2D.x = int(top(0)) - track_msg.box_2D.height / 4; + track_msg.box_2D.y = int(top(1)); + } + else + { + track_msg.box_2D.width = int(std::abs((top - bottom)(0))); + track_msg.box_2D.height = track_msg.box_2D.width / 2; + track_msg.box_2D.x = int(top(0)) - track_msg.box_2D.width; + track_msg.box_2D.y = int(top(1)) - track_msg.box_2D.width / 4; + } + } + + open_ptrack::detection::DetectionSource* + Track::getDetectionSource() + { + return detection_source_; + } + + void + Track::setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance) + { + velocity_in_motion_term_ = velocity_in_motion_term; + + // Re-initialize Kalman filter + filter_->setPredictModel (acceleration_variance); + filter_->setObserveModel (position_variance); + double x, y; + filter_->getState(x, y); + filter_->init(x, y, distance_, velocity_in_motion_term_); + + *tmp_filter_ = *filter_; + } + + void + Track::setAccelerationVariance (double acceleration_variance) + { + filter_->setPredictModel (acceleration_variance); + tmp_filter_->setPredictModel (acceleration_variance); + } + + void + Track::setPositionVariance (double position_variance) + { + filter_->setObserveModel (position_variance); + tmp_filter_->setObserveModel (position_variance); + } + } /* namespace tracking */ +} /* namespace open_ptrack */ diff --git a/tracking/launch/tracking/src/tracker.cpp b/tracking/launch/tracking/src/tracker.cpp new file mode 100644 index 0000000..e8e7b27 --- /dev/null +++ b/tracking/launch/tracking/src/tracker.cpp @@ -0,0 +1,566 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
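Two idioms in Track::update() above are worth spelling out. First, z_ and height_ are smoothed with a first-order exponential moving average (new = 0.9 * old + 0.1 * measurement), so a single noisy detection nudges the estimate instead of replacing it. Second, the per-instant Mahalanobis parameters live in a ring buffer of MAX_SIZE prediction slots, and the index arithmetic adds MAX_SIZE before taking the modulo so that negative offsets (a detection stamped earlier than the last prediction) still map into the buffer. Note also that in the zero-difference fallback branch both vx and vy are read from mahalanobis_map4d_[vIndex].x; reading the .y field for vy was presumably intended. A self-contained illustration of the two idioms (all names are local to this sketch):

    #include <iostream>

    int main()
    {
      // 1) Exponential moving average, as used for z_ and height_:
      double height = 1.80;          // current track estimate
      double measurement = 1.70;     // new detection
      height = height * 0.9 + measurement * 0.1;
      std::cout << height << std::endl;   // 1.79: nudged, not replaced

      // 2) Ring-buffer indexing over the last MAX_SIZE prediction instants.
      //    Adding MAX_SIZE before the modulo keeps the result non-negative
      //    even when 'difference' reaches back in time:
      const int MAX_SIZE = 90;             // same constant Track uses
      int last_index = 3;
      int difference = -5;                 // five frames in the past
      int index = (MAX_SIZE + last_index + difference) % MAX_SIZE;
      std::cout << index << std::endl;     // 88, i.e. 5 slots behind 3
      return 0;
    }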
+ * + * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * + */ + +#include + +#include + +namespace open_ptrack +{ + namespace tracking + { + + Tracker::Tracker( + double gate_distance, + bool detector_likelihood, + std::vector likelihood_weights, + bool velocity_in_motion_term, + double min_confidence, + double min_confidence_detections, + double sec_before_old, + double sec_before_fake, + double sec_remain_new, + int detections_to_validate, + double period, + double position_variance, + double acceleration_variance, + std::string world_frame_id, + bool debug_mode, + bool vertical) : + gate_distance_(gate_distance), + detector_likelihood_(detector_likelihood), + likelihood_weights_(likelihood_weights), + velocity_in_motion_term_(velocity_in_motion_term), + min_confidence_(min_confidence), + min_confidence_detections_(min_confidence_detections), + sec_before_old_(sec_before_old), + sec_before_fake_(sec_before_fake), + sec_remain_new_(sec_remain_new), + detections_to_validate_(detections_to_validate), + period_(period), + position_variance_(position_variance), + acceleration_variance_(acceleration_variance), + world_frame_id_(world_frame_id), + debug_mode_(debug_mode), + vertical_(vertical) + { + tracks_counter_ = 0; + } + + Tracker::~Tracker() + { + // TODO Auto-generated destructor stub + } + + void + Tracker::newFrame(const std::vector& detections) + { + detections_.clear(); + unassociated_detections_.clear(); + lost_tracks_.clear(); + new_tracks_.clear(); + detections_ = detections; + + ros::Time current_detections_time = detections_[0].getSource()->getTime(); + + for(std::list::iterator it = tracks_.begin(); it != tracks_.end();) + { + open_ptrack::tracking::Track* t = *it; + bool deleted = false; + + if(((t->getVisibility() == Track::NOT_VISIBLE && (t->getSecFromLastHighConfidenceDetection(current_detections_time)) >= sec_before_old_) + || (!t->isValidated() && t->getSecFromFirstDetection(current_detections_time) >= sec_before_fake_))) + { + if (debug_mode_) + { + std::cout << "Track " << t->getId() << " DELETED" << std::endl; + } + delete t; + it = tracks_.erase(it); + deleted = true; + } + else if(!t->isValidated() && t->getUpdatesWithEnoughConfidence() == detections_to_validate_) + { + t->validate(); + if (debug_mode_) + { + std::cout << "Track " << t->getId() << " VALIDATED" << std::endl; + } + } + else if(t->getStatus() == Track::NEW && t->getSecFromFirstDetection(current_detections_time) >= sec_remain_new_) + { + t->setStatus(Track::NORMAL); + if (debug_mode_) + { + std::cout << "Track " << t->getId() << " set to NORMAL" << std::endl; + } + } + + if(!deleted) + { + if(t->getStatus() == Track::NEW && t->getVisibility() == Track::VISIBLE) + new_tracks_.push_back(t); + if(t->getVisibility() == Track::NOT_VISIBLE) + lost_tracks_.push_back(t); + it++; + } + + } + } + + void + Tracker::updateTracks() + { + createDistanceMatrix(); + createCostMatrix(); + + // Solve Global Nearest Neighbor problem: + Munkres munkres; + cost_matrix_ = munkres.solve(cost_matrix_, false); // rows: targets (tracks), cols: detections + + updateDetectedTracks(); + fillUnassociatedDetections(); + updateLostTracks(); + createNewTracks(); + } + +// void Tracker::drawRgb() +// { +// for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) +// (*it)->draw(vertical_); +// } + + void + Tracker::toMarkerArray(visualization_msgs::MarkerArray::Ptr& msg) + { + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + 
open_ptrack::tracking::Track* t = *it; + t->createMarker(msg); + } + } + + void + Tracker::toMsg(opt_msgs::TrackArray::Ptr& msg) + { + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + open_ptrack::tracking::Track* t = *it; + + opt_msgs::Track track; + t->toMsg(track, vertical_); + msg->tracks.push_back(track); + } + } + + void + Tracker::toMsg(opt_msgs::TrackArray::Ptr& msg, std::string& source_frame_id) + { + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + open_ptrack::tracking::Track* t = *it; + if (strcmp(t->getDetectionSource()->getFrameId().c_str(), source_frame_id.c_str()) == 0) // if strings are equal + { + opt_msgs::Track track; + t->toMsg(track, vertical_); + +// // For publishing only not occluded tracks: +// if (track.visibility < 2) +// msg->tracks.push_back(track); + + // For publishing all tracks: + msg->tracks.push_back(track); + } + } + } + + void + Tracker::getAliveIDs (opt_msgs::IDArray::Ptr& msg) + { + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + open_ptrack::tracking::Track* t = *it; + msg->ids.push_back ((*it)->getId()); + } + msg->max_ID = tracks_counter_; + } + + size_t + Tracker::appendToPointCloud(pcl::PointCloud::Ptr& pointcloud, size_t starting_index, size_t max_size) + { + for(size_t i = 0; i < tracks_.size() && pointcloud->size() < max_size; i++) + { + pcl::PointXYZRGB point; + pointcloud->push_back(point); + } + + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + open_ptrack::tracking::Track* t = *it; + if(t->getPointXYZRGB(pointcloud->points[starting_index])) + starting_index = (starting_index + 1) % max_size; + + } + return starting_index; + } + + /************************ protected methods ************************/ + + int + Tracker::createNewTrack(open_ptrack::detection::Detection& detection) + { + if(detection.getConfidence() < min_confidence_) + return -1; + + open_ptrack::tracking::Track* t; + t = new open_ptrack::tracking::Track( + ++tracks_counter_, + world_frame_id_, + position_variance_, + acceleration_variance_, + period_, + velocity_in_motion_term_ ); + + t->init(detection.getWorldCentroid()(0), detection.getWorldCentroid()(1),detection.getWorldCentroid()(2), + detection.getHeight(), detection.getDistance(), detection.getSource()); + + bool first_update = true; + t->update(detection.getWorldCentroid()(0), detection.getWorldCentroid()(1), detection.getWorldCentroid()(2), + detection.getHeight(), detection.getDistance(), 0.0, + detection.getConfidence(), min_confidence_, min_confidence_detections_, detection.getSource(), first_update); + + ROS_INFO("Created %d", t->getId()); + + tracks_.push_back(t); + return tracks_counter_; + } + + void + Tracker::createDistanceMatrix() + { + distance_matrix_ = cv::Mat_(tracks_.size(), detections_.size()); + int track = 0; + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + //double x, y, height, vx, vz; + Track* t = *it; + //t->predict(x, y, height, vx, vz); + int measure = 0; + for(std::vector::iterator dit = detections_.begin(); dit != detections_.end(); dit++) + { + double detector_likelihood; + + // Compute detector likelihood: + if (detector_likelihood_) + { + detector_likelihood = dit->getConfidence(); + // detector_likelihood = log((dit->getConfidence() + 3) / 6); + } + else + { + detector_likelihood = 0; + } + + // Compute motion likelihood: + double motion_likelihood = t->getMahalanobisDistance( + dit->getWorldCentroid()(0), + dit->getWorldCentroid()(1), 
+ dit->getSource()->getTime()); + + // Compute joint likelihood and put it in the distance matrix: + + distance_matrix_(track, measure++) = likelihood_weights_[0] * detector_likelihood + likelihood_weights_[1] * motion_likelihood; + + // Remove NaN and inf: + if (isnan(distance_matrix_(track, measure-1)) | (not std::isfinite(distance_matrix_(track, measure-1)))) + distance_matrix_(track, measure-1) = 2*gate_distance_; + +// std::cout << (*it)->getId() << ": " << "Motion likelihood: " << likelihood_weights_[0] * motion_likelihood << std::endl; +// if (detector_likelihood_) +// std::cout << (*it)->getId() << ": " << "Detector likelihood: " << likelihood_weights_[1] * dit->getConfidence() << std::endl; +// std::cout << (*it)->getId() << ": " << "JOINT LIKELIHOOD: " << distance_matrix_(track, measure-1) << std::endl; + + /*ROS_INFO("%d(%f, %f) = %f", t->getId(), + dit->getWorldCentroid()(0), + dit->getWorldCentroid()(1), + distance_matrix_(track, measure - 1));*/ + } + track++; + } + +// std::cout << "Distance matrix:" << std::endl; +// for(int row = 0; row < distance_matrix_.rows; row++) +// { +// for(int col = 0; col < distance_matrix_.cols; col++) +// { +// std::cout.width(8); +// std::cout << distance_matrix_(row,col) << ","; +// } +// std::cout << std::endl; +// } +// std::cout << std::endl; + } + + void + Tracker::createCostMatrix() + { + cost_matrix_ = distance_matrix_.clone(); + for(int i = 0; i < distance_matrix_.rows; i++) + { + for(int j = 0; j < distance_matrix_.cols; j++) + { + if(distance_matrix_(i, j) > gate_distance_) + cost_matrix_(i, j) = 1000000.0; + } + } + + +// std::cout << "Munkres input matrix:" << std::endl; +// for(int row = 0; row < cost_matrix_.rows; row++) +// { +// for(int col = 0; col < cost_matrix_.cols; col++) +// { +// std::cout.width(8); +// std::cout << cost_matrix_(row,col) << ","; +// } +// std::cout << std::endl; +// } +// std::cout << std::endl; + } + + void + Tracker::updateDetectedTracks() + { +// std::cout << "Munkres output matrix:" << std::endl; +// for(int row = 0; row < cost_matrix_.rows; row++) { +// for(int col = 0; col < cost_matrix_.cols; col++) { +// std::cout.width(1); +// std::cout << cost_matrix_(row,col) << ","; +// } +// std::cout << std::endl; +// } +// std::cout << std::endl; + + // Iterate over every track: + int track = 0; + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + bool updated = false; + open_ptrack::tracking::Track* t = *it; + + for(int measure = 0; measure < cost_matrix_.cols; measure++) + { + // If a detection<->track association has been found: + if(cost_matrix_(track, measure) == 0.0 && distance_matrix_(track, measure) <= gate_distance_) + { + open_ptrack::detection::Detection& d = detections_[measure]; + + // If the detection has enough confidence in the current frame or in a recent past: +// if ((t->getLowConfidenceConsecutiveFrames() < 10) || ((d.getConfidence() - 0.5) > min_confidence_detections_)) + if ((t->getLowConfidenceConsecutiveFrames() < 10) || (d.getConfidence() > ((min_confidence_ + min_confidence_detections_)/2))) + { + // Update track with the associated detection: + bool first_update = false; + t->update(d.getWorldCentroid()(0), d.getWorldCentroid()(1), d.getWorldCentroid()(2),d.getHeight(), + d.getDistance(), distance_matrix_(track, measure), + d.getConfidence(), min_confidence_, min_confidence_detections_, + d.getSource(), first_update); + + t->setVisibility(d.isOccluded() ? 
Track::OCCLUDED : Track::VISIBLE); + updated = true; + break; + } + else + { + //std::cout << "Id: " << t->getId() << ", lowConfConsFrames: " << t->getLowConfidenceConsecutiveFrames() << ", newConf: " << d.getConfidence()<< std::endl; + } + } + } + if(!updated) + { + if(t->getVisibility() != Track::NOT_VISIBLE) + { + t->setVisibility(Track::NOT_VISIBLE); + //t->update(); + } + } + track++; + } + // std::cout << std::endl; + } + + void + Tracker::fillUnassociatedDetections() + { + // Fill a list with detections not associated to any track: + if(cost_matrix_.cols == 0 && detections_.size() > 0) + { + for(size_t measure = 0; measure < detections_.size(); measure++) + unassociated_detections_.push_back(detections_[measure]); + } + else + { + for(int measure = 0; measure < cost_matrix_.cols; measure++) + { + bool associated = false; + for(int track = 0; track < cost_matrix_.rows; track++) + { + if(cost_matrix_(track, measure) == 0.0) + { + if(distance_matrix_(track, measure) > gate_distance_) + break; + associated = true; + } + } + if(!associated/* && detections_[measure].getConfidence() > min_confidence_*/) + { + unassociated_detections_.push_back(detections_[measure]); + } + } + } + } + + void + Tracker::updateLostTracks() + { + //for(std::list::iterator it = lost_tracks_.begin(); it != lost_tracks_.end(); it++) + // (*it)->update(); + } + + void + Tracker::createNewTracks() + { + for(std::list::iterator dit = unassociated_detections_.begin(); + dit != unassociated_detections_.end(); dit++) + { + createNewTrack(*dit); + } + } + + void + Tracker::setMinConfidenceForTrackInitialization (double min_confidence) + { + min_confidence_ = min_confidence; + } + + void + Tracker::setSecBeforeOld (double sec_before_old) + { + sec_before_old_ = sec_before_old; + } + + void + Tracker::setSecBeforeFake (double sec_before_fake) + { + sec_before_fake_ = sec_before_fake; + } + + void + Tracker::setSecRemainNew (double sec_remain_new) + { + sec_remain_new_ = sec_remain_new; + } + + void + Tracker::setDetectionsToValidate (int detections_to_validate) + { + detections_to_validate_ = detections_to_validate; + } + + void + Tracker::setDetectorLikelihood (bool detector_likelihood) + { + detector_likelihood_ = detector_likelihood; + } + + void + Tracker::setLikelihoodWeights (double detector_weight, double motion_weight) + { + likelihood_weights_[0] = detector_weight; + likelihood_weights_[1] = motion_weight; + } + + void + Tracker::setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance) + { + velocity_in_motion_term_ = velocity_in_motion_term; + acceleration_variance_ = acceleration_variance; + position_variance_ = position_variance; + + // Update all existing tracks: + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + open_ptrack::tracking::Track* t = *it; + t->setVelocityInMotionTerm (velocity_in_motion_term_, acceleration_variance_, position_variance_); + } + } + + void + Tracker::setAccelerationVariance (double acceleration_variance) + { + acceleration_variance_ = acceleration_variance; + + // Update all existing tracks: + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + open_ptrack::tracking::Track* t = *it; + t->setAccelerationVariance (acceleration_variance_); + } + } + + void + Tracker::setPositionVariance (double position_variance) + { + position_variance_ = position_variance; + + // Update all existing tracks: + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + 
open_ptrack::tracking::Track* t = *it; + t->setPositionVariance (position_variance_); + } + } + + void + Tracker::setGateDistance (double gate_distance) + { + gate_distance_ = gate_distance; + } + } /* namespace tracking */ +} /* namespace open_ptrack */ From bc63550c289336b4db8a7599b0f722a48b9bddd4 Mon Sep 17 00:00:00 2001 From: chanbrown007 Date: Mon, 12 Dec 2016 18:08:44 -0800 Subject: [PATCH 18/28] Delete CMakeLists.txt~ --- zed_ros_wrapper/CMakeLists.txt~ | 101 -------------------------------- 1 file changed, 101 deletions(-) delete mode 100644 zed_ros_wrapper/CMakeLists.txt~ diff --git a/zed_ros_wrapper/CMakeLists.txt~ b/zed_ros_wrapper/CMakeLists.txt~ deleted file mode 100644 index 3d258bf..0000000 --- a/zed_ros_wrapper/CMakeLists.txt~ +++ /dev/null @@ -1,101 +0,0 @@ -cmake_minimum_required(VERSION 2.8.7) - -project(zed_wrapper) - -# if CMAKE_BUILD_TYPE is not specified, take 'Release' as default -IF(NOT CMAKE_BUILD_TYPE) - SET(CMAKE_BUILD_TYPE Release) -ENDIF(NOT CMAKE_BUILD_TYPE) - -find_package(ZED 1.0 REQUIRED) - -##For Jetson, OpenCV4Tegra is based on OpenCV2.4 -exec_program(uname ARGS -p OUTPUT_VARIABLE CMAKE_SYSTEM_NAME2) -if ( CMAKE_SYSTEM_NAME2 MATCHES "aarch64" ) # X1 - SET(OCV_VERSION "2.4") - SET(CUDA_VERSION "7.0") -elseif(CMAKE_SYSTEM_NAME2 MATCHES "armv7l" ) # K1 - SET(OCV_VERSION "2.4") - SET(CUDA_VERSION "6.5") -else() # Desktop - SET(OCV_VERSION "3.1") - SET(CUDA_VERSION "7.5") -endif() - -find_package(OpenCV ${OCV_VERSION} COMPONENTS core highgui imgproc REQUIRED) -find_package(CUDA ${CUDA_VERSION} REQUIRED) - -find_package(PCL REQUIRED) - -find_package(catkin REQUIRED COMPONENTS - image_transport - roscpp - rosconsole - sensor_msgs - dynamic_reconfigure - tf2_ros - pcl_conversions -) - -generate_dynamic_reconfigure_options( - cfg/Zed.cfg -) - -catkin_package( - CATKIN_DEPENDS - roscpp - rosconsole - sensor_msgs - opencv - image_transport - dynamic_reconfigure - tf2_ros - pcl_conversions -) - -############################################################################### -# INCLUDES - -# Specify locations of header files. 
-include_directories( - ${catkin_INCLUDE_DIRS} - ${CUDA_INCLUDE_DIRS} - ${ZED_INCLUDE_DIRS} - ${OpenCV_INCLUDE_DIRS} - ${PCL_INCLUDE_DIRS} -) - -link_directories(${ZED_LIBRARY_DIR}) -link_directories(${CUDA_LIBRARY_DIRS}) -link_directories(${OpenCV_LIBRARY_DIRS}) -link_directories(${PCL_LIBRARY_DIRS}) - -############################################################################### - -############################################################################### -# EXECUTABLE - -add_definitions(-std=c++11)# -m64) #-Wall) - - -add_executable( - zed_wrapper_node - src/zed_wrapper_node.cpp -) - -target_link_libraries( - zed_wrapper_node - ${catkin_LIBRARIES} - ${ZED_LIBRARIES} - ${CUDA_LIBRARIES} ${CUDA_nppi_LIBRARY} ${CUDA_npps_LIBRARY} - ${OpenCV_LIBS} - ${PCL_LIBRARIES} - ) - -add_dependencies(zed_wrapper_node ${PROJECT_NAME}_gencfg) -############################################################################### - -#Add all files in subdirectories of the project in -# a dummy_target so qtcreator have access to all files -FILE(GLOB_RECURSE extra_files ${CMAKE_SOURCE_DIR}/*) -add_custom_target(dummy_${PROJECT_NAME} SOURCES ${extra_files}) From 6565321b2c1c7dc0fbba4519de91a158219f9e51 Mon Sep 17 00:00:00 2001 From: chanbrown007 Date: Mon, 12 Dec 2016 18:08:56 -0800 Subject: [PATCH 19/28] Delete zed_wrapper_node.cpp~ --- zed_ros_wrapper/src/zed_wrapper_node.cpp~ | 707 ---------------------- 1 file changed, 707 deletions(-) delete mode 100644 zed_ros_wrapper/src/zed_wrapper_node.cpp~ diff --git a/zed_ros_wrapper/src/zed_wrapper_node.cpp~ b/zed_ros_wrapper/src/zed_wrapper_node.cpp~ deleted file mode 100644 index 7df17ee..0000000 --- a/zed_ros_wrapper/src/zed_wrapper_node.cpp~ +++ /dev/null @@ -1,707 +0,0 @@ -/////////////////////////////////////////////////////////////////////////// -// -// Copyright (c) 2015, STEREOLABS. -// -// All rights reserved. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -/////////////////////////////////////////////////////////////////////////// - - - - -/**************************************************************************************************** - ** This sample is a wrapper for the ZED library in order to use the ZED Camera with ROS. ** - ** You can publish Left+Depth or Left+Right images and camera info on the topics of your choice. ** - ** ** - ** A set of parameters can be specified in the launch file. 
** - ****************************************************************************************************/ - -//standard includes -#include -#include -#include -#include -#include -#include -#include -#include // file exists - -//ROS includes -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - - -//opencv includes -#include -#include -#include -#include - -//Boost includes -#include - -//PCL includes -#include -#include -#include -#include -#include - -//ZED Includes -#include - -using namespace sl::zed; -using namespace std; - - -int confidence; -bool computeDepth; -int openniDepthMode = 0; // 16 bit UC data in mm else 32F in m, for more info http://www.ros.org/reps/rep-0118.html - -// Point cloud thread variables -float* cloud; -bool pointCloudThreadRunning = true; -bool point_cloud_data_processing = false; -string point_cloud_frame_id = ""; -ros::Time point_cloud_time; - -/* \brief Test if a file exist - * \param name : the path to the file - */ -bool file_exist(const std::string& name) { - struct stat buffer; - return (stat(name.c_str(), &buffer) == 0); -} - -/* \brief Image to ros message conversion - * \param img : the image to publish - * \param encodingType : the sensor_msgs::image_encodings encoding type - * \param frameId : the id of the reference frame of the image - * \param t : the ros::Time to stamp the image -*/ -sensor_msgs::ImagePtr imageToROSmsg(cv::Mat img, const std::string encodingType, std::string frameId, ros::Time t){ - sensor_msgs::ImagePtr ptr = boost::make_shared(); - sensor_msgs::Image& imgMessage = *ptr; - imgMessage.header.stamp = t; - imgMessage.header.frame_id = frameId; - imgMessage.height = img.rows; - imgMessage.width = img.cols; - imgMessage.encoding = encodingType; - int num = 1; //for endianness detection - imgMessage.is_bigendian = !(*(char *)&num == 1); - imgMessage.step = img.cols * img.elemSize(); - size_t size = imgMessage.step * img.rows; - imgMessage.data.resize(size); - - if (img.isContinuous()) - memcpy((char*)(&imgMessage.data[0]), img.data, size); - else { - uchar* opencvData = img.data; - uchar* rosData = (uchar*)(&imgMessage.data[0]); - for (unsigned int i = 0; i < img.rows; i++) { - memcpy(rosData, opencvData, imgMessage.step); - rosData += imgMessage.step; - opencvData += img.step; - } - } - return ptr; -} - -/* \brief Publish the pose of the camera with a ros Publisher - * \param Path : the 4x4 matrix representing the camera pose - * \param pub_odom : the publisher object to use - * \param odom_frame_id : the id of the reference frame of the pose - * \param t : the ros::Time to stamp the image - */ -void publishOdom(Eigen::Matrix4f Path, ros::Publisher &pub_odom, string odom_frame_id, ros::Time t) { - nav_msgs::Odometry odom; - odom.header.stamp = t; - odom.header.frame_id = odom_frame_id; - //odom.child_frame_id = "zed_optical_frame"; - - odom.pose.pose.position.y = -Path(0, 3); - odom.pose.pose.position.z = Path(1, 3); - odom.pose.pose.position.x = -Path(2, 3); - Eigen::Quaternionf quat(Path.block<3, 3>(0, 0)); - odom.pose.pose.orientation.x = -quat.z(); - odom.pose.pose.orientation.y = -quat.x(); - odom.pose.pose.orientation.z = quat.y(); - odom.pose.pose.orientation.w = quat.w(); - pub_odom.publish(odom); -} - -/* \brief Publish the pose of the camera as a transformation - * \param Path : the 4x4 matrix representing the camera pose - * \param trans_br : the TransformBroadcaster object to use - * \param odometry_transform_frame_id : the id of the transformation - 
* \param t : the ros::Time to stamp the image - */ -void publishTrackedFrame(Eigen::Matrix4f Path, tf2_ros::TransformBroadcaster &trans_br, string odometry_transform_frame_id, ros::Time t) { - - geometry_msgs::TransformStamped transformStamped; - transformStamped.header.stamp = ros::Time::now(); - transformStamped.header.frame_id = "zed_initial_frame"; - transformStamped.child_frame_id = odometry_transform_frame_id; - transformStamped.transform.translation.x = -Path(2, 3); - transformStamped.transform.translation.y = -Path(0, 3); - transformStamped.transform.translation.z = Path(1, 3); - Eigen::Quaternionf quat(Path.block<3, 3>(0, 0)); - transformStamped.transform.rotation.x = -quat.z(); - transformStamped.transform.rotation.y = -quat.x(); - transformStamped.transform.rotation.z = quat.y(); - transformStamped.transform.rotation.w = quat.w(); - trans_br.sendTransform(transformStamped); -} - -/* \brief Publish a cv::Mat image with a ros Publisher - * \param img : the image to publish - * \param pub_img : the publisher object to use - * \param img_frame_id : the id of the reference frame of the image - * \param t : the ros::Time to stamp the image - */ -void publishImage(cv::Mat img, image_transport::Publisher &pub_img, string img_frame_id, ros::Time t) { - pub_img.publish(imageToROSmsg(img - , sensor_msgs::image_encodings::BGR8 - , img_frame_id - , t )); -} - -/* \brief Publish a cv::Mat depth image with a ros Publisher - * \param depth : the depth image to publish - * \param pub_depth : the publisher object to use - * \param depth_frame_id : the id of the reference frame of the depth image - * \param t : the ros::Time to stamp the depth image - */ -void publishDepth(cv::Mat depth, image_transport::Publisher &pub_depth, string depth_frame_id, ros::Time t) { - string encoding; - if(openniDepthMode){ - depth *= 1000.0f; - depth.convertTo(depth, CV_16UC1); // in mm, rounded - encoding = sensor_msgs::image_encodings::TYPE_16UC1; - } - else { - encoding = sensor_msgs::image_encodings::TYPE_32FC1; - } - pub_depth.publish(imageToROSmsg(depth - , encoding - , depth_frame_id - , t )); -} - -/* \brief Publish a pointCloud with a ros Publisher - * \param p_could : the float pointer to point cloud datas - * \param width : the width of the point cloud - * \param height : the height of the point cloud - * \param pub_cloud : the publisher object to use - * \param cloud_frame_id : the id of the reference frame of the point cloud - * \param t : the ros::Time to stamp the point cloud - */ -void publishPointCloud(int width, int height, ros::Publisher &pub_cloud) { - while (pointCloudThreadRunning) { // check if the thread has to continue - if (!point_cloud_data_processing) { // check if datas are available - std::this_thread::sleep_for(std::chrono::milliseconds(2)); // No data, we just wait - continue; - } - pcl::PointCloud point_cloud; - point_cloud.width = width; - point_cloud.height = height; - int size = width*height; - point_cloud.points.resize(size); - int index4 = 0; - float color; - for (int i = 0; i < size; i++) { - if (cloud[index4 + 2] > 0) { // Check if it's an unvalid point, the depth is more than 0 - index4 += 4; - continue; - } - point_cloud.points[i].x = cloud[index4++]; - point_cloud.points[i].y = -cloud[index4++]; - point_cloud.points[i].z = -cloud[index4++]; - color = cloud[index4++]; - uint32_t color_uint = *(uint32_t*) & color; // Convert the color - unsigned char* color_uchar = (unsigned char*) &color_uint; - color_uint = ((uint32_t) color_uchar[0] << 16 | (uint32_t) color_uchar[1] << 8 | 
(uint32_t) color_uchar[2]); - point_cloud.points[i].rgb = *reinterpret_cast (&color_uint); - } - - //pcl::PointCloud point_cloud_output; - //std::vector indices; - //pcl::removeNaNFromPointCloud(point_cloud, point_cloud_output, indices); - - sensor_msgs::PointCloud2 output; - pcl::toROSMsg(point_cloud, output); // Convert the point cloud to a ROS message - output.header.frame_id = point_cloud_frame_id; // Set the header values of the ROS message - output.header.stamp = point_cloud_time; - pub_cloud.publish(output); - point_cloud_data_processing = false; - } -} - -/* \brief Publish the informations of a camera with a ros Publisher - * \param cam_info_msg : the information message to publish - * \param pub_cam_info : the publisher object to use - * \param t : the ros::Time to stamp the message - */ -void publishCamInfo(sensor_msgs::CameraInfoPtr cam_info_msg, ros::Publisher pub_cam_info, ros::Time t) { - static int seq = 0; - cam_info_msg->header.stamp = t; - cam_info_msg->header.seq = seq; - pub_cam_info.publish(cam_info_msg); - seq++; -} - -/* \brief Get the information of the ZED cameras and store them in an information message - * \param zed : the sl::zed::Camera* pointer to an instance - * \param left_cam_info_msg : the information message to fill with the left camera informations - * \param right_cam_info_msg : the information message to fill with the right camera informations - * \param left_frame_id : the id of the reference frame of the left camera - * \param right_frame_id : the id of the reference frame of the right camera - */ -void fillCamInfo(Camera* zed, sensor_msgs::CameraInfoPtr left_cam_info_msg, sensor_msgs::CameraInfoPtr right_cam_info_msg, - string left_frame_id, string right_frame_id) { - - int width = zed->getImageSize().width; - int height = zed->getImageSize().height; - - sl::zed::StereoParameters* zedParam = zed->getParameters(); - - float baseline = zedParam->baseline * 0.001; // baseline converted in meters - - float fx = zedParam->LeftCam.fx; - float fy = zedParam->LeftCam.fy; - float cx = zedParam->LeftCam.cx; - float cy = zedParam->LeftCam.cy; - - // There is no distorsions since the images are rectified - double k1 = 0; - double k2 = 0; - double k3 = 0; - double p1 = 0; - double p2 = 0; - - left_cam_info_msg->distortion_model = sensor_msgs::distortion_models::PLUMB_BOB; - right_cam_info_msg->distortion_model = sensor_msgs::distortion_models::PLUMB_BOB; - - left_cam_info_msg->D.resize(5); - right_cam_info_msg->D.resize(5); - left_cam_info_msg->D[0] = right_cam_info_msg->D[0] = k1; - left_cam_info_msg->D[1] = right_cam_info_msg->D[1] = k2; - left_cam_info_msg->D[2] = right_cam_info_msg->D[2] = k3; - left_cam_info_msg->D[3] = right_cam_info_msg->D[3] = p1; - left_cam_info_msg->D[4] = right_cam_info_msg->D[4] = p2; - - left_cam_info_msg->K.fill(0.0); - right_cam_info_msg->K.fill(0.0); - left_cam_info_msg->K[0] = right_cam_info_msg->K[0] = fx; - left_cam_info_msg->K[2] = right_cam_info_msg->K[2] = cx; - left_cam_info_msg->K[4] = right_cam_info_msg->K[4] = fy; - left_cam_info_msg->K[5] = right_cam_info_msg->K[5] = cy; - left_cam_info_msg->K[8] = right_cam_info_msg->K[8] = 1.0; - - - - left_cam_info_msg->R.fill(0.0); - right_cam_info_msg->R.fill(0.0); - - left_cam_info_msg->P.fill(0.0); - right_cam_info_msg->P.fill(0.0); - left_cam_info_msg->P[0] = right_cam_info_msg->P[0] = fx; - left_cam_info_msg->P[2] = right_cam_info_msg->P[2] = cx; - left_cam_info_msg->P[5] = right_cam_info_msg->P[5] = fy; - left_cam_info_msg->P[6] = right_cam_info_msg->P[6] = cy; - 
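  // Note on the P[...] indices used here and just below: sensor_msgs/CameraInfo
  // defines P as a 3x4 row-major projection matrix
  //
  //     [ fx   0  cx  Tx ]        P[0] = fx, P[2] = cx, P[3]  = Tx,
  //     [  0  fy  cy  Ty ]   i.e. P[5] = fy, P[6] = cy, P[10] = 1.0,
  //     [  0   0   1   0 ]
  //
  // and for the right camera of a horizontal stereo pair ROS expects
  // Tx = -fx * baseline, which is exactly the value assigned to
  // right_cam_info_msg->P[3] a few lines below.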
left_cam_info_msg->P[10] = right_cam_info_msg->P[10] = 1.0; - right_cam_info_msg->P[3] = (-1 * fx * baseline); - - left_cam_info_msg->width = right_cam_info_msg->width = width; - left_cam_info_msg->height = right_cam_info_msg->height = height; - - left_cam_info_msg->header.frame_id = left_frame_id; - right_cam_info_msg->header.frame_id = right_frame_id; -} - -void callback(zed_ros_wrapper::ZedConfig &config, uint32_t level) { - ROS_INFO("Reconfigure confidence : %d", config.confidence); - confidence = config.confidence; -} - -int main(int argc, char **argv) { - // Launch file parameters - int resolution = sl::zed::HD720; - int quality = sl::zed::MODE::PERFORMANCE; - int sensing_mode = sl::zed::SENSING_MODE::STANDARD; - int rate = 30; - string odometry_DB = ""; - - std::string img_topic = "image_rect"; - - // Set the default topic names - string rgb_topic = "rgb/" + img_topic; - string rgb_cam_info_topic = "rgb/camera_info"; - string rgb_frame_id = "/zed_tracked_frame"; - - string left_topic = "left/" + img_topic; - string left_cam_info_topic = "left/camera_info"; - string left_frame_id = "/zed_tracked_frame"; - - string right_topic = "right/" + img_topic; - string right_cam_info_topic = "right/camera_info"; - string right_frame_id = "/zed_tracked_frame"; - - string depth_topic = "depth/"; - if (openniDepthMode) - depth_topic += "image_raw"; - else - depth_topic += img_topic; - - string depth_cam_info_topic = "depth/camera_info"; - string depth_frame_id = "/zed_depth_optical_frame"; - - string point_cloud_topic = "point_cloud/" + img_topic; - string cloud_frame_id = "/zed_tracked_frame"; - - string odometry_topic = "odom"; - string odometry_frame_id = "/zed_initial_frame"; - string odometry_transform_frame_id = "/zed_tracked_frame"; - - ros::init(argc, argv, "zed_depth_stereo_wrapper_node"); - ROS_INFO("ZED_WRAPPER Node initialized"); - - ros::NodeHandle nh; - ros::NodeHandle nh_ns("~"); - - // Get parameters from launch file - nh_ns.getParam("resolution", resolution); - nh_ns.getParam("quality", quality); - nh_ns.getParam("sensing_mode", sensing_mode); - nh_ns.getParam("frame_rate", rate); - nh_ns.getParam("odometry_DB", odometry_DB); - nh_ns.getParam("openni_depth_mode", openniDepthMode); - if (openniDepthMode) - ROS_INFO_STREAM("Openni depth mode activated"); - - nh_ns.getParam("rgb_topic", rgb_topic); - nh_ns.getParam("rgb_cam_info_topic", rgb_cam_info_topic); - nh_ns.getParam("rgb_frame_id", rgb_frame_id); - - nh_ns.getParam("left_topic", left_topic); - nh_ns.getParam("left_cam_info_topic", left_cam_info_topic); - nh_ns.getParam("left_frame_id", left_frame_id); - - nh_ns.getParam("right_topic", right_topic); - nh_ns.getParam("right_cam_info_topic", right_cam_info_topic); - nh_ns.getParam("right_frame_id", right_frame_id); - - nh_ns.getParam("depth_topic", depth_topic); - nh_ns.getParam("depth_cam_info_topic", depth_cam_info_topic); - nh_ns.getParam("depth_frame_id", depth_frame_id); - - nh_ns.getParam("point_cloud_topic", point_cloud_topic); - nh_ns.getParam("cloud_frame_id", cloud_frame_id); - - nh_ns.getParam("odometry_topic", odometry_topic); - nh_ns.getParam("odometry_frame_id", odometry_frame_id); - nh_ns.getParam("odometry_transform_frame_id", odometry_transform_frame_id); - - // Create the ZED object - std::unique_ptr zed; - if (argc == 2) { - zed.reset(new sl::zed::Camera(argv[1])); // Argument "svo_file" in launch file - ROS_INFO_STREAM("Reading SVO file : " << argv[1]); - } else { - zed.reset(new sl::zed::Camera(static_cast (resolution), rate)); - ROS_INFO_STREAM("Using ZED 
Camera"); - } - - // Try to initialize the ZED - sl::zed::InitParams param; - param.unit = UNIT::METER; - param.coordinate = COORDINATE_SYSTEM::RIGHT_HANDED; - param.mode = static_cast (quality); - param.verbose = true; - - ERRCODE err = ERRCODE::ZED_NOT_AVAILABLE; - while (err != SUCCESS) { - err = zed->init(param); - ROS_INFO_STREAM(errcode2str(err)); - std::this_thread::sleep_for(std::chrono::milliseconds(2000)); - } - - zed->grab(static_cast (sensing_mode), true, true, true); //call the first grab - - //Tracking variables - sl::zed::TRACKING_STATE track_state; - sl::zed::TRACKING_FRAME_STATE frame_state; - Eigen::Matrix4f Path; - Path.setIdentity(4, 4); - - //ERRCODE display - dynamic_reconfigure::Server server; - dynamic_reconfigure::Server::CallbackType f; - - f = boost::bind(&callback, _1, _2); - server.setCallback(f); - //confidence was previously 80 - confidence = 100; - - // Get the parameters of the ZED images - int width = zed->getImageSize().width; - int height = zed->getImageSize().height; - ROS_DEBUG_STREAM("Image size : " << width << "x" << height); - - cv::Size cvSize(width, height); - cv::Mat leftImRGB(cvSize, CV_8UC3); - cv::Mat rightImRGB(cvSize, CV_8UC3); - - - // Create all the publishers - // Image publishers - image_transport::ImageTransport it_zed(nh); - image_transport::Publisher pub_rgb = it_zed.advertise(rgb_topic, 1); //rgb - ROS_INFO_STREAM("Advertized on topic " << rgb_topic); - image_transport::Publisher pub_left = it_zed.advertise(left_topic, 1); //left - ROS_INFO_STREAM("Advertized on topic " << left_topic); - image_transport::Publisher pub_right = it_zed.advertise(right_topic, 1); //right - ROS_INFO_STREAM("Advertized on topic " << right_topic); - image_transport::Publisher pub_depth = it_zed.advertise(depth_topic, 1); //depth - ROS_INFO_STREAM("Advertized on topic " << depth_topic); - - //PointCloud publisher - ros::Publisher pub_cloud = nh.advertise (point_cloud_topic, 1); - ROS_INFO_STREAM("Advertized on topic " << point_cloud_topic); - - // Camera info publishers - ros::Publisher pub_rgb_cam_info = nh.advertise(rgb_cam_info_topic, 1); //rgb - ROS_INFO_STREAM("Advertized on topic " << rgb_cam_info_topic); - ros::Publisher pub_left_cam_info = nh.advertise(left_cam_info_topic, 1); //left - ROS_INFO_STREAM("Advertized on topic " << left_cam_info_topic); - ros::Publisher pub_right_cam_info = nh.advertise(right_cam_info_topic, 1); //right - ROS_INFO_STREAM("Advertized on topic " << right_cam_info_topic); - ros::Publisher pub_depth_cam_info = nh.advertise(depth_cam_info_topic, 1); //depth - ROS_INFO_STREAM("Advertized on topic " << depth_cam_info_topic); - - //Odometry publisher - ros::Publisher pub_odom = nh.advertise(odometry_topic, 1); - tf2_ros::TransformBroadcaster transform_odom_broadcaster; - ROS_INFO_STREAM("Advertized on topic " << odometry_topic); - - // Create and fill the camera information messages - sensor_msgs::CameraInfoPtr rgb_cam_info_msg(new sensor_msgs::CameraInfo()); - sensor_msgs::CameraInfoPtr left_cam_info_msg(new sensor_msgs::CameraInfo()); - sensor_msgs::CameraInfoPtr right_cam_info_msg(new sensor_msgs::CameraInfo()); - sensor_msgs::CameraInfoPtr depth_cam_info_msg(new sensor_msgs::CameraInfo()); - fillCamInfo(zed.get(), left_cam_info_msg, right_cam_info_msg, left_frame_id, right_frame_id); - rgb_cam_info_msg = depth_cam_info_msg = left_cam_info_msg; // the reference camera is the Left one (next to the ZED logo) - - ros::Rate loop_rate(rate); - ros::Time old_t = ros::Time::now(); - bool old_image = false; - std::unique_ptr 
pointCloudThread = nullptr; - pointCloudThread.reset(new std::thread(&publishPointCloud, width, height, std::ref(pub_cloud))); - bool tracking_activated = false; - - try - { - // Main loop - while (ros::ok()) - { - // Check for subscribers - int rgb_SubNumber = pub_rgb.getNumSubscribers(); - int left_SubNumber = pub_left.getNumSubscribers(); - int right_SubNumber = pub_right.getNumSubscribers(); - int depth_SubNumber = pub_depth.getNumSubscribers(); - int cloud_SubNumber = pub_cloud.getNumSubscribers(); - int odom_SubNumber = pub_odom.getNumSubscribers(); - bool runLoop = (rgb_SubNumber + left_SubNumber + right_SubNumber + depth_SubNumber + cloud_SubNumber + odom_SubNumber) > 0; - // Run the loop only if there is some subscribers - if (true) - { - if (odom_SubNumber > 0 && !tracking_activated) - { //Start the tracking - if (odometry_DB != "" && !file_exist(odometry_DB)) - { - odometry_DB = ""; - ROS_WARN("odometry_DB path doesn't exist or is unreachable."); - } - zed->enableTracking(Path, true, odometry_DB); - tracking_activated = true; - } - else if (odom_SubNumber == 0 && tracking_activated) - { //Stop the tracking - zed->stopTracking(); - tracking_activated = false; - } - computeDepth = (depth_SubNumber + cloud_SubNumber + odom_SubNumber) > 0; // Detect if one of the subscriber need to have the depth information - ros::Time t = ros::Time::now(); // Get current time - - if (computeDepth) - { - int actual_confidence = zed->getConfidenceThreshold(); - if (actual_confidence != confidence) - { - zed->setConfidenceThreshold(confidence); - } - - old_image = zed->grab(static_cast (sensing_mode), true, true, cloud_SubNumber > 0); // Ask to compute the depth - } - else - { - old_image = zed->grab(static_cast (sensing_mode), false, false); // Ask to not compute the depth - } - - - if (old_image) - { // Detect if a error occurred (for example: the zed have been disconnected) and re-initialize the ZED - ROS_DEBUG("Wait for a new image to proceed"); - std::this_thread::sleep_for(std::chrono::milliseconds(2)); - if ((t - old_t).toSec() > 5) - { - // delete the old object before constructing a new one - zed.reset(); - if (argc == 2) - { - zed.reset(new sl::zed::Camera(argv[1])); // Argument "svo_file" in launch file - ROS_INFO_STREAM("Reading SVO file : " << argv[1]); - } - else - { - zed.reset(new sl::zed::Camera(static_cast (resolution), rate)); - ROS_INFO_STREAM("Using ZED Camera"); - } - - ROS_INFO("Reinit camera"); - ERRCODE err = ERRCODE::ZED_NOT_AVAILABLE; - - while (err != SUCCESS) - { - err = zed->init(param); // Try to initialize the ZED - ROS_INFO_STREAM(errcode2str(err)); - std::this_thread::sleep_for(std::chrono::milliseconds(2000)); - } - - zed->grab(static_cast (sensing_mode), true, true, cloud_SubNumber > 0); //call the first grab - tracking_activated = false; - - if (odom_SubNumber > 0) { //Start the tracking - if (odometry_DB != "" && !file_exist(odometry_DB)) - { - odometry_DB = ""; - ROS_WARN("odometry_DB path doesn't exist or is unreachable."); - } - - zed->enableTracking(Path, true, odometry_DB); - tracking_activated = true; - } - } - - continue; - } - - old_t = ros::Time::now(); - - // Publish the left == rgb image if someone has subscribed to - //if (left_SubNumber > 0 || rgb_SubNumber > 0) { - // Retrieve RGBA Left image - cv::cvtColor(slMat2cvMat(zed->retrieveImage(sl::zed::SIDE::LEFT)), leftImRGB, CV_RGBA2RGB); // Convert to RGB - // if (left_SubNumber > 0) { - //cout << "Fx: " << left_cam_info_msg->K[0] << "Cx: " << left_cam_info_msg->K[2] << "Fy: " << 
left_cam_info_msg->K[4] << "Cy: " << left_cam_info_msg->K[5] << endl; - publishCamInfo(left_cam_info_msg, pub_left_cam_info, t); - publishImage(leftImRGB, pub_left, left_frame_id, t); - // } - // if (rgb_SubNumber > 0) { - publishCamInfo(rgb_cam_info_msg, pub_rgb_cam_info, t); - publishImage(leftImRGB, pub_rgb, rgb_frame_id, t); // rgb is the left image - //} - //} - - // Publish the right image if someone has subscribed to - if (right_SubNumber > 0) - { - // Retrieve RGBA Right image - cv::cvtColor(slMat2cvMat(zed->retrieveImage(sl::zed::SIDE::RIGHT)), rightImRGB, CV_RGBA2RGB); // Convert to RGB - publishCamInfo(right_cam_info_msg, pub_right_cam_info, t); - publishImage(rightImRGB, pub_right, right_frame_id, t); - } - - // Publish the depth image if someone has subscribed to - if (depth_SubNumber > 0) - { - publishCamInfo(depth_cam_info_msg, pub_depth_cam_info, t); - publishDepth(slMat2cvMat(zed->retrieveMeasure(sl::zed::MEASURE::DEPTH)), pub_depth, depth_frame_id, t); // in meters - } - - // Publish the point cloud if someone has subscribed to - if ( point_cloud_data_processing == false) //cloud_SubNumber > 0 && - { - // Run the point cloud convertion asynchronously to avoid slowing down all the program - // Retrieve raw pointCloud data - cloud = (float*) zed->retrieveMeasure(sl::zed::MEASURE::XYZRGBA).data; - point_cloud_frame_id = cloud_frame_id; - point_cloud_time = t; - point_cloud_data_processing = true; - } - - // Publish the odometry if someone has subscribed to - if (odom_SubNumber > 0) - { - track_state = zed->getPosition(Path, MAT_TRACKING_TYPE::PATH); - publishOdom(Path, pub_odom, odometry_frame_id, t); - } - - //Note, the frame is published, but its values will only change if someone has subscribed to odom - publishTrackedFrame(Path, transform_odom_broadcaster, odometry_transform_frame_id, t); //publish the tracked Frame - - ros::spinOnce(); - loop_rate.sleep(); - } - else - { - - publishTrackedFrame(Path, transform_odom_broadcaster, odometry_transform_frame_id, ros::Time::now()); //publish the tracked Frame before the sleep - std::this_thread::sleep_for(std::chrono::milliseconds(10)); // No subscribers, we just wait - } - } - } - catch (...) 
From b1a09ed20e6b8d24e7d654c99b85121776d83fd7 Mon Sep 17 00:00:00 2001
From: chanbrown007
Date: Mon, 12 Dec 2016 18:09:09 -0800
Subject: [PATCH 20/28] Delete Zed.cfg~

---
 zed_ros_wrapper/cfg/Zed.cfg~ | 10 ----------
 1 file changed, 10 deletions(-)
 delete mode 100644 zed_ros_wrapper/cfg/Zed.cfg~

diff --git a/zed_ros_wrapper/cfg/Zed.cfg~ b/zed_ros_wrapper/cfg/Zed.cfg~
deleted file mode 100644
index cad5bc2..0000000
--- a/zed_ros_wrapper/cfg/Zed.cfg~
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env python
-PACKAGE = "zed_ros_wrapper"
-
-from dynamic_reconfigure.parameter_generator_catkin import *
-
-gen = ParameterGenerator()
-
-gen.add("confidence", int_t, 0, "Confidence threshold, the lower the better", 80, 1, 100)
-
-exit(gen.generate(PACKAGE, "zed_ros_wrapper", "Zed"))

From c40249f999b5406d28a9facab294e1b99a704201 Mon Sep 17 00:00:00 2001
From: chanbrown007
Date: Mon, 9 Jan 2017 09:18:45 -0800
Subject: [PATCH 21/28] Re-included SR support

---
 detection/CMakeLists.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/detection/CMakeLists.txt b/detection/CMakeLists.txt
index 5b70b79..c5c43a7 100644
--- a/detection/CMakeLists.txt
+++ b/detection/CMakeLists.txt
@@ -52,9 +52,9 @@ add_executable(ground_based_people_detector apps/ground_based_people_detector_no
 SET_TARGET_PROPERTIES(ground_based_people_detector PROPERTIES LINK_FLAGS -L${PCL_LIBRARY_DIRS})
 target_link_libraries(ground_based_people_detector ${PROJECT_NAME} ${catkin_LIBRARIES} ${PCL_LIBRARIES} vtkHybrid vtkRendering)
 
-#add_executable(ground_based_people_detector_sr apps/ground_based_people_detector_node_sr.cpp)
-#SET_TARGET_PROPERTIES(ground_based_people_detector_sr PROPERTIES LINK_FLAGS -L${PCL_LIBRARY_DIRS})
-#target_link_libraries(ground_based_people_detector_sr ${PROJECT_NAME} ${catkin_LIBRARIES} ${PCL_LIBRARIES} vtkHybrid vtkRendering)
+add_executable(ground_based_people_detector_sr apps/ground_based_people_detector_node_sr.cpp)
+SET_TARGET_PROPERTIES(ground_based_people_detector_sr PROPERTIES LINK_FLAGS -L${PCL_LIBRARY_DIRS})
+target_link_libraries(ground_based_people_detector_sr ${PROJECT_NAME} ${catkin_LIBRARIES} ${PCL_LIBRARIES} vtkHybrid vtkRendering)
 
 add_library(haar_disp_ada src/haardispada.cpp apps/haardispada_nodelet.cpp)
 target_link_libraries(haar_disp_ada ${catkin_LIBRARIES})

From 34930ccd3f82a0b461560a9a7e1a9eb61c5a1ecf Mon Sep 17 00:00:00 2001
From: chanbrown007
Date: Mon, 9 Jan 2017 09:19:24 -0800
Subject: [PATCH 22/28] Delete ground_.txt

---
 detection/conf/ground_.txt | 4 ----
 1 file changed, 4 deletions(-)
 delete mode 100644 detection/conf/ground_.txt

diff --git a/detection/conf/ground_.txt b/detection/conf/ground_.txt
deleted file mode 100644
index 07e4b47..0000000
--- a/detection/conf/ground_.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-1.59075e-37
-0
-1.4013e-45
-1.45
\ No newline at end of file
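The ground_.txt removed above held four floats; OpenPTrack's conf/ground_*.txt files store the estimated ground plane as the coefficients (a, b, c, d) of ax + by + cz + d = 0, and the near-denormal values here (1.59075e-37, 1.4013e-45) look like an uninitialized estimate, which is presumably why the file was dropped while ground_zed_tracked_frame.txt was kept. A minimal sketch of loading such a file back into the detector; the helper name and the setGround() call are illustrative assumptions, not API taken from this patch:

    #include <fstream>
    #include <string>
    #include <Eigen/Core>

    // Read the four ground-plane coefficients (a, b, c, d) from a conf file.
    Eigen::VectorXf readGroundCoeffs (const std::string& path)
    {
      Eigen::VectorXf ground_coeffs(4);
      std::ifstream in(path.c_str());
      for (int i = 0; i < 4 && in; ++i)
        in >> ground_coeffs(i);        // a, b, c, d of ax + by + cz + d = 0
      return ground_coeffs;
    }

    // Hypothetical usage with a detector configured elsewhere:
    //   people_detector.setGround(readGroundCoeffs("detection/conf/ground_zed_tracked_frame.txt"));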
From 1769c14007962987dd64da5b79a1226833f20ac3 Mon Sep 17 00:00:00 2001
From: chanbrown007
Date: Mon, 9 Jan 2017 09:27:18 -0800
Subject: [PATCH 23/28] Debug Lines Removed

---
 .../ground_based_people_detection_app.hpp     | 33 +++++--------------
 1 file changed, 9 insertions(+), 24 deletions(-)

diff --git a/detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp b/detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp
index 8e31e86..e6e612f 100644
--- a/detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp
+++ b/detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp
@@ -319,7 +319,7 @@ open_ptrack::detection::GroundBasedPeopleDetectionApp::rotateGround(Eige
 }
 
 template typename open_ptrack::detection::GroundBasedPeopleDetectionApp::PointCloudPtr
-open_ptrack::detection::GroundBasedPeopleDetectionApp::preprocessCloud (PointCloudPtr& input_cloud)//, void (*publishBeforeVoxelFilterCloud)(PointCloudPtr&))
+open_ptrack::detection::GroundBasedPeopleDetectionApp::preprocessCloud (PointCloudPtr& input_cloud)
 {
   // Downsample of sampling_factor in every dimension:
   PointCloudPtr cloud_downsampled(new PointCloud);
@@ -370,38 +370,31 @@ open_ptrack::detection::GroundBasedPeopleDetectionApp::preprocessCloud (
   PointCloudPtr cloud_filtered(new PointCloud);
   pcl::VoxelGrid voxel_grid_filter_object;
   if (apply_denoising_)
-  {
-    //std::cout << "APPLY DENOISING" << std::endl;
+  {
     voxel_grid_filter_object.setInputCloud(cloud_denoised);
-  }
+  }
   else
   {
     if (sampling_factor_ != 1)
-    {
-      //std::cout << "APPLY DownSampled" << std::endl;
+    {
       voxel_grid_filter_object.setInputCloud(cloud_downsampled);
-    }
+    }
     else
-    {
-      //std::cout << "Use Input Cloud" << std::endl;
+    {
      voxel_grid_filter_object.setInputCloud(input_cloud);
-    }
+    }
   }
-  //std::cout << "Voxel Size " << voxel_size_ << " Max Distance " << max_distance_ << std::endl;
-
   voxel_grid_filter_object.setLeafSize (voxel_size_, voxel_size_, voxel_size_);
   voxel_grid_filter_object.setFilterFieldName("z");
   voxel_grid_filter_object.setFilterLimits(0.0, max_distance_);
   voxel_grid_filter_object.filter (*cloud_filtered);
-
-  //(*publishBeforeVoxelFilterCloud)(cloud_filtered);
 
   return cloud_filtered;
 }
 
 template bool
-open_ptrack::detection::GroundBasedPeopleDetectionApp::compute (std::vector >& clusters)//, void (*publishExtractRGB)(PointCloudPtr&), void (*publishPreProcessed)(PointCloudPtr&), void (*publishGroundRemoval)(PointCloudPtr&), void (*publishBeforeVoxelFilterCloud)(PointCloudPtr&))
+open_ptrack::detection::GroundBasedPeopleDetectionApp::compute (std::vector >& clusters)
 {
   frame_counter_++;
@@ -448,16 +441,10 @@ open_ptrack::detection::GroundBasedPeopleDetectionApp::compute (std::vec
   // Fill rgb image:
   rgb_image_->points.clear();                    // clear RGB pointcloud
   extractRGBFromPointCloud(cloud_, rgb_image_);  // fill RGB pointcloud
-
-  //TODO publish here extractedRGB
-  //(*publishExtractRGB)(cloud_);
 
   // Point cloud pre-processing (downsampling and filtering):
   PointCloudPtr cloud_filtered(new PointCloud);
-  cloud_filtered = preprocessCloud (cloud_);//, publishBeforeVoxelFilterCloud);
-
-  //TODO publish here preProcessed
-  //(*publishPreProcessed)(cloud_filtered);
+  cloud_filtered = preprocessCloud (cloud_);
 
   if (use_rgb_)
   {
@@ -497,8 +484,6 @@ open_ptrack::detection::GroundBasedPeopleDetectionApp::compute (std::vec
     }
   }
-
-  //TODO publish here groundRemoval
-  //(*publishGroundRemoval)(no_ground_cloud_);
 
   // Background Subtraction (optional):
   if (background_subtraction_)
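After the optional denoising/downsampling branches, the preprocessing retained above reduces to a single pcl::VoxelGrid pass with a z-range filter. A self-contained sketch of that final stage, assuming pcl::PointXYZRGB points and the voxel_size_/max_distance_ parameters named in the diff (the free-function wrapper is illustrative):

    #include <pcl/point_types.h>
    #include <pcl/filters/voxel_grid.h>

    typedef pcl::PointCloud<pcl::PointXYZRGB> PointCloud;

    // Downsample to a voxel_size grid and drop points beyond max_distance
    // along z, mirroring the tail of preprocessCloud() above.
    PointCloud::Ptr voxelFilter (const PointCloud::Ptr& input,
                                 float voxel_size, float max_distance)
    {
      PointCloud::Ptr cloud_filtered(new PointCloud);
      pcl::VoxelGrid<pcl::PointXYZRGB> voxel_grid_filter_object;
      voxel_grid_filter_object.setInputCloud(input);
      voxel_grid_filter_object.setLeafSize(voxel_size, voxel_size, voxel_size);
      voxel_grid_filter_object.setFilterFieldName("z");
      voxel_grid_filter_object.setFilterLimits(0.0, max_distance);
      voxel_grid_filter_object.filter(*cloud_filtered);
      return cloud_filtered;
    }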
From 8b5d31dff96fe1572f688e5eb61163e1a57f0926 Mon Sep 17 00:00:00 2001
From: remap
Date: Mon, 9 Jan 2017 09:58:08 -0800
Subject: [PATCH 24/28] Removed ZED Wrapper - Did some clean up

---
 .../swissranger_camera/SwissRangerConfig.h    | 531 ++
 .../src/swissranger_camera/__init__.py        |   0
 .../cfg/SwissRangerConfig.py                  |  38 +
 .../src/swissranger_camera/cfg/__init__.py    |   0
 .../launch/detection_and_tracking_zed.launch  |   0
 tracking/launch/tracking/CMakeLists.txt       |  39 -
 .../apps/moving_average_filter_node.cpp       | 501 -----------
 .../tracking/apps/tracker_filter_node.cpp     | 500 -----------
 .../launch/tracking/apps/tracker_node.cpp     | 810 ------------------
 .../tracking/cfg/MovingAverageSmoother.cfg    |  40 -
 tracking/launch/tracking/cfg/Tracker.cfg      |  78 --
 .../launch/tracking/cfg/TrackerSmoother.cfg   |  42 -
 .../tracking/conf/moving_average_filter.yaml  |  26 -
 .../tracking/conf/multicamera_tracking.rviz   | 180 ----
 tracking/launch/tracking/conf/tracker.yaml    |  74 --
 .../launch/tracking/conf/tracker_filter.yaml  |  28 -
 .../tracking/conf/tracker_multicamera.yaml    |  78 --
 tracking/launch/tracking/conf/tracker_sr.yaml |  74 --
 tracking/launch/tracking/conf/tracking.rviz   | 249 ------
 .../open_ptrack/tracking/kalman_filter.h      | 317 -------
 .../include/open_ptrack/tracking/munkres.h    | 133 ---
 .../include/open_ptrack/tracking/track.h      | 402 ---------
 .../include/open_ptrack/tracking/tracker.h    | 332 -------
 .../launch/detection_and_tracking.launch      |  15 -
 .../detection_and_tracking_kinect2.launch     |  15 -
 .../launch/detection_and_tracking_sr.launch   |  15 -
 .../detection_and_tracking_stereo.launch      |  15 -
 .../launch/detection_and_tracking_zed.launch  |  15 -
 .../launch/tracking/launch/tracker.launch     |  21 -
 .../tracking/launch/tracker_network.launch    |  24 -
 .../launch/tracking/launch/tracker_sr.launch  |  22 -
 .../tracking/launch/tracking_node.launch      |  15 -
 tracking/launch/tracking/package.xml          |  46 -
 .../launch/tracking/src/kalman_filter.cpp     | 367 --------
 tracking/launch/tracking/src/munkres.cpp      | 410 ---------
 tracking/launch/tracking/src/track.cpp        | 637 --------------
 tracking/launch/tracking/src/tracker.cpp      | 566 ------------
 zed_ros_wrapper/CMakeLists.txt                | 100 ---
 zed_ros_wrapper/LICENSE                       |  28 -
 zed_ros_wrapper/README.md                     | 105 ---
 zed_ros_wrapper/cfg/Zed.cfg                   |  10 -
 zed_ros_wrapper/launch/zed.launch             |  43 -
 zed_ros_wrapper/launch/zed.launch~            |  43 -
 zed_ros_wrapper/launch/zed_tf.launch          |  15 -
 zed_ros_wrapper/package.xml                   |  34 -
 zed_ros_wrapper/records/record_depth.sh       |   2 -
 zed_ros_wrapper/records/record_stereo.sh      |   2 -
 zed_ros_wrapper/src/zed_wrapper_node.cpp      | 707 ---------------
 48 files changed, 569 insertions(+), 7175 deletions(-)
 create mode 100644 swissranger_camera/cfg/cpp/swissranger_camera/SwissRangerConfig.h
 create mode 100644 swissranger_camera/src/swissranger_camera/__init__.py
 create mode 100644 swissranger_camera/src/swissranger_camera/cfg/SwissRangerConfig.py
 create mode 100644 swissranger_camera/src/swissranger_camera/cfg/__init__.py
 rename detection_and_tracking_zed.launch => tracking/launch/detection_and_tracking_zed.launch (100%)
 delete mode 100644 tracking/launch/tracking/CMakeLists.txt
 delete mode 100644 tracking/launch/tracking/apps/moving_average_filter_node.cpp
 delete mode 100644 tracking/launch/tracking/apps/tracker_filter_node.cpp
 delete mode 100644 tracking/launch/tracking/apps/tracker_node.cpp
 delete mode 100644 tracking/launch/tracking/cfg/MovingAverageSmoother.cfg
 delete mode 100644 tracking/launch/tracking/cfg/Tracker.cfg
 delete mode 100644 tracking/launch/tracking/cfg/TrackerSmoother.cfg
 delete mode 100644 tracking/launch/tracking/conf/moving_average_filter.yaml
 delete mode 100644 tracking/launch/tracking/conf/multicamera_tracking.rviz
 delete mode 100644 tracking/launch/tracking/conf/tracker.yaml
 delete mode 100644 tracking/launch/tracking/conf/tracker_filter.yaml
 delete mode 100644 tracking/launch/tracking/conf/tracker_multicamera.yaml
 delete mode 100644 tracking/launch/tracking/conf/tracker_sr.yaml
 delete mode 100644 tracking/launch/tracking/conf/tracking.rviz
 delete mode 100644 tracking/launch/tracking/include/open_ptrack/tracking/kalman_filter.h
 delete mode 100644 tracking/launch/tracking/include/open_ptrack/tracking/munkres.h
 delete mode 100644 tracking/launch/tracking/include/open_ptrack/tracking/track.h
 delete mode 100644 tracking/launch/tracking/include/open_ptrack/tracking/tracker.h
 delete mode 100644 tracking/launch/tracking/launch/detection_and_tracking.launch
 delete mode 100644 tracking/launch/tracking/launch/detection_and_tracking_kinect2.launch
 delete mode 100644 tracking/launch/tracking/launch/detection_and_tracking_sr.launch
 delete mode 100644 tracking/launch/tracking/launch/detection_and_tracking_stereo.launch
 delete mode 100644 tracking/launch/tracking/launch/detection_and_tracking_zed.launch
 delete mode 100644 tracking/launch/tracking/launch/tracker.launch
 delete mode 100644 tracking/launch/tracking/launch/tracker_network.launch
 delete mode 100644 tracking/launch/tracking/launch/tracker_sr.launch
 delete mode 100644 tracking/launch/tracking/launch/tracking_node.launch
 delete mode 100644 tracking/launch/tracking/package.xml
 delete mode 100644 tracking/launch/tracking/src/kalman_filter.cpp
 delete mode 100644 tracking/launch/tracking/src/munkres.cpp
 delete mode 100644 tracking/launch/tracking/src/track.cpp
 delete mode 100644 tracking/launch/tracking/src/tracker.cpp
 delete mode 100644 zed_ros_wrapper/CMakeLists.txt
 delete mode 100644 zed_ros_wrapper/LICENSE
 delete mode 100644 zed_ros_wrapper/README.md
 delete mode 100644 zed_ros_wrapper/cfg/Zed.cfg
 delete mode 100644 zed_ros_wrapper/launch/zed.launch
 delete mode 100644 zed_ros_wrapper/launch/zed.launch~
 delete mode 100644 zed_ros_wrapper/launch/zed_tf.launch
 delete mode 100644 zed_ros_wrapper/package.xml
 delete mode 100644 zed_ros_wrapper/records/record_depth.sh
 delete mode 100644 zed_ros_wrapper/records/record_stereo.sh
 delete mode 100644 zed_ros_wrapper/src/zed_wrapper_node.cpp

diff --git a/swissranger_camera/cfg/cpp/swissranger_camera/SwissRangerConfig.h b/swissranger_camera/cfg/cpp/swissranger_camera/SwissRangerConfig.h
new file mode 100644
index 0000000..fa8d0ae
--- /dev/null
+++ b/swissranger_camera/cfg/cpp/swissranger_camera/SwissRangerConfig.h
@@ -0,0 +1,531 @@
+//#line 2 "/opt/ros/indigo/share/dynamic_reconfigure/templates/ConfigType.h.template"
+// *********************************************************
+//
+// File autogenerated for the swissranger_camera package
+// by the dynamic_reconfigure package.
+// Please do not edit.
+// +// ********************************************************/ + +#ifndef __swissranger_camera__SWISSRANGERCONFIG_H__ +#define __swissranger_camera__SWISSRANGERCONFIG_H__ + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace swissranger_camera +{ + class SwissRangerConfigStatics; + + class SwissRangerConfig + { + public: + class AbstractParamDescription : public dynamic_reconfigure::ParamDescription + { + public: + AbstractParamDescription(std::string n, std::string t, uint32_t l, + std::string d, std::string e) + { + name = n; + type = t; + level = l; + description = d; + edit_method = e; + } + + virtual void clamp(SwissRangerConfig &config, const SwissRangerConfig &max, const SwissRangerConfig &min) const = 0; + virtual void calcLevel(uint32_t &level, const SwissRangerConfig &config1, const SwissRangerConfig &config2) const = 0; + virtual void fromServer(const ros::NodeHandle &nh, SwissRangerConfig &config) const = 0; + virtual void toServer(const ros::NodeHandle &nh, const SwissRangerConfig &config) const = 0; + virtual bool fromMessage(const dynamic_reconfigure::Config &msg, SwissRangerConfig &config) const = 0; + virtual void toMessage(dynamic_reconfigure::Config &msg, const SwissRangerConfig &config) const = 0; + virtual void getValue(const SwissRangerConfig &config, boost::any &val) const = 0; + }; + + typedef boost::shared_ptr AbstractParamDescriptionPtr; + typedef boost::shared_ptr AbstractParamDescriptionConstPtr; + + template + class ParamDescription : public AbstractParamDescription + { + public: + ParamDescription(std::string name, std::string type, uint32_t level, + std::string description, std::string edit_method, T SwissRangerConfig::* f) : + AbstractParamDescription(name, type, level, description, edit_method), + field(f) + {} + + T (SwissRangerConfig::* field); + + virtual void clamp(SwissRangerConfig &config, const SwissRangerConfig &max, const SwissRangerConfig &min) const + { + if (config.*field > max.*field) + config.*field = max.*field; + + if (config.*field < min.*field) + config.*field = min.*field; + } + + virtual void calcLevel(uint32_t &comb_level, const SwissRangerConfig &config1, const SwissRangerConfig &config2) const + { + if (config1.*field != config2.*field) + comb_level |= level; + } + + virtual void fromServer(const ros::NodeHandle &nh, SwissRangerConfig &config) const + { + nh.getParam(name, config.*field); + } + + virtual void toServer(const ros::NodeHandle &nh, const SwissRangerConfig &config) const + { + nh.setParam(name, config.*field); + } + + virtual bool fromMessage(const dynamic_reconfigure::Config &msg, SwissRangerConfig &config) const + { + return dynamic_reconfigure::ConfigTools::getParameter(msg, name, config.*field); + } + + virtual void toMessage(dynamic_reconfigure::Config &msg, const SwissRangerConfig &config) const + { + dynamic_reconfigure::ConfigTools::appendParameter(msg, name, config.*field); + } + + virtual void getValue(const SwissRangerConfig &config, boost::any &val) const + { + val = config.*field; + } + }; + + class AbstractGroupDescription : public dynamic_reconfigure::Group + { + public: + AbstractGroupDescription(std::string n, std::string t, int p, int i, bool s) + { + name = n; + type = t; + parent = p; + state = s; + id = i; + } + + std::vector abstract_parameters; + bool state; + + virtual void toMessage(dynamic_reconfigure::Config &msg, const boost::any &config) const = 0; + virtual bool fromMessage(const dynamic_reconfigure::Config &msg, boost::any &config) const =0; + 
virtual void updateParams(boost::any &cfg, SwissRangerConfig &top) const= 0; + virtual void setInitialState(boost::any &cfg) const = 0; + + + void convertParams() + { + for(std::vector::const_iterator i = abstract_parameters.begin(); i != abstract_parameters.end(); ++i) + { + parameters.push_back(dynamic_reconfigure::ParamDescription(**i)); + } + } + }; + + typedef boost::shared_ptr AbstractGroupDescriptionPtr; + typedef boost::shared_ptr AbstractGroupDescriptionConstPtr; + + template + class GroupDescription : public AbstractGroupDescription + { + public: + GroupDescription(std::string name, std::string type, int parent, int id, bool s, T PT::* f) : AbstractGroupDescription(name, type, parent, id, s), field(f) + { + } + + GroupDescription(const GroupDescription& g): AbstractGroupDescription(g.name, g.type, g.parent, g.id, g.state), field(g.field), groups(g.groups) + { + parameters = g.parameters; + abstract_parameters = g.abstract_parameters; + } + + virtual bool fromMessage(const dynamic_reconfigure::Config &msg, boost::any &cfg) const + { + PT* config = boost::any_cast(cfg); + if(!dynamic_reconfigure::ConfigTools::getGroupState(msg, name, (*config).*field)) + return false; + + for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) + { + boost::any n = &((*config).*field); + if(!(*i)->fromMessage(msg, n)) + return false; + } + + return true; + } + + virtual void setInitialState(boost::any &cfg) const + { + PT* config = boost::any_cast(cfg); + T* group = &((*config).*field); + group->state = state; + + for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) + { + boost::any n = boost::any(&((*config).*field)); + (*i)->setInitialState(n); + } + + } + + virtual void updateParams(boost::any &cfg, SwissRangerConfig &top) const + { + PT* config = boost::any_cast(cfg); + + T* f = &((*config).*field); + f->setParams(top, abstract_parameters); + + for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) + { + boost::any n = &((*config).*field); + (*i)->updateParams(n, top); + } + } + + virtual void toMessage(dynamic_reconfigure::Config &msg, const boost::any &cfg) const + { + const PT config = boost::any_cast(cfg); + dynamic_reconfigure::ConfigTools::appendGroup(msg, name, id, parent, config.*field); + + for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) + { + (*i)->toMessage(msg, config.*field); + } + } + + T (PT::* field); + std::vector groups; + }; + +class DEFAULT +{ + public: + DEFAULT() + { + state = true; + name = "Default"; + } + + void setParams(SwissRangerConfig &config, const std::vector params) + { + for (std::vector::const_iterator _i = params.begin(); _i != params.end(); ++_i) + { + boost::any val; + (*_i)->getValue(config, val); + + if("frame_id"==(*_i)->name){frame_id = boost::any_cast(val);} + if("camera_info_url"==(*_i)->name){camera_info_url = boost::any_cast(val);} + if("auto_exposure"==(*_i)->name){auto_exposure = boost::any_cast(val);} + if("integration_time"==(*_i)->name){integration_time = boost::any_cast(val);} + if("amp_threshold"==(*_i)->name){amp_threshold = boost::any_cast(val);} + } + } + + std::string frame_id; +std::string camera_info_url; +int auto_exposure; +int integration_time; +int amp_threshold; + + bool state; + std::string name; + + +}groups; + + + +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + std::string frame_id; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + 
std::string camera_info_url; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + int auto_exposure; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + int integration_time; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + int amp_threshold; +//#line 218 "/opt/ros/indigo/share/dynamic_reconfigure/templates/ConfigType.h.template" + + bool __fromMessage__(dynamic_reconfigure::Config &msg) + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + const std::vector &__group_descriptions__ = __getGroupDescriptions__(); + + int count = 0; + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + if ((*i)->fromMessage(msg, *this)) + count++; + + for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); i ++) + { + if ((*i)->id == 0) + { + boost::any n = boost::any(this); + (*i)->updateParams(n, *this); + (*i)->fromMessage(msg, n); + } + } + + if (count != dynamic_reconfigure::ConfigTools::size(msg)) + { + ROS_ERROR("SwissRangerConfig::__fromMessage__ called with an unexpected parameter."); + ROS_ERROR("Booleans:"); + for (unsigned int i = 0; i < msg.bools.size(); i++) + ROS_ERROR(" %s", msg.bools[i].name.c_str()); + ROS_ERROR("Integers:"); + for (unsigned int i = 0; i < msg.ints.size(); i++) + ROS_ERROR(" %s", msg.ints[i].name.c_str()); + ROS_ERROR("Doubles:"); + for (unsigned int i = 0; i < msg.doubles.size(); i++) + ROS_ERROR(" %s", msg.doubles[i].name.c_str()); + ROS_ERROR("Strings:"); + for (unsigned int i = 0; i < msg.strs.size(); i++) + ROS_ERROR(" %s", msg.strs[i].name.c_str()); + // @todo Check that there are no duplicates. Make this error more + // explicit. + return false; + } + return true; + } + + // This version of __toMessage__ is used during initialization of + // statics when __getParamDescriptions__ can't be called yet. 
+ void __toMessage__(dynamic_reconfigure::Config &msg, const std::vector &__param_descriptions__, const std::vector &__group_descriptions__) const + { + dynamic_reconfigure::ConfigTools::clear(msg); + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + (*i)->toMessage(msg, *this); + + for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); ++i) + { + if((*i)->id == 0) + { + (*i)->toMessage(msg, *this); + } + } + } + + void __toMessage__(dynamic_reconfigure::Config &msg) const + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + const std::vector &__group_descriptions__ = __getGroupDescriptions__(); + __toMessage__(msg, __param_descriptions__, __group_descriptions__); + } + + void __toServer__(const ros::NodeHandle &nh) const + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + (*i)->toServer(nh, *this); + } + + void __fromServer__(const ros::NodeHandle &nh) + { + static bool setup=false; + + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + (*i)->fromServer(nh, *this); + + const std::vector &__group_descriptions__ = __getGroupDescriptions__(); + for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); i++){ + if (!setup && (*i)->id == 0) { + setup = true; + boost::any n = boost::any(this); + (*i)->setInitialState(n); + } + } + } + + void __clamp__() + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + const SwissRangerConfig &__max__ = __getMax__(); + const SwissRangerConfig &__min__ = __getMin__(); + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + (*i)->clamp(*this, __max__, __min__); + } + + uint32_t __level__(const SwissRangerConfig &config) const + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + uint32_t level = 0; + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + (*i)->calcLevel(level, config, *this); + return level; + } + + static const dynamic_reconfigure::ConfigDescription &__getDescriptionMessage__(); + static const SwissRangerConfig &__getDefault__(); + static const SwissRangerConfig &__getMax__(); + static const SwissRangerConfig &__getMin__(); + static const std::vector &__getParamDescriptions__(); + static const std::vector &__getGroupDescriptions__(); + + private: + static const SwissRangerConfigStatics *__get_statics__(); + }; + + template <> // Max and min are ignored for strings. 
+ inline void SwissRangerConfig::ParamDescription::clamp(SwissRangerConfig &config, const SwissRangerConfig &max, const SwissRangerConfig &min) const + { + return; + } + + class SwissRangerConfigStatics + { + friend class SwissRangerConfig; + + SwissRangerConfigStatics() + { +SwissRangerConfig::GroupDescription Default("Default", "", 0, 0, true, &SwissRangerConfig::groups); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __min__.frame_id = ""; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __max__.frame_id = ""; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __default__.frame_id = "camera"; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + Default.abstract_parameters.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("frame_id", "str", 3, "ROS tf frame of reference, resolved with tf_prefix unless absolute.", "", &SwissRangerConfig::frame_id))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __param_descriptions__.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("frame_id", "str", 3, "ROS tf frame of reference, resolved with tf_prefix unless absolute.", "", &SwissRangerConfig::frame_id))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __min__.camera_info_url = ""; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __max__.camera_info_url = ""; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __default__.camera_info_url = ""; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + Default.abstract_parameters.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("camera_info_url", "str", 0, "Camera calibration URL for this video_mode (uncalibrated if null).", "", &SwissRangerConfig::camera_info_url))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __param_descriptions__.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("camera_info_url", "str", 0, "Camera calibration URL for this video_mode (uncalibrated if null).", "", &SwissRangerConfig::camera_info_url))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __min__.auto_exposure = 0; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __max__.auto_exposure = 1; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __default__.auto_exposure = 1; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + Default.abstract_parameters.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("auto_exposure", "int", 0, "Auto exposure control state.", "{'enum_description': 'Feature control states', 'enum': [{'srcline': 54, 'description': 'Use fixed value', 'srcfile': '/home/remap/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg', 
'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Off'}, {'srcline': 55, 'description': 'Camera sets continuously', 'srcfile': '/home/remap/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Auto'}]}", &SwissRangerConfig::auto_exposure))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __param_descriptions__.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("auto_exposure", "int", 0, "Auto exposure control state.", "{'enum_description': 'Feature control states', 'enum': [{'srcline': 54, 'description': 'Use fixed value', 'srcfile': '/home/remap/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg', 'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Off'}, {'srcline': 55, 'description': 'Camera sets continuously', 'srcfile': '/home/remap/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Auto'}]}", &SwissRangerConfig::auto_exposure))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __min__.integration_time = 0; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __max__.integration_time = 255; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __default__.integration_time = 1; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + Default.abstract_parameters.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("integration_time", "int", 0, "integration_time control.", "", &SwissRangerConfig::integration_time))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __param_descriptions__.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("integration_time", "int", 0, "integration_time control.", "", &SwissRangerConfig::integration_time))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __min__.amp_threshold = -1; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __max__.amp_threshold = 65535; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __default__.amp_threshold = -1; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + Default.abstract_parameters.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("amp_threshold", "int", 0, "amplitude_threshold control.", "", &SwissRangerConfig::amp_threshold))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __param_descriptions__.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("amp_threshold", "int", 0, "amplitude_threshold control.", "", &SwissRangerConfig::amp_threshold))); +//#line 233 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + Default.convertParams(); +//#line 233 
"/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __group_descriptions__.push_back(SwissRangerConfig::AbstractGroupDescriptionConstPtr(new SwissRangerConfig::GroupDescription(Default))); +//#line 353 "/opt/ros/indigo/share/dynamic_reconfigure/templates/ConfigType.h.template" + + for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); ++i) + { + __description_message__.groups.push_back(**i); + } + __max__.__toMessage__(__description_message__.max, __param_descriptions__, __group_descriptions__); + __min__.__toMessage__(__description_message__.min, __param_descriptions__, __group_descriptions__); + __default__.__toMessage__(__description_message__.dflt, __param_descriptions__, __group_descriptions__); + } + std::vector __param_descriptions__; + std::vector __group_descriptions__; + SwissRangerConfig __max__; + SwissRangerConfig __min__; + SwissRangerConfig __default__; + dynamic_reconfigure::ConfigDescription __description_message__; + + static const SwissRangerConfigStatics *get_instance() + { + // Split this off in a separate function because I know that + // instance will get initialized the first time get_instance is + // called, and I am guaranteeing that get_instance gets called at + // most once. + static SwissRangerConfigStatics instance; + return &instance; + } + }; + + inline const dynamic_reconfigure::ConfigDescription &SwissRangerConfig::__getDescriptionMessage__() + { + return __get_statics__()->__description_message__; + } + + inline const SwissRangerConfig &SwissRangerConfig::__getDefault__() + { + return __get_statics__()->__default__; + } + + inline const SwissRangerConfig &SwissRangerConfig::__getMax__() + { + return __get_statics__()->__max__; + } + + inline const SwissRangerConfig &SwissRangerConfig::__getMin__() + { + return __get_statics__()->__min__; + } + + inline const std::vector &SwissRangerConfig::__getParamDescriptions__() + { + return __get_statics__()->__param_descriptions__; + } + + inline const std::vector &SwissRangerConfig::__getGroupDescriptions__() + { + return __get_statics__()->__group_descriptions__; + } + + inline const SwissRangerConfigStatics *SwissRangerConfig::__get_statics__() + { + const static SwissRangerConfigStatics *statics; + + if (statics) // Common case + return statics; + + boost::mutex::scoped_lock lock(dynamic_reconfigure::__init_mutex__); + + if (statics) // In case we lost a race. + return statics; + + statics = SwissRangerConfigStatics::get_instance(); + + return statics; + } + +//#line 54 "/home/remap/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg" + const int SwissRanger_Off = 0; +//#line 55 "/home/remap/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg" + const int SwissRanger_Auto = 1; +} + +#endif // __SWISSRANGERRECONFIGURATOR_H__ diff --git a/swissranger_camera/src/swissranger_camera/__init__.py b/swissranger_camera/src/swissranger_camera/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/swissranger_camera/src/swissranger_camera/cfg/SwissRangerConfig.py b/swissranger_camera/src/swissranger_camera/cfg/SwissRangerConfig.py new file mode 100644 index 0000000..26d0aad --- /dev/null +++ b/swissranger_camera/src/swissranger_camera/cfg/SwissRangerConfig.py @@ -0,0 +1,38 @@ +## ********************************************************* +## +## File autogenerated for the swissranger_camera package +## by the dynamic_reconfigure package. +## Please do not edit. 
+## +## ********************************************************/ + +from dynamic_reconfigure.encoding import extract_params + +inf = float('inf') + +config_description = {'upper': 'DEFAULT', 'lower': 'groups', 'srcline': 233, 'name': 'Default', 'parent': 0, 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT', 'field': 'default', 'state': True, 'parentclass': '', 'groups': [], 'parameters': [{'srcline': 259, 'description': 'ROS tf frame of reference, resolved with tf_prefix unless absolute.', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py', 'name': 'frame_id', 'edit_method': '', 'default': 'camera', 'level': 3, 'min': '', 'type': 'str'}, {'srcline': 259, 'description': 'Camera calibration URL for this video_mode (uncalibrated if null).', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py', 'name': 'camera_info_url', 'edit_method': '', 'default': '', 'level': 0, 'min': '', 'type': 'str'}, {'srcline': 259, 'description': 'Auto exposure control state.', 'max': 1, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py', 'name': 'auto_exposure', 'edit_method': "{'enum_description': 'Feature control states', 'enum': [{'srcline': 54, 'description': 'Use fixed value', 'srcfile': '/home/remap/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg', 'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Off'}, {'srcline': 55, 'description': 'Camera sets continuously', 'srcfile': '/home/remap/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Auto'}]}", 'default': 1, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 259, 'description': 'integration_time control.', 'max': 255, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py', 'name': 'integration_time', 'edit_method': '', 'default': 1, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 259, 'description': 'amplitude_threshold control.', 'max': 65535, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py', 'name': 'amp_threshold', 'edit_method': '', 'default': -1, 'level': 0, 'min': -1, 'type': 'int'}], 'type': '', 'id': 0} + +min = {} +max = {} +defaults = {} +level = {} +type = {} +all_level = 0 + +#def extract_params(config): +# params = [] +# params.extend(config['parameters']) +# for group in config['groups']: +# params.extend(extract_params(group)) +# return params + +for param in extract_params(config_description): + min[param['name']] = param['min'] + max[param['name']] = param['max'] + defaults[param['name']] = param['default'] + level[param['name']] = param['level'] + type[param['name']] = param['type'] + all_level = all_level | param['level'] + +SwissRanger_Off = 0 +SwissRanger_Auto = 1 diff --git a/swissranger_camera/src/swissranger_camera/cfg/__init__.py b/swissranger_camera/src/swissranger_camera/cfg/__init__.py new file mode 100644 index 0000000..e69de29 diff --git 
a/detection_and_tracking_zed.launch b/tracking/launch/detection_and_tracking_zed.launch similarity index 100% rename from detection_and_tracking_zed.launch rename to tracking/launch/detection_and_tracking_zed.launch diff --git a/tracking/launch/tracking/CMakeLists.txt b/tracking/launch/tracking/CMakeLists.txt deleted file mode 100644 index 496bda8..0000000 --- a/tracking/launch/tracking/CMakeLists.txt +++ /dev/null @@ -1,39 +0,0 @@ -cmake_minimum_required(VERSION 2.8.3) -project(tracking) -set(CMAKE_BUILD_TYPE RelWithDebInfo) -find_package(catkin REQUIRED COMPONENTS cmake_modules roscpp rosconsole image_transport tf tf_conversions bayes pcl_ros detection opt_msgs opt_utils dynamic_reconfigure) - -find_package(OpenCV 2.4 REQUIRED) -include_directories(${OpenCV_INCLUDE_DIRS}) -link_directories(${OpenCV_LIB_DIR}) - -find_package(Eigen REQUIRED) -include_directories(${Eigen_INCLUDE_DIRS} include ${catkin_INCLUDE_DIRS}) - -# Dynamic reconfigure support -generate_dynamic_reconfigure_options(cfg/Tracker.cfg - #cfg/TrackerSmoother.cfg - cfg/MovingAverageSmoother.cfg -) - -include_directories(cfg/cpp) - -catkin_package( - INCLUDE_DIRS - LIBRARIES ${PROJECT_NAME} - CATKIN_DEPENDS roscpp bayes opencv2 pcl_ros detection tf tf_conversions opt_msgs opt_utils -) - -add_library(${PROJECT_NAME} src/munkres.cpp src/kalman_filter.cpp src/track.cpp src/tracker.cpp) -target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES}) - -add_executable(tracker apps/tracker_node.cpp) -target_link_libraries(tracker ${PROJECT_NAME} ${catkin_LIBRARIES}) - -#add_executable(tracker_filter apps/tracker_filter_node.cpp) -#add_dependencies(tracker_filter ${PROJECT_NAME}_gencfg) -#target_link_libraries(tracker_filter ${PROJECT_NAME} ${catkin_LIBRARIES}) - -add_executable(moving_average_filter apps/moving_average_filter_node.cpp) -add_dependencies(moving_average_filter ${PROJECT_NAME}_gencfg) -target_link_libraries(moving_average_filter ${PROJECT_NAME} ${catkin_LIBRARIES}) diff --git a/tracking/launch/tracking/apps/moving_average_filter_node.cpp b/tracking/launch/tracking/apps/moving_average_filter_node.cpp deleted file mode 100644 index 1521260..0000000 --- a/tracking/launch/tracking/apps/moving_average_filter_node.cpp +++ /dev/null @@ -1,501 +0,0 @@ -/* - * Software License Agreement (BSD License) - * - * Copyright (c) 2015-, Open Perception, Inc. - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the copyright holder(s) nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * Author: Filippo Basso [bassofil@dei.unipd.it] - * Matteo Munaro [matteo.munaro@dei.unipd.it] - * - */ - -#include -#include -#include -#include -#include - -#include - -#include -#include - -namespace open_ptrack -{ -namespace tracking -{ - -typedef ::tracking::MovingAverageSmootherConfig Config; -typedef ::dynamic_reconfigure::Server ReconfigureServer; - - -struct TrackPositionNode -{ - typedef boost::shared_ptr Ptr; - - TrackPositionNode(const Eigen::Array2d & position, const ros::Time & time) : position_(position), time_(time) {} - - const Eigen::Array2d position_; - const ros::Time time_; - - EIGEN_MAKE_ALIGNED_OPERATOR_NEW -}; - -class TrackFilter -{ -public: - - typedef boost::shared_ptr Ptr; - - TrackFilter(int window_size) : window_size_(window_size), last_positions_sum_(Eigen::Array2d::Zero()) {} - - Eigen::Array2d movingAveragePosition() - { - return last_positions_sum_ / last_positions_.size(); - } - - void addPosition(const Eigen::Array2d & position, const ros::Time & time) - { - TrackPositionNode::Ptr node = boost::make_shared(position, time); - if (last_positions_.size() < window_size_) - { - last_positions_.push_front(node); - last_positions_sum_ += node->position_; - } - else - { - const TrackPositionNode::Ptr & remove_node = last_positions_.back(); - last_positions_sum_ -= remove_node->position_; - last_positions_.pop_back(); - last_positions_.push_front(node); - last_positions_sum_ += node->position_; - } - } - - void setWindowSize(int new_size) - { - if (last_positions_.size() > new_size) - { - while (last_positions_.size() > new_size) - { - const TrackPositionNode::Ptr & remove_node = last_positions_.back(); - last_positions_sum_ -= remove_node->position_; - last_positions_.pop_back(); - } - } - window_size_ = new_size; - } - - void removeOldPositions(const ros::Time & min_allowed_time) - { - bool removed = false; - while (not removed) - { - const TrackPositionNode::Ptr & test_node = last_positions_.back(); - if (test_node->time_ < min_allowed_time and last_positions_.size() > 1) - { - last_positions_sum_ -= test_node->position_; - last_positions_.pop_back(); - } - else - { - removed = true; - } - } - } - - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - -protected: - - std::list last_positions_; - int window_size_; - - Eigen::Array2d last_positions_sum_; - -}; - -struct TrackInfo -{ - TrackFilter::Ptr track_; - opt_msgs::Track::Ptr last_msg_; - ros::Time last_msg_time_; -}; - -class MovingAverageSmoother -{ - -public: - - MovingAverageSmoother(ros::NodeHandle & node_handle) - : node_handle_(node_handle), - reconfigure_server_(new ReconfigureServer(config_mutex_, node_handle_)), - rate_(ros::Rate(30.0)) - { - double rate_d, heartbeat_time; - - node_handle_.param("rate", rate_d, 30.0); - node_handle_.param("publish_empty", publish_empty_, true); - node_handle_.param("heartbeat_time", heartbeat_time, 5.0); - node_handle_.param("max_history_size", max_history_size_, 1000); - - node_handle_.param("window_size", window_size_, 5); - - double 
position_lifetime_d, track_lifetime_d; - node_handle_.param("track_lifetime_with_no_detections", track_lifetime_d, 5.0); - node_handle_.param("position_lifetime", position_lifetime_d, 0.5); - track_lifetime_ = ros::Duration(track_lifetime_d); - position_lifetime_ = ros::Duration(position_lifetime_d); - - generateColors(); - history_cloud_ = boost::make_shared >(); - history_cloud_index_ = 0; - history_cloud_->header.frame_id = "world"; - - heartbeat_time_duration_ = ros::Duration(heartbeat_time); - - tracking_sub_ = node_handle_.subscribe("input", 100, &MovingAverageSmoother::trackingCallback, this); - tracking_pub_ = node_handle_.advertise("output", 1); - marker_array_pub_ = node_handle_.advertise("markers_array", 1); - history_pub_ = node_handle_.advertise >("history", 1); - - rate_ = ros::Rate(rate_d); - - ReconfigureServer::CallbackType callback = boost::bind(&MovingAverageSmoother::configCallback, this, _1, _2); - reconfigure_server_->setCallback(callback); - - } - - void configCallback(Config & config, uint32_t level) - { - //rate_ = ros::Rate(config.rate); - - config.track_lifetime_with_no_detections = std::max(config.track_lifetime_with_no_detections, rate_.expectedCycleTime().toSec()); - track_lifetime_ = ros::Duration(config.track_lifetime_with_no_detections); - - heartbeat_time_duration_ = ros::Duration(config.heartbeat_time); - publish_empty_ = config.publish_empty; - - window_size_ = config.window_size; - for (std::map::iterator it = track_map_.begin(); it != track_map_.end(); ++it) - { - TrackInfo & track_info = it->second; - track_info.track_->setWindowSize(window_size_); - } - - config.position_lifetime = std::max(config.position_lifetime, rate_.expectedCycleTime().toSec()); - position_lifetime_ = ros::Duration(config.position_lifetime); - - max_history_size_ = config.max_history_size; - - } - - void trackingCallback(const opt_msgs::TrackArray::ConstPtr & msg) - { - for (size_t i = 0; i < msg->tracks.size(); ++i) - { - const opt_msgs::Track & track_msg = msg->tracks[i]; - - std::map::iterator it = track_map_.find(track_msg.id); - - if (it == track_map_.end()) // Track does not exist - { - TrackInfo track_info; - track_info.track_ = boost::make_shared(window_size_); - track_map_[track_msg.id] = track_info; - } - - TrackInfo & track_info = track_map_[track_msg.id]; - - track_info.track_->addPosition(Eigen::Array2d(track_msg.x, track_msg.y), msg->header.stamp); - track_info.last_msg_ = boost::make_shared(track_msg); - if (track_info.last_msg_->visibility < 2) - track_info.last_msg_time_ = msg->header.stamp; - } - } - - void spin() - { - ros::Time last_heartbeat_time = ros::Time::now(); - - while (ros::ok()) - { - ros::spinOnce(); - - opt_msgs::TrackArray track_msg; - visualization_msgs::MarkerArray marker_msg; - - int n = createMsg(track_msg, marker_msg); - ros::Time current_time = ros::Time::now(); - - config_mutex_.lock(); - if (publish_empty_ or n > 0) - { - tracking_pub_.publish(track_msg); - marker_array_pub_.publish(marker_msg); - history_pub_.publish(history_cloud_); - last_heartbeat_time = current_time; - } - else if (not publish_empty_) - { - // Publish a heartbeat message every 'heartbeat_time' seconds - if ((current_time - last_heartbeat_time) > heartbeat_time_duration_) - { - opt_msgs::TrackArray heartbeat_msg; - heartbeat_msg.header.stamp = current_time; - heartbeat_msg.header.frame_id = "heartbeat"; - tracking_pub_.publish(heartbeat_msg); - marker_array_pub_.publish(marker_msg); - history_pub_.publish(history_cloud_); - last_heartbeat_time = current_time; - } - } 
- config_mutex_.unlock(); - - rate_.sleep(); - } - } - -private: - - void generateColors() - { - for (size_t i = 0; i <= 4; ++i) - for (size_t j = 0; j <= 4; ++j) - for (size_t k = 0; k <= 4; ++k) - color_set_.push_back(Eigen::Vector3f(i * 0.25f, j * 0.25f, k * 0.25f)); - - std::random_shuffle(color_set_.begin(), color_set_.end()); - } - - void appendToHistory(const opt_msgs::Track & track_msg) - { -// if (data.last_msg.tracks[0].visibility == opt_msgs::Track::NOT_VISIBLE) -// return; - - config_mutex_.lock(); - if (history_cloud_->size() < max_history_size_) - { - pcl::PointXYZRGB point; - history_cloud_->push_back(point); - } - config_mutex_.unlock(); - - toPointXYZRGB(history_cloud_->points[history_cloud_index_], track_msg); - history_cloud_index_ = (history_cloud_index_ + 1) % max_history_size_; - } - - int createMsg(opt_msgs::TrackArray & track_msg, - visualization_msgs::MarkerArray & marker_msg) - { - int added = 0; - ros::Time now = ros::Time::now(); - - track_msg.header.stamp = now; - track_msg.header.frame_id = "world"; - - history_cloud_->header.stamp = now.toNSec() / 1000; - std::vector to_remove; - - for (std::map::iterator it = track_map_.begin(); it != track_map_.end(); ++it) - { - TrackInfo & track_info = it->second; - - config_mutex_.lock(); - bool ok = (now - track_info.last_msg_time_) < track_lifetime_; - config_mutex_.unlock(); - - if (ok) - { - track_info.track_->removeOldPositions(now - position_lifetime_); - - Eigen::Array2d position = track_info.track_->movingAveragePosition(); - track_info.last_msg_->x = position[0]; - track_info.last_msg_->y = position[1]; - - track_msg.tracks.push_back(*track_info.last_msg_); - createMarker(marker_msg, *track_info.last_msg_, track_msg.header); - appendToHistory(*track_info.last_msg_); - ++added; - - } - else - { - to_remove.push_back(it->first); - } - } - - for (size_t i = 0; i < to_remove.size(); ++i) - track_map_.erase(to_remove[i]); - - - return added; - } - - - void createMarker(visualization_msgs::MarkerArray & msg, - const opt_msgs::Track & track_msg, - const std_msgs::Header & header) - { -// if(track.visibility == Track::NOT_VISIBLE) -// return; - - visualization_msgs::Marker marker; - - marker.header.frame_id = header.frame_id; - marker.header.stamp = header.stamp; - - marker.ns = "people"; - marker.id = track_msg.id; - - marker.type = visualization_msgs::Marker::SPHERE; - marker.action = visualization_msgs::Marker::ADD; - - marker.pose.position.x = track_msg.x; - marker.pose.position.y = track_msg.y; - marker.pose.position.z = 3 * track_msg.height / 4; - marker.pose.orientation.x = 0.0; - marker.pose.orientation.y = 0.0; - marker.pose.orientation.z = 0.0; - marker.pose.orientation.w = 1.0; - - marker.scale.x = 0.1; - marker.scale.y = 0.1; - marker.scale.z = 0.1; - - marker.color.r = color_set_[track_msg.id % color_set_.size()](2); - marker.color.g = color_set_[track_msg.id % color_set_.size()](1); - marker.color.b = color_set_[track_msg.id % color_set_.size()](0); - marker.color.a = 1.0; - - marker.lifetime = ros::Duration(0.2); - - msg.markers.push_back(marker); - - //------------------------------------ - - visualization_msgs::Marker text_marker; - - text_marker.header.frame_id = header.frame_id; - text_marker.header.stamp = header.stamp; - - text_marker.ns = "numbers"; - text_marker.id = track_msg.id; - - text_marker.type = visualization_msgs::Marker::TEXT_VIEW_FACING; - text_marker.action = visualization_msgs::Marker::ADD; - - std::stringstream ss; - ss << track_msg.id; - text_marker.text = ss.str(); - - 
text_marker.pose.position.x = track_msg.x; - text_marker.pose.position.y = track_msg.y; - text_marker.pose.position.z = track_msg.height + 0.1; - text_marker.pose.orientation.x = 0.0; - text_marker.pose.orientation.y = 0.0; - text_marker.pose.orientation.z = 0.0; - text_marker.pose.orientation.w = 1.0; - - text_marker.scale.x = 0.2; - text_marker.scale.y = 0.2; - text_marker.scale.z = 0.2; - - text_marker.color.r = color_set_[track_msg.id % color_set_.size()](2); - text_marker.color.g = color_set_[track_msg.id % color_set_.size()](1); - text_marker.color.b = color_set_[track_msg.id % color_set_.size()](0); - text_marker.color.a = 1.0; - - text_marker.lifetime = ros::Duration(0.2); - - msg.markers.push_back(text_marker); - } - - void toPointXYZRGB(pcl::PointXYZRGB & p, - const opt_msgs::Track & track_msg) - { -// if(track.visibility == Track::NOT_VISIBLE) -// return; - - p.x = float(track_msg.x); - p.y = float(track_msg.y); - p.z = float(3 * track_msg.height / 4); - uchar * rgb_ptr = reinterpret_cast(&p.rgb); - *rgb_ptr++ = uchar(color_set_[track_msg.id % color_set_.size()](0) * 255.0f); - *rgb_ptr++ = uchar(color_set_[track_msg.id % color_set_.size()](1) * 255.0f); - *rgb_ptr++ = uchar(color_set_[track_msg.id % color_set_.size()](2) * 255.0f); - } - - ros::NodeHandle node_handle_; - - boost::recursive_mutex config_mutex_; - boost::shared_ptr reconfigure_server_; - - ros::Subscriber tracking_sub_; - ros::Publisher tracking_pub_; - ros::Publisher marker_array_pub_; - ros::Publisher history_pub_; - - bool publish_empty_; - ros::Duration heartbeat_time_duration_; - - std::vector > color_set_; - - pcl::PointCloud::Ptr history_cloud_; - int max_history_size_; - size_t history_cloud_index_; - - ros::Rate rate_; - int window_size_; - - ros::Duration track_lifetime_; - ros::Duration position_lifetime_; - - std::map track_map_; - -}; - -} // namespace tracking -} // namespace open_ptrack - -using open_ptrack::tracking::MovingAverageSmoother; - -int main(int argc, char **argv) -{ - // Initialization: - ros::init(argc, argv, "tracking_filter"); - ros::NodeHandle nh("~"); - - MovingAverageSmoother smoother(nh); - smoother.spin(); - - return 0; -} diff --git a/tracking/launch/tracking/apps/tracker_filter_node.cpp b/tracking/launch/tracking/apps/tracker_filter_node.cpp deleted file mode 100644 index cfebfff..0000000 --- a/tracking/launch/tracking/apps/tracker_filter_node.cpp +++ /dev/null @@ -1,500 +0,0 @@ -/* - * Software License Agreement (BSD License) - * - * Copyright (c) 2014-, Open Perception, Inc. - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the copyright holder(s) nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * Author: Filippo Basso [bassofil@dei.unipd.it] - * Matteo Munaro [matteo.munaro@dei.unipd.it] - * - */ - -#include -#include -#include -#include -#include - -#include - -#include -#include - -namespace open_ptrack -{ -namespace tracking -{ - -typedef ::tracking::TrackerSmootherConfig Config; -typedef ::dynamic_reconfigure::Server ReconfigureServer; - -struct LastData -{ - ros::Time last_visible_time; - opt_msgs::TrackArray last_msg; -}; - -struct TrackState -{ - Eigen::Array2d position_, velocity_, acceleration_; - - void predict(double delta_t, Eigen::Array2d & position, Eigen::Array2d & velocity, Eigen::Array2d & acceleration) - { - position = position_ + delta_t * (velocity_ + delta_t * 0.5 * acceleration_); - velocity = velocity_ + delta_t * acceleration_; - acceleration = acceleration_; - } - - void predict(double delta_t) - { - position_ += delta_t * (velocity_ + delta_t * 0.5 * acceleration_); - velocity_ += delta_t * acceleration_; - } - - void update(const Eigen::Array2d & measured_position, double delta_t, double alpha, double beta, double phi) - { - Eigen::Array2d residual = measured_position - position_; - position_ += alpha * residual; - velocity_ += beta / delta_t * residual; - acceleration_ += phi * 0.5 / (delta_t * delta_t) * residual; - } - - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - -}; - -class TrackSmoother -{ - -public: - - TrackSmoother(ros::NodeHandle & node_handle) - : node_handle_(node_handle), - reconfigure_server_(new ReconfigureServer(config_mutex_, node_handle_)), - rate_(ros::Rate(30.0)) - { - double rate_d, track_lifetime_with_no_detections, heartbeat_time; - - node_handle_.param("rate", rate_d, 30.0); - node_handle_.param("track_lifetime_with_no_detections", track_lifetime_with_no_detections, 1.0); - node_handle_.param("publish_empty", publish_empty_, true); - node_handle_.param("heartbeat_time", heartbeat_time, 5.0); - node_handle_.param("max_history_size", max_history_size_, 1000); - - double sigma_process, sigma_noise; - node_handle_.param("sigma_process", sigma_process, 1.0); - node_handle_.param("sigma_noise", sigma_noise, 0.0); - computeWeights(sigma_process, sigma_noise); - - double prediction_duration_d; - node_handle_.param("prediction_duration", prediction_duration_d, 0.1); - prediction_duration_ = ros::Duration(prediction_duration_d); - - generateColors(); - history_cloud_ = boost::make_shared >(); - history_cloud_index_ = 0; - history_cloud_->header.frame_id = "world"; - - time_alive_ = ros::Duration(track_lifetime_with_no_detections); - heartbeat_time_duration_ = ros::Duration(heartbeat_time); - - tracking_sub_ = node_handle_.subscribe("input", 100, &TrackSmoother::trackingCallback, this); - tracking_pub_ = node_handle_.advertise("output", 1); - marker_array_pub_ = node_handle_.advertise("markers_array", 1); - history_pub_ = node_handle_.advertise >("history", 1); - - rate_ = ros::Rate(rate_d); - - ReconfigureServer::CallbackType callback = boost::bind(&TrackSmoother::configCallback, this, _1, _2); - 
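TrackState above is a hand-rolled alpha-beta-gamma filter on the ground-plane (x, y) position: predict() extrapolates with a constant-acceleration model, and update() corrects position, velocity and acceleration by fixed fractions of the measurement residual. A worked single-axis sketch of one predict/update cycle, using plain doubles instead of Eigen arrays; the gains and the measurement are made up for illustration (the real gains come from computeWeights() below):

#include <cstdio>

int main()
{
  double p = 0.0, v = 1.0, a = 0.0;                 // position, velocity, acceleration
  const double dt = 1.0 / 30.0;                     // one cycle at the 30 Hz default rate
  const double alpha = 0.5, beta = 0.2, phi = 0.05; // illustrative gains only

  // Predict, same form as TrackState::predict():
  p += dt * (v + 0.5 * dt * a);
  v += dt * a;

  // Update against a measured position z, same form as TrackState::update():
  const double z = 0.05;                            // hypothetical measurement
  const double r = z - p;                           // residual
  p += alpha * r;
  v += (beta / dt) * r;
  a += (phi * 0.5 / (dt * dt)) * r;

  std::printf("p=%.4f  v=%.4f  a=%.4f\n", p, v, a);
  return 0;
}

One residual drives all three corrections; only the scaling differs (1, 1/dt, 1/dt^2), which is what keeps this filter far cheaper than a full Kalman update.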
reconfigure_server_->setCallback(callback); - - } - - void configCallback(Config & config, uint32_t level) - { - //rate_ = ros::Rate(config.rate); - - config.track_lifetime_with_no_detections = std::max(config.track_lifetime_with_no_detections, rate_.expectedCycleTime().toSec()); - time_alive_ = ros::Duration(config.track_lifetime_with_no_detections); - - heartbeat_time_duration_ = ros::Duration(config.heartbeat_time); - publish_empty_ = config.publish_empty; - - computeWeights(config.sigma_process, config.sigma_noise); - prediction_duration_ = ros::Duration(config.prediction_duration); - - max_history_size_ = config.max_history_size; - - } - - void trackingCallback(const opt_msgs::TrackArray::ConstPtr & tracking_msg) - { - for (size_t i = 0; i < tracking_msg->tracks.size(); ++i) - { - opt_msgs::Track msg_track = tracking_msg->tracks[i]; - opt_msgs::TrackArray msg; - msg.header = tracking_msg->header; - - std::map::iterator it = last_data_map_.find(msg_track.id); - - if (it != last_data_map_.end()) // Track exists - { - LastData & last_data = it->second; - double delta_t = (msg.header.stamp - last_data.last_msg.header.stamp).toSec(); - if (delta_t <= 0) - break; - - if (state_map_.find(msg_track.id) != state_map_.end()) - { - TrackState & state = *state_map_[msg_track.id]; - state.predict(delta_t); - config_mutex_.lock(); - state.update(Eigen::Array2d(msg_track.x, msg_track.y), delta_t, alpha_, beta_, gamma_); - config_mutex_.unlock(); - } - else - { - boost::shared_ptr state = boost::make_shared(); - state->position_ = Eigen::Array2d(msg_track.x, msg_track.y); - state->velocity_ = Eigen::Array2d(0, 0);//(state->position_ - Eigen::Array2d(last_data.last_msg.tracks[0].x, last_data.last_msg.tracks[0].y)) / delta_t; - state->acceleration_ = Eigen::Array2d(0, 0); - state_map_[msg_track.id] = state; - } - - TrackState & state = *state_map_[msg_track.id]; - msg_track.x = state.position_[0]; - msg_track.y = state.position_[1]; - msg.tracks.push_back(msg_track); - - last_data.last_msg = msg; - if (msg_track.visibility != opt_msgs::Track::NOT_VISIBLE) - last_data.last_visible_time = msg.header.stamp; - - } - else// if (msg_track.visibility != opt_msgs::Track::NOT_VISIBLE) - { - LastData data; - msg.tracks.push_back(msg_track); - data.last_msg = msg; - data.last_visible_time = msg.header.stamp; - last_data_map_[msg_track.id] = data; - } - } - } - - void spin() - { - ros::Time last_heartbeat_time = ros::Time::now(); - - while (ros::ok()) - { - ros::spinOnce(); - - opt_msgs::TrackArray track_msg; - visualization_msgs::MarkerArray marker_msg; - - int n = createMsg(track_msg, marker_msg); - ros::Time current_time = ros::Time::now(); - - config_mutex_.lock(); - if (publish_empty_ or n > 0) - { - tracking_pub_.publish(track_msg); - marker_array_pub_.publish(marker_msg); - history_pub_.publish(history_cloud_); - last_heartbeat_time = current_time; - } - else if (not publish_empty_) - { - // Publish a heartbeat message every 'heartbeat_time' seconds - if ((current_time - last_heartbeat_time) > heartbeat_time_duration_) - { - opt_msgs::TrackArray heartbeat_msg; - heartbeat_msg.header.stamp = current_time; - heartbeat_msg.header.frame_id = "heartbeat"; - tracking_pub_.publish(heartbeat_msg); - marker_array_pub_.publish(marker_msg); - history_pub_.publish(history_cloud_); - last_heartbeat_time = current_time; - } - } - config_mutex_.unlock(); - - rate_.sleep(); - } - } - -private: - - void computeWeights(double sigma_process, double sigma_noise) - { - if (sigma_noise < 1e-5) - { - alpha_ = 1.0; - beta_ = 
gamma_ = 0.0; - } - else - { - double t = rate_.expectedCycleTime().toSec(); - double lambda = sigma_process * t * t / sigma_noise; - double b = lambda / 2.0 - 3.0; - double c = lambda / 2.0 + 3.0; - double p = c - b * b / 3.0; - double q = 2.0 * b * b * b / 27.0 - b * c / 3.0 - 1.0; - double v = std::sqrt(q * q + 4 * p * p * p / 27.0); - double z = -std::pow(q + v / 2.0, 1.0 / 3.0); - double s = z - p / (3.0 * z) - b / 3.0; - alpha_ = 1.0 - s * s; - beta_ = 2.0 * (1.0 - s) * (1.0 - s); - gamma_ = beta_ * beta_ / (2 * alpha_); - } - } - - void generateColors() - { - for (size_t i = 0; i <= 4; ++i) - for (size_t j = 0; j <= 4; ++j) - for (size_t k = 0; k <= 4; ++k) - color_set_.push_back(Eigen::Vector3f(i * 0.25f, j * 0.25f, k * 0.25f)); - - std::random_shuffle(color_set_.begin(), color_set_.end()); - } - - void appendToHistory(const LastData & data) - { -// if (data.last_msg.tracks[0].visibility == opt_msgs::Track::NOT_VISIBLE) -// return; - - config_mutex_.lock(); - if (history_cloud_->size() < max_history_size_) - { - pcl::PointXYZRGB point; - history_cloud_->push_back(point); - } - config_mutex_.unlock(); - - toPointXYZRGB(history_cloud_->points[history_cloud_index_], data); - history_cloud_index_ = (history_cloud_index_ + 1) % max_history_size_; - } - - int createMsg(opt_msgs::TrackArray & track_msg, - visualization_msgs::MarkerArray & marker_msg) - { - int added = 0; - ros::Time now = ros::Time::now(); - - track_msg.header.stamp = now; - history_cloud_->header.stamp = now.toNSec() / 1000; - std::vector to_remove; - - for (std::map::iterator it = last_data_map_.begin(); it != last_data_map_.end(); ++it) - { - opt_msgs::TrackArray & saved_msg = it->second.last_msg; - const ros::Time & time = it->second.last_visible_time; - - config_mutex_.lock(); - bool ok = (now - time) < time_alive_; - bool prediction = (now - time) < prediction_duration_; - config_mutex_.unlock(); - - if (ok) - { - if (state_map_.find(it->first) != state_map_.end() and prediction) - { - Eigen::Array2d position, velocity, acceleration; - state_map_[it->first]->predict((now - time).toSec(), position, velocity, acceleration); - saved_msg.tracks[0].x = position[0]; - saved_msg.tracks[0].y = position[1]; - track_msg.tracks.push_back(saved_msg.tracks[0]); - track_msg.header.frame_id = "world"; - createMarker(marker_msg, it->second); - appendToHistory(it->second); - ++added; - } - } - else - { - to_remove.push_back(saved_msg.tracks[0].id); - } - } - - for (size_t i = 0; i < to_remove.size(); ++i) - { - last_data_map_.erase(to_remove[i]); - state_map_.erase(to_remove[i]); - } - - return added; - } - - - void createMarker(visualization_msgs::MarkerArray & msg, - const LastData & data) - { - const opt_msgs::Track & track = data.last_msg.tracks[0]; - -// if(track.visibility == Track::NOT_VISIBLE) -// return; - - visualization_msgs::Marker marker; - - marker.header.frame_id = data.last_msg.header.frame_id; - marker.header.stamp = data.last_msg.header.stamp; - - marker.ns = "people"; - marker.id = track.id; - - marker.type = visualization_msgs::Marker::SPHERE; - marker.action = visualization_msgs::Marker::ADD; - - marker.pose.position.x = track.x; - marker.pose.position.y = track.y; - marker.pose.position.z = track.height / 2; - marker.pose.orientation.x = 0.0; - marker.pose.orientation.y = 0.0; - marker.pose.orientation.z = 0.0; - marker.pose.orientation.w = 1.0; - - marker.scale.x = 0.1; - marker.scale.y = 0.1; - marker.scale.z = 0.1; - - marker.color.r = color_set_[track.id % color_set_.size()](2); - marker.color.g = 
color_set_[track.id % color_set_.size()](1); - marker.color.b = color_set_[track.id % color_set_.size()](0); - marker.color.a = 1.0; - - marker.lifetime = ros::Duration(0.2); - - msg.markers.push_back(marker); - - //------------------------------------ - - visualization_msgs::Marker text_marker; - - text_marker.header.frame_id = data.last_msg.header.frame_id; - text_marker.header.stamp = data.last_msg.header.stamp; - - text_marker.ns = "numbers"; - text_marker.id = track.id; - - text_marker.type = visualization_msgs::Marker::TEXT_VIEW_FACING; - text_marker.action = visualization_msgs::Marker::ADD; - - std::stringstream ss; - ss << track.id; - text_marker.text = ss.str(); - - text_marker.pose.position.x = track.x; - text_marker.pose.position.y = track.y; - text_marker.pose.position.z = track.height + 0.1; - text_marker.pose.orientation.x = 0.0; - text_marker.pose.orientation.y = 0.0; - text_marker.pose.orientation.z = 0.0; - text_marker.pose.orientation.w = 1.0; - - text_marker.scale.x = 0.2; - text_marker.scale.y = 0.2; - text_marker.scale.z = 0.2; - - text_marker.color.r = color_set_[track.id % color_set_.size()](2); - text_marker.color.g = color_set_[track.id % color_set_.size()](1); - text_marker.color.b = color_set_[track.id % color_set_.size()](0); - text_marker.color.a = 1.0; - - text_marker.lifetime = ros::Duration(0.2); - - msg.markers.push_back(text_marker); - } - - void toPointXYZRGB(pcl::PointXYZRGB & p, - const LastData & data) - { - const opt_msgs::Track & track = data.last_msg.tracks[0]; - -// if(track.visibility == Track::NOT_VISIBLE) -// return; - - p.x = float(track.x); - p.y = float(track.y); - p.z = float(track.height / 2); - uchar * rgb_ptr = reinterpret_cast(&p.rgb); - *rgb_ptr++ = uchar(color_set_[track.id % color_set_.size()](0) * 255.0f); - *rgb_ptr++ = uchar(color_set_[track.id % color_set_.size()](1) * 255.0f); - *rgb_ptr++ = uchar(color_set_[track.id % color_set_.size()](2) * 255.0f); - } - - ros::NodeHandle node_handle_; - - boost::recursive_mutex config_mutex_; - boost::shared_ptr reconfigure_server_; - - ros::Subscriber tracking_sub_; - ros::Publisher tracking_pub_; - ros::Publisher marker_array_pub_; - ros::Publisher history_pub_; - - ros::Duration heartbeat_time_duration_; - - bool publish_empty_; - - std::map last_data_map_; - ros::Duration time_alive_; - std::vector > color_set_; - - pcl::PointCloud::Ptr history_cloud_; - int max_history_size_; - size_t history_cloud_index_; - - ros::Rate rate_; - double alpha_, beta_, gamma_; - ros::Duration prediction_duration_; - - std::map > state_map_; - -}; - -} // namespace tracking -} // namespace open_ptrack - -using open_ptrack::tracking::TrackSmoother; - -int main(int argc, char **argv) -{ - // Initialization: - ros::init(argc, argv, "tracking_filter"); - ros::NodeHandle nh("~"); - - TrackSmoother smoother(nh); - smoother.spin(); - - return 0; -} diff --git a/tracking/launch/tracking/apps/tracker_node.cpp b/tracking/launch/tracking/apps/tracker_node.cpp deleted file mode 100644 index 0111bd8..0000000 --- a/tracking/launch/tracking/apps/tracker_node.cpp +++ /dev/null @@ -1,810 +0,0 @@ -/* - * Software License Agreement (BSD License) - * - * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] - * Copyright (c) 2013-, Open Perception, Inc. - * - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the copyright holder(s) nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -//#include - -// Dynamic reconfigure: -#include -#include - -typedef tracking::TrackerConfig Config; -typedef dynamic_reconfigure::Server ReconfigureServer; - -// Global variables: -std::map detection_sources_map; -tf::TransformListener* tf_listener; -std::string world_frame_id; -bool output_history_pointcloud; -int output_history_size; -int detection_history_size; -bool output_markers; -bool output_image_rgb; -bool output_tracking_results; -bool output_detection_results; // Enables/disables the publishing of detection positions to be visualized in RViz -bool vertical; -ros::Publisher results_pub; -ros::Publisher marker_pub_tmp; -ros::Publisher marker_pub; -ros::Publisher pointcloud_pub; -ros::Publisher detection_marker_pub; -ros::Publisher detection_trajectory_pub; -ros::Publisher alive_ids_pub; -size_t starting_index; -size_t detection_insert_index; -tf::Transform camera_frame_to_world_transform; -tf::Transform world_to_camera_frame_transform; -bool extrinsic_calibration; -double period; -open_ptrack::tracking::Tracker* tracker; -pcl::PointCloud::Ptr history_pointcloud(new pcl::PointCloud); -pcl::PointCloud::Ptr detection_history_pointcloud(new pcl::PointCloud); -bool swissranger; -double min_confidence; -double min_confidence_sr; -double min_confidence_detections; -double min_confidence_detections_sr; -std::vector camera_colors; // vector containing colors to use to identify cameras in the network -std::map color_map; // map between camera frame_id and color -// Chi square distribution -std::map chi_map; -bool velocity_in_motion_term; -double acceleration_variance; -double position_variance_weight; -double voxel_size; -double gate_distance; 
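Two of the globals above, chi_map (its template arguments were stripped in this diff, but usage shows it maps a probability to a chi-square quantile) and gate_distance, implement the association gate: a detection can only be matched to a track whose squared Mahalanobis distance stays below the quantile chosen via gate_distance_probability. A minimal sketch of the lookup-with-fallback the node performs, using the 4-degree-of-freedom values that fillChiMap() installs when velocity is part of the state; the final distance check is illustrative, not the node's code:

#include <cstdio>
#include <map>

int main()
{
  std::map<double, double> chi_map;  // probability -> chi-square quantile (4 DOF)
  chi_map[0.9]   = 7.779;
  chi_map[0.95]  = 9.488;
  chi_map[0.99]  = 13.277;
  chi_map[0.999] = 18.467;

  // Fall back to the 0.999 quantile when the requested probability is unknown:
  double requested = 0.95;           // gate_distance_probability from the config
  double gate = chi_map.count(requested) ? chi_map[requested] : chi_map[0.999];

  double mahalanobis_sq = 8.1;       // hypothetical detection/track distance
  std::printf("gate=%.3f -> %s\n", gate,
              mahalanobis_sq < gate ? "associate" : "reject");
  return 0;
}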
-bool calibration_refinement; -std::map registration_matrices; -double max_detection_delay; -ros::Time latest_time; - -std::map last_received_detection_; -ros::Duration max_time_between_detections_; - -std::map > number_messages_delay_map_; - -/** - * \brief Create marker to be visualized in RViz - * - * \param[in] id The marker ID. - * \param[in] frame_id The marker reference frame. - * \param[in] position The marker position. - * \param[in] color The marker color. - */ -visualization_msgs::Marker -createMarker (int id, std::string frame_id, ros::Time stamp, Eigen::Vector3d position, cv::Vec3f color) -{ - visualization_msgs::Marker marker; - - marker.header.frame_id = world_frame_id; - marker.header.stamp = stamp; - marker.ns = frame_id; - marker.id = id; - marker.type = visualization_msgs::Marker::SPHERE; - marker.action = visualization_msgs::Marker::ADD; - marker.pose.position.x = position(0); - marker.pose.position.y = position(1); - marker.pose.position.z = position(2); - marker.scale.x = 0.1; - marker.scale.y = 0.1; - marker.scale.z = 0.1; - marker.color.r = color(0); - marker.color.g = color(1); - marker.color.b = color(2); - marker.color.a = 1.0; - marker.lifetime = ros::Duration(0.2); - - return marker; -} - -void -plotCameraLegend (std::map curr_color_map) -{ - // Compose camera legend: - cv::Mat legend_image = cv::Mat::zeros(500, 500, CV_8UC3); - for(std::map::iterator colormap_iterator = curr_color_map.begin(); colormap_iterator != curr_color_map.end(); colormap_iterator++) - { - int color_index = colormap_iterator->second; - cv::Vec3f color = camera_colors[color_index]; - int y_coord = color_index * legend_image.rows / (curr_color_map.size()+1) + 0.5 * legend_image.rows / (curr_color_map.size()+1); - cv::line(legend_image, cv::Point(0,y_coord), cv::Point(100,y_coord), cv::Scalar(255*color(2), 255*color(1), 255*color(0)), 8); - cv::putText(legend_image, colormap_iterator->first, cv::Point(110,y_coord), 1, 1, cv::Scalar(255, 255, 255), 1); - } - - // Display the cv image - cv::imshow("Camera legend", legend_image); - cv::waitKey(1); -} - -Eigen::Matrix4d -readMatrixFromFile (std::string filename) -{ - Eigen::Matrix4d matrix; - std::string line; - std::ifstream myfile (filename.c_str()); - if (myfile.is_open()) - { - int k = 0; - std::string number; - while (myfile >> number) - { - matrix(int(k/4), int(k%4)) = std::atof(number.c_str()); - k++; - } - myfile.close(); - } - - std::cout << matrix << std::endl; - - return matrix; -} - -/** - * \brief Read the DetectionArray message and use the detections for creating/updating/deleting tracks - * - * \param[in] msg the DetectionArray message. 
- */ -void -detection_cb(const opt_msgs::DetectionArray::ConstPtr& msg) -{ - // Read message header information: - std::string frame_id = msg->header.frame_id; - ros::Time frame_time = msg->header.stamp; - - std::string frame_id_tmp = frame_id; - int pos = frame_id_tmp.find("_rgb_optical_frame"); - if (pos != std::string::npos) - frame_id_tmp.replace(pos, std::string("_rgb_optical_frame").size(), ""); - pos = frame_id_tmp.find("_depth_optical_frame"); - if (pos != std::string::npos) - frame_id_tmp.replace(pos, std::string("_depth_optical_frame").size(), ""); - last_received_detection_[frame_id_tmp] = frame_time; - - // Compute delay of detection message, if any: - double time_delay = 0.0; - if (frame_time > latest_time) - { - latest_time = frame_time; - time_delay = 0.0; - } - else - { - time_delay = (latest_time - frame_time).toSec(); - } - - tf::StampedTransform transform; - tf::StampedTransform inverse_transform; - // cv_bridge::CvImage::Ptr cvPtr; - - try - { - // Read transforms between camera frame and world frame: - if (!extrinsic_calibration) - { - static tf::TransformBroadcaster world_to_camera_tf_publisher; -// world_to_camera_tf_publisher.sendTransform(tf::StampedTransform(camera_frame_to_world_transform, ros::Time::now(), world_frame_id, frame_id)); - world_to_camera_tf_publisher.sendTransform(tf::StampedTransform(world_to_camera_frame_transform, ros::Time::now(), frame_id, world_frame_id)); - } - - //Calculate direct and inverse transforms between camera and world frame: - tf_listener->lookupTransform(world_frame_id, frame_id, ros::Time(0), transform); - tf_listener->lookupTransform(frame_id, world_frame_id, ros::Time(0), inverse_transform); - - // cvPtr = cv_bridge::toCvCopy(msg->image, sensor_msgs::image_encodings::BGR8); - - // Read camera intrinsic parameters: - Eigen::Matrix3d intrinsic_matrix; - for(int i = 0; i < 3; i++) - for(int j = 0; j < 3; j++) - intrinsic_matrix(i, j) = msg->intrinsic_matrix[i * 3 + j]; - - // Add a new DetectionSource or update an existing one: - if(detection_sources_map.find(frame_id) == detection_sources_map.end()) - { - detection_sources_map[frame_id] = new open_ptrack::detection::DetectionSource(cv::Mat(0, 0, CV_8UC3), - transform, inverse_transform, intrinsic_matrix, frame_time, frame_id); - } - else - { - detection_sources_map[frame_id]->update(cv::Mat(0, 0, CV_8UC3), transform, inverse_transform, - intrinsic_matrix, frame_time, frame_id); - double d = detection_sources_map[frame_id]->getDuration().toSec() / period; - int lostFrames = int(round(d)) - 1; - } - open_ptrack::detection::DetectionSource* source = detection_sources_map[frame_id]; - - // Create a Detection object for every detection in the detection message: - std::vector detections_vector; - for(std::vector::const_iterator it = msg->detections.begin(); - it != msg->detections.end(); it++) - { - detections_vector.push_back(open_ptrack::detection::Detection(*it, source)); - } - - // Convert HOG+SVM confidences to HAAR+ADABOOST-like people detection confidences: - if (not std::strcmp(msg->confidence_type.c_str(), "hog+svm")) - { - for(unsigned int i = 0; i < detections_vector.size(); i++) - { -// double new_confidence = detections_vector[i].getConfidence(); -// new_confidence = (new_confidence - min_confidence_detections_sr) / (min_confidence_sr - min_confidence_detections_sr) * -// (min_confidence - min_confidence_detections) + min_confidence_detections; -// detections_vector[i].setConfidence(new_confidence+2); - - double new_confidence = detections_vector[i].getConfidence(); - 
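Earlier in detection_cb the detector's frame id is normalised: the "_rgb_optical_frame" and "_depth_optical_frame" suffixes are stripped so both optical frames of one sensor share a single entry in last_received_detection_. A self-contained sketch of that normalisation; the helper name is made up, and it uses std::string::size_type for the find() result instead of the int the node uses:

#include <cstdio>
#include <string>

// Strip the optical-frame suffixes so detections coming from the RGB and
// depth frames of the same sensor map to one bookkeeping key.
std::string baseSensorId(std::string frame_id)
{
  const char * suffixes[] = { "_rgb_optical_frame", "_depth_optical_frame" };
  for (int i = 0; i < 2; ++i)
  {
    std::string::size_type pos = frame_id.find(suffixes[i]);
    if (pos != std::string::npos)
      frame_id.erase(pos, std::string(suffixes[i]).size());
  }
  return frame_id;
}

int main()
{
  // Prints "/kinect01":
  std::printf("%s\n", baseSensorId("/kinect01_rgb_optical_frame").c_str());
  return 0;
}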
new_confidence = (new_confidence - (-3)) / 3 * 4 + 2; - detections_vector[i].setConfidence(new_confidence); - - //std::cout << detections_vector[i].getConfidence() << std::endl; - } - } - - // Detection correction by means of calibration refinement: - if (calibration_refinement) - { - if (strcmp(frame_id.substr(0,1).c_str(), "/") == 0) - { - frame_id = frame_id.substr(1, frame_id.size() - 1); - } - - Eigen::Matrix4d registration_matrix; - std::map::iterator registration_matrices_iterator = registration_matrices.find(frame_id); - if (registration_matrices_iterator != registration_matrices.end()) - { // camera already present - registration_matrix = registration_matrices_iterator->second; - } - else - { // camera not present - std::cout << "Reading refinement matrix of " << frame_id << " from file." << std::endl; - std::string refinement_filename = ros::package::getPath("opt_calibration") + "/conf/registration_" + frame_id + ".txt"; - std::ifstream f(refinement_filename.c_str()); - if (f.good()) // if the file exists - { - f.close(); - registration_matrix = readMatrixFromFile (refinement_filename); - registration_matrices.insert(std::pair (frame_id, registration_matrix)); - } - else // if the file does not exist - { - // insert the identity matrix - std::cout << "Refinement file not found! Not doing refinement for this sensor." << std::endl; - registration_matrices.insert(std::pair (frame_id, Eigen::Matrix4d::Identity())); - } - } - - if(detections_vector.size() > 0) - { - // Apply detection refinement: - for(unsigned int i = 0; i < detections_vector.size(); i++) - { - Eigen::Vector3d old_centroid = detections_vector[i].getWorldCentroid(); - -// std::cout << frame_id << std::endl; -// std::cout << registration_matrix << std::endl; -// std::cout << "old_centroid: " << old_centroid.transpose() << std::endl; - Eigen::Vector4d old_centroid_homogeneous(old_centroid(0), old_centroid(1), old_centroid(2), 1.0); - Eigen::Vector4d refined_centroid = registration_matrix * old_centroid_homogeneous; - detections_vector[i].setWorldCentroid(Eigen::Vector3d(refined_centroid(0), refined_centroid(1), refined_centroid(2))); - - Eigen::Vector3d refined_centroid2 = detections_vector[i].getWorldCentroid(); -// std::cout << "refined_centroid2: " << refined_centroid2.transpose() << std::endl; -// std::cout << "difference: " << (refined_centroid2 - old_centroid).transpose() << std::endl << std::endl; - } - } - } - - // If at least one detection has been received: - if((detections_vector.size() > 0) && (time_delay < max_detection_delay)) - { - // Perform detection-track association: - tracker->newFrame(detections_vector); - tracker->updateTracks(); - - // Create a TrackingResult message with the output of the tracking process - if(output_tracking_results) - { - opt_msgs::TrackArray::Ptr tracking_results_msg(new opt_msgs::TrackArray); - tracking_results_msg->header.stamp = ros::Time::now();//frame_time; - tracking_results_msg->header.frame_id = world_frame_id; - tracker->toMsg(tracking_results_msg); - // Publish tracking message: - results_pub.publish(tracking_results_msg); - } - -// //Show the tracking process' results as an image -// if(output_image_rgb) -// { -// tracker->drawRgb(); -// for(std::map::iterator -// it = detection_sources_map.begin(); it != detection_sources_map.end(); it++) -// { -// cv::Mat image_to_show = it->second->getImage(); -// if (not vertical) -// { -// //cv::imshow("TRACKER " + it->first, image_to_show); -// cv::imshow("TRACKER ", image_to_show); // TODO: use the above row if using multiple 
cameras -// } -// else -// { -// cv::flip(image_to_show.t(), image_to_show, -1); -// cv::flip(image_to_show, image_to_show, 1); -// //cv::imshow("TRACKER " + it->first, image_to_show); -// cv::imshow("TRACKER ", image_to_show); // TODO: use the above row if using multiple cameras -// } -// cv::waitKey(2); -// } -// } - - // Publish IDs of active tracks: - opt_msgs::IDArray::Ptr alive_ids_msg(new opt_msgs::IDArray); - alive_ids_msg->header.stamp = ros::Time::now(); - alive_ids_msg->header.frame_id = world_frame_id; - tracker->getAliveIDs (alive_ids_msg); - alive_ids_pub.publish (alive_ids_msg); - - // Show the pose of each tracked object with a 3D marker (to be visualized with ROS RViz) - if(output_markers) - { - visualization_msgs::MarkerArray::Ptr marker_msg(new visualization_msgs::MarkerArray); - tracker->toMarkerArray(marker_msg); - marker_pub.publish(marker_msg); - } - - // Show the history of the movements in 3D (3D trajectory) of each tracked object as a PointCloud (which can be visualized in RViz) - if(output_history_pointcloud) - { - history_pointcloud->header.stamp = frame_time.toNSec() / 1e3; // Convert from ns to us - history_pointcloud->header.frame_id = world_frame_id; - starting_index = tracker->appendToPointCloud(history_pointcloud, starting_index, - output_history_size); - pointcloud_pub.publish(history_pointcloud); - } - - // Create message for showing detection positions in RViz: - if (output_detection_results) - { - visualization_msgs::MarkerArray::Ptr marker_msg(new visualization_msgs::MarkerArray); - detection_history_pointcloud->header.stamp = frame_time.toNSec() / 1e3; // Convert from ns to us - detection_history_pointcloud->header.frame_id = world_frame_id; - std::string frame_id = detections_vector[0].getSource()->getFrameId(); - - // Define color: - int color_index; - std::map::iterator colormap_iterator = color_map.find(frame_id); - if (colormap_iterator != color_map.end()) - { // camera already present - color_index = colormap_iterator->second; - } - else - { // camera not present - color_index = color_map.size(); - color_map.insert(std::pair (frame_id, color_index)); - - // Plot legend with camera names and colors: - plotCameraLegend (color_map); - } - for (unsigned int i = 0; i < detections_vector.size(); i++) - { - // Create marker and add it to message: - Eigen::Vector3d centroid = detections_vector[i].getWorldCentroid(); - visualization_msgs::Marker marker = createMarker (i, frame_id, frame_time, centroid, camera_colors[color_index]); - marker_msg->markers.push_back(marker); - - // Point cloud: - pcl::PointXYZRGB point; - point.x = marker.pose.position.x; - point.y = marker.pose.position.y; - point.z = marker.pose.position.z; - point.r = marker.color.r * 255.0f; - point.g = marker.color.g * 255.0f; - point.b = marker.color.b * 255.0f; - detection_insert_index = (detection_insert_index + 1) % detection_history_size; - detection_history_pointcloud->points[detection_insert_index] = point; - } - detection_marker_pub.publish(marker_msg); // publish marker message - detection_trajectory_pub.publish(detection_history_pointcloud); // publish trajectory message - } - } - else // if no detections have been received or detection_delay is above max_detection_delay - { - if(output_tracking_results) - { // Publish an empty tracking message - opt_msgs::TrackArray::Ptr tracking_results_msg(new opt_msgs::TrackArray); - tracking_results_msg->header.stamp = frame_time; - tracking_results_msg->header.frame_id = world_frame_id; - results_pub.publish(tracking_results_msg); - } - 
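For the RViz debug output above, every camera in the network gets a stable marker color: the first message from an unseen frame_id claims the next free index into camera_colors (and triggers a legend replot), while later messages reuse the cached index from color_map. A minimal sketch of that first-seen assignment; the function name is illustrative:

#include <cstdio>
#include <map>
#include <string>

int colorIndexFor(const std::string & frame_id,
                  std::map<std::string, int> & color_map)
{
  std::map<std::string, int>::const_iterator it = color_map.find(frame_id);
  if (it != color_map.end())
    return it->second;                          // camera already registered
  int index = static_cast<int>(color_map.size());
  color_map[frame_id] = index;                  // new camera: next free index
  return index;                                 // (the node also replots the legend here)
}

int main()
{
  std::map<std::string, int> color_map;
  int a = colorIndexFor("/kinect01", color_map);  // 0
  int b = colorIndexFor("/kinect02", color_map);  // 1
  int c = colorIndexFor("/kinect01", color_map);  // 0 again
  std::printf("%d %d %d\n", a, b, c);
  return 0;
}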
if((detections_vector.size() > 0) && (time_delay >= max_detection_delay)) - { - if (number_messages_delay_map_.find(msg->header.frame_id) == number_messages_delay_map_.end()) - number_messages_delay_map_[msg->header.frame_id] = std::pair(0.0, 0); - - number_messages_delay_map_[msg->header.frame_id].first += time_delay; - number_messages_delay_map_[msg->header.frame_id].second++; - - if (number_messages_delay_map_[msg->header.frame_id].second == 100) - { - double avg = number_messages_delay_map_[msg->header.frame_id].first / number_messages_delay_map_[msg->header.frame_id].second; - ROS_WARN_STREAM("[" << msg->header.frame_id << "] received 100 detections with average delay " << avg << " > " << max_detection_delay); - number_messages_delay_map_[msg->header.frame_id] = std::pair(0.0, 0); - } - } - } - } -// catch(cv_bridge::Exception& ex) -// { -// ROS_ERROR("cv_bridge exception: %s", ex.what()); -// } - catch(tf::TransformException& ex) - { - ROS_ERROR("transform exception: %s", ex.what()); - } -} - -void -generateColors(int colors_number, std::vector& colors) -{ - for (unsigned int i = 0; i < colors_number; i++) - { - colors.push_back(cv::Vec3f( - float(rand() % 256) / 255, - float(rand() % 256) / 255, - float(rand() % 256) / 255)); - } -} - -void -fillChiMap(std::map& chi_map, bool velocity_in_motion_term) -{ - if (velocity_in_motion_term) // chi square values with state dimension = 4 - { - chi_map[0.5] = 3.357; - chi_map[0.75] = 5.385; - chi_map[0.8] = 5.989; - chi_map[0.9] = 7.779; - chi_map[0.95] = 9.488; - chi_map[0.98] = 11.668; - chi_map[0.99] = 13.277; - chi_map[0.995] = 14.860; - chi_map[0.998] = 16.924; - chi_map[0.999] = 18.467; - } - else // chi square values with state dimension = 2 - { - chi_map[0.5] = 1.386; - chi_map[0.75] = 2.773; - chi_map[0.8] = 3.219; - chi_map[0.9] = 4.605; - chi_map[0.95] = 5.991; - chi_map[0.98] = 7.824; - chi_map[0.99] = 9.210; - chi_map[0.995] = 10.597; - chi_map[0.998] = 12.429; - chi_map[0.999] = 13.816; - } -} - -void -configCb(Config &config, uint32_t level) -{ - tracker->setMinConfidenceForTrackInitialization (config.min_confidence_initialization); - max_detection_delay = config.max_detection_delay; - calibration_refinement = config.calibration_refinement; - tracker->setSecBeforeOld (config.sec_before_old); - tracker->setSecBeforeFake (config.sec_before_fake); - tracker->setSecRemainNew (config.sec_remain_new); - tracker->setDetectionsToValidate (config.detections_to_validate); - tracker->setDetectorLikelihood (config.detector_likelihood); - tracker->setLikelihoodWeights (config.detector_weight*chi_map[0.999]/18.467, config.motion_weight); - -// if (config.velocity_in_motion_term != velocity_in_motion_term) -// { -// // Take chi square values with regards to the state dimension: -// fillChiMap(chi_map, config.velocity_in_motion_term); -// -// double position_variance = config.position_variance_weight*std::pow(2 * voxel_size, 2) / 12.0; -// tracker->setVelocityInMotionTerm (config.velocity_in_motion_term, config.acceleration_variance, position_variance); -// } -// else -// { - if (config.acceleration_variance != acceleration_variance) - { - tracker->setAccelerationVariance (config.acceleration_variance); - } - - if (config.position_variance_weight != position_variance_weight) - { - double position_variance = config.position_variance_weight*std::pow(2 * voxel_size, 2) / 12.0; - tracker->setPositionVariance (position_variance); - } -// } - - gate_distance = chi_map.find(config.gate_distance_probability) != chi_map.end() ? 
chi_map[config.gate_distance_probability] : chi_map[0.999]; - tracker->setGateDistance (config.gate_distance_probability); -} - -int -main(int argc, char** argv) -{ - - ros::init(argc, argv, "tracker"); - ros::NodeHandle nh("~"); - - // Subscribers/Publishers: - ros::Subscriber input_sub = nh.subscribe("input", 5, detection_cb); - marker_pub_tmp = nh.advertise("/tracker/markers", 1); - marker_pub = nh.advertise("/tracker/markers_array", 1); - pointcloud_pub = nh.advertise >("/tracker/history", 1); - results_pub = nh.advertise("/tracker/tracks", 100); - detection_marker_pub = nh.advertise("/detector/markers_array", 1); - detection_trajectory_pub = nh.advertise >("/detector/history", 1); - alive_ids_pub = nh.advertise("/tracker/alive_ids", 1); - - // Dynamic reconfigure - boost::recursive_mutex config_mutex_; - boost::shared_ptr reconfigure_server_; - - tf_listener = new tf::TransformListener(); - - // Read tracking parameters: - nh.param("world_frame_id", world_frame_id, std::string("/odom")); - - nh.param("orientation/vertical", vertical, false); - nh.param("extrinsic_calibration", extrinsic_calibration, false); - - nh.param("voxel_size", voxel_size, 0.06); - - double rate; - nh.param("rate", rate, 30.0); - -// double min_confidence; - nh.param("min_confidence_initialization", min_confidence, -2.5); //0.0); - - double chi_value; - nh.param("gate_distance_probability", chi_value, 0.9); - - nh.param("acceleration_variance", acceleration_variance, 1.0); - - nh.param("position_variance_weight", position_variance_weight, 1.0); - - bool detector_likelihood; - nh.param("detector_likelihood", detector_likelihood, false); - - nh.param("velocity_in_motion_term", velocity_in_motion_term, false); - - double detector_weight; - nh.param("detector_weight", detector_weight, -1.0); - - double motion_weight; - nh.param("motion_weight", motion_weight, 0.5); - - double sec_before_old; - nh.param("sec_before_old", sec_before_old, 3.6); - - double sec_before_fake; - nh.param("sec_before_fake", sec_before_fake, 2.4); - - double sec_remain_new; - nh.param("sec_remain_new", sec_remain_new, 1.2); - - int detections_to_validate; - nh.param("detections_to_validate", detections_to_validate, 5); - - double haar_disp_ada_min_confidence, ground_based_people_detection_min_confidence; - nh.param("haar_disp_ada_min_confidence", haar_disp_ada_min_confidence, -2.5); //0.0); - nh.param("ground_based_people_detection_min_confidence", ground_based_people_detection_min_confidence, -2.5); //0.0); - - nh.param("swissranger", swissranger, false); - - nh.param("ground_based_people_detection_min_confidence_sr", min_confidence_detections_sr, -1.5); - nh.param("min_confidence_initialization_sr", min_confidence_sr, -1.1); - - nh.param("history_pointcloud", output_history_pointcloud, false); - nh.param("history_size", output_history_size, 1000); - nh.param("markers", output_markers, true); - nh.param("image_rgb", output_image_rgb, true); - nh.param("tracking_results", output_tracking_results, true); - - nh.param("detection_debug", output_detection_results, true); - nh.param("detection_history_size", detection_history_size, 1000); - - bool debug_mode; - nh.param("debug_active", debug_mode, false); - - nh.param("calibration_refinement", calibration_refinement, false); - nh.param("max_detection_delay", max_detection_delay, 3.0); - - double max_time_between_detections_d; - nh.param("max_time_between_detections", max_time_between_detections_d, 10.0); - max_time_between_detections_ = ros::Duration(max_time_between_detections_d); - - // Read 
number of sensors in the network: - int num_cameras = 1; - if (extrinsic_calibration) - { - num_cameras = 0; - XmlRpc::XmlRpcValue network; - nh.getParam("network", network); - for (unsigned i = 0; i < network.size(); i++) - { - num_cameras += network[i]["sensors"].size(); - for (unsigned j = 0; j < network[i]["sensors"].size(); j++) - { - std::string frame_id = network[i]["sensors"][j]["id"]; - last_received_detection_["/" + frame_id] = ros::Time(0); - } - } - } - - // Set min_confidence_detections variable based on sensor type: - if (swissranger) - min_confidence_detections = ground_based_people_detection_min_confidence; - else - min_confidence_detections = haar_disp_ada_min_confidence; - - // Take chi square values with regards to the state dimension: - fillChiMap(chi_map, velocity_in_motion_term); - - // Compute additional parameters: - period = 1.0 / rate; - gate_distance = chi_map.find(chi_value) != chi_map.end() ? chi_map[chi_value] : chi_map[0.999]; - - double position_variance; -// position_variance = 3*std::pow(2 * voxel_size, 2) / 12.0; // DEFAULT - position_variance = position_variance_weight*std::pow(2 * voxel_size, 2) / 12.0; - std::vector likelihood_weights; - likelihood_weights.push_back(detector_weight*chi_map[0.999]/18.467); - likelihood_weights.push_back(motion_weight); - - // Generate colors used to identify different cameras: - generateColors(num_cameras, camera_colors); - - // Initialize point cloud containing detections trajectory: - pcl::PointXYZRGB nan_point; - nan_point.x = std::numeric_limits::quiet_NaN(); - nan_point.y = std::numeric_limits::quiet_NaN(); - nan_point.z = std::numeric_limits::quiet_NaN(); - detection_history_pointcloud->points.resize(detection_history_size, nan_point); - - ros::Rate hz(num_cameras*rate); - -// cv::namedWindow("TRACKER ", CV_WINDOW_NORMAL); - - // Initialize an instance of the Tracker object: - tracker = new open_ptrack::tracking::Tracker( - gate_distance, - detector_likelihood, - likelihood_weights, - velocity_in_motion_term, - min_confidence, - min_confidence_detections, - sec_before_old, - sec_before_fake, - sec_remain_new, - detections_to_validate, - period, - position_variance, - acceleration_variance, - world_frame_id, - debug_mode, - vertical); - - starting_index = 0; - - // Set up dynamic reconfiguration - ReconfigureServer::CallbackType f = boost::bind(&configCb, _1, _2); - reconfigure_server_.reset(new ReconfigureServer(config_mutex_, nh)); - reconfigure_server_->setCallback(f); - - // If extrinsic calibration is not available: - if (!extrinsic_calibration) - { // Set fixed transformation from rgb frame and base_link - tf::Vector3 fixed_translation(0, 0, 0); // camera_rgb_optical_frame -> world - tf::Quaternion fixed_rotation(-0.5, 0.5, -0.5, -0.5); // camera_rgb_optical_frame -> world - tf::Vector3 inv_fixed_translation(0.0, 0.0, 0); // world -> camera_rgb_optical_frame - tf::Quaternion inv_fixed_rotation(-0.5, 0.5, -0.5, 0.5); // world -> camera_rgb_optical_frame - world_to_camera_frame_transform = tf::Transform(fixed_rotation, fixed_translation); - camera_frame_to_world_transform = tf::Transform(inv_fixed_rotation, inv_fixed_translation); - } - - // Spin and execute callbacks: -// ros::spin(); - - std::map last_message; - for (std::map::const_iterator it = last_received_detection_.begin(); it != last_received_detection_.end(); ++it) - last_message[it->first] = ros::Time::now(); - - ros::Time last_camera_legend_update = ros::Time::now(); // last time when the camera legend has been updated - - while (ros::ok()) - { - 
ros::spinOnce(); - ros::Time now = ros::Time::now(); - for (std::map::const_iterator it = last_received_detection_.begin(); it != last_received_detection_.end(); ++it) - { - ros::Duration duration(now - it->second); - if (duration > max_time_between_detections_) - { - if (it->second > ros::Time(0) and now - last_message[it->first] > max_time_between_detections_) - { - ROS_WARN_STREAM("[" << it->first << "] last detection was " << duration.toSec() << " seconds ago"); - last_message[it->first] = now; - } - else if (now - last_message[it->first] > max_time_between_detections_) - { - ROS_WARN_STREAM("[" << it->first << "] still waiting for detection messages..."); - last_message[it->first] = now; - } - } - - // Update camera legend every second: - if ((now - last_camera_legend_update) > ros::Duration(1.0)) // if more than one second passed since last update - { // update OpenCV image with a waitKey: - cv::waitKey(1); - last_camera_legend_update = now; - } - } - hz.sleep(); - } - - return 0; -} diff --git a/tracking/launch/tracking/cfg/MovingAverageSmoother.cfg b/tracking/launch/tracking/cfg/MovingAverageSmoother.cfg deleted file mode 100644 index 8e9739a..0000000 --- a/tracking/launch/tracking/cfg/MovingAverageSmoother.cfg +++ /dev/null @@ -1,40 +0,0 @@ -#! /usr/bin/env python - -# Declare parameters that control the track smoother - -PACKAGE='tracking' - -from dynamic_reconfigure.parameter_generator_catkin import * - -gen = ParameterGenerator() - -######################### -## Tracking parameters ## -######################### -# Rate at which messages are published: -#gen.add("rate", double_t, 0, "Rate at which messages are published", 30.0, 0.0, 100.0) - -# Time lapse (after last detection) before a track is removed: -gen.add("track_lifetime_with_no_detections", double_t, 0, "Time lapse (after last detection) before a track is removed", 0.5, 0.0, 5.0) -# Interval between two heartbeat messages: -gen.add("heartbeat_time", double_t, 0, "Interval between two heartbeat messages", 5.0, 1.0, 20.0) -# Flag stating if empty tracking messages should be published or not (if False, a heartbeat message is published): -gen.add("publish_empty", bool_t, 0, "Flag stating if empty tracking messages should be published or not (if False, a heartbeat message is published)", True) - -########################## -## Smoothing parameters ## -########################## -# Number of positions to take into account when calculating the average position: -gen.add("window_size", int_t, 0, "Number of positions to take into account when calculating the average position", 10, 1, 100) -# Time lapse before a position is removed from the position list. 
This option removes old data even if they are within the averaging window: -gen.add("position_lifetime", double_t, 0, "Time lapse before a position is removed from the position list", 1.0, 0.0, 5.0) - -########### -## Debug ## -########### -# Dimension of the point cloud containing track trajectories: -gen.add("max_history_size", int_t, 0, "Dimension of the point cloud containing track trajectories", 500, 1, 10000) - -# First string value is node name, used only for generating documentation -# Second string value ("TrackerSmoother") is name of class and generated -# .h file, with "Config" added, so class TrackerSmootherConfig -exit(gen.generate(PACKAGE, "moving_average_smoother", "MovingAverageSmoother")) diff --git a/tracking/launch/tracking/cfg/Tracker.cfg b/tracking/launch/tracking/cfg/Tracker.cfg deleted file mode 100644 index b515734..0000000 --- a/tracking/launch/tracking/cfg/Tracker.cfg +++ /dev/null @@ -1,78 +0,0 @@ -#! /usr/bin/env python - -# Declare parameters that control people tracking - -PACKAGE='tracking' - -from dynamic_reconfigure.parameter_generator_catkin import * - -gen = ParameterGenerator() - -######################### -## Tracking parameters ## -######################### -# Minimum confidence for track initialization: -gen.add("min_confidence_initialization", double_t, 0, "Minimum confidence for track initialization", 4.0, -10.0, 10.0) -# Maximum delay that a detection message can have in order to be considered for tracking: -gen.add("max_detection_delay", double_t, 0, "Maximum delay that a detection message can have in order to be considered for tracking", 2.0, 0.0, 3.0) -# Flag stating if the results of a calibration refinement procedure should be used to correct detection positions: -gen.add("calibration_refinement", bool_t, 0, "Flag stating if the results of a calibration refinement procedure should be used to correct detection positions", False) - -####################### -## Motion parameters ## -####################### -# The higher this value, the more motion variance is allowed -# it can assume these values: 0.5 0.75 0.8 0.9 0.95 0.98 0.99 0.995 0.998 0.999 -gate_distance_enum = gen.enum([ gen.const("0", double_t, 0.5, "0.5"), - gen.const("1", double_t, 0.75, "0.75"), - gen.const("2", double_t, 0.8, "0.8"), - gen.const("3", double_t, 0.9, "0.9"), - gen.const("4", double_t, 0.95, "0.95"), - gen.const("5", double_t, 0.98, "0.98"), - gen.const("6", double_t, 0.99, "0.99"), - gen.const("7", double_t, 0.995, "0.995"), - gen.const("8", double_t, 0.998, "0.998"), - gen.const("9", double_t, 0.999, "0.999")], - "An enum to set gate_distance_probability") -gen.add("gate_distance_probability", double_t, 0, "The higher this value, the more motion variance is allowed", 0.99, 0.5, 0.999, edit_method=gate_distance_enum) -# Acceleration variance in people motion model: -gen.add("acceleration_variance", double_t, 0, "Acceleration variance in people motion model", 100, 0.1, 1000) -# Additional weight for position variance in people motion model: -gen.add("position_variance_weight", double_t, 0, "Additional weight for position variance in people motion model", 30, 1, 100) - -################################# -## Data association parameters ## -################################# -# Flag stating if detection likelihood should be used in data association: -gen.add("detector_likelihood", bool_t, 0, "Flag stating if detection likelihood should be used in data association", True) -# Weight of detection likelihood in data association: -gen.add("detector_weight", double_t, 0, "Weight of 
detection likelihood in data association", -0.25, -10.0, 0.0) -# Flag stating if track velocity should be considered in data association: -#gen.add("velocity_in_motion_term", bool_t, 0, " Flag stating if track velocity should be considered in data association", True) -# Weight of motion likelihood in data association: -gen.add("motion_weight", double_t, 0, "Weight of motion likelihood in data association", 0.25, 0.0, 10.0) - -################################ -## Tracking policy parameters ## -################################ -# Track duration (seconds) after last valid detection: -gen.add("sec_before_old", double_t, 0, "Track duration (seconds) after last valid detection", 8.0, 0.0, 100.0) -# Seconds within which a track should be validated (otherwise it is discarded): -gen.add("sec_before_fake", double_t, 0, "Seconds within which a track should be validated (otherwise it is discarded)", 2.4, 0.0, 10.0) -# Seconds after which a visible track obtains NORMAL status: -gen.add("sec_remain_new", double_t, 0, "Seconds after which a visible track obtains NORMAL status", 1.2, 0.0, 10.0) -# Minimum number of detection<->track associations needed for validating a track: -gen.add("detections_to_validate", int_t, 0, "Minimum number of detection<->track associations needed for validating a track", 3, 1, 20) - -########### -## Debug ## -########### -# Dimension of the point cloud containing track trajectories: -#gen.add("history_size", int_t, 0, "Dimension of the point cloud containing track trajectories", 500, 1, 10000) -# Dimension of the point cloud containing detection trajectories: -#gen.add("detection_history_size", int_t, 0, "Dimension of the point cloud containing detection trajectories", 500, 1, 10000) - -# First string value is node name, used only for generating documentation -# Second string value ("Tracker") is name of class and generated -# .h file, with "Config" added, so class TrackerConfig -exit(gen.generate(PACKAGE, "tracking", "Tracker")) diff --git a/tracking/launch/tracking/cfg/TrackerSmoother.cfg b/tracking/launch/tracking/cfg/TrackerSmoother.cfg deleted file mode 100644 index 0344c49..0000000 --- a/tracking/launch/tracking/cfg/TrackerSmoother.cfg +++ /dev/null @@ -1,42 +0,0 @@ -#! 
/usr/bin/env python - -# Declare parameters that control the track smoother - -PACKAGE='tracking' - -from dynamic_reconfigure.parameter_generator_catkin import * - -gen = ParameterGenerator() - -######################### -## Tracking parameters ## -######################### -# Rate at which messages are published: -#gen.add("rate", double_t, 0, "Rate at which messages are published", 30.0, 0.0, 100.0) -# Time lapse (after last detection) before a track is removed: -gen.add("track_lifetime_with_no_detections", double_t, 0, "Time lapse (after last detection) before a track is removed", 0.5, 0.0, 5.0) -# Interval between two heartbeat messages: -gen.add("heartbeat_time", double_t, 0, "Interval between two heartbeat messages", 5.0, 1.0, 20.0) -# Flag stating if empty tracking messages should be published or not (if False, a heartbeat message is published): -gen.add("publish_empty", bool_t, 0, "Flag stating if empty tracking messages should be published or not (if False, a heartbeat message is published)", True) - -########################## -## Smoothing parameters ## -########################## -# Error of the measurements provided by the tracker (0.0 means no error and therefore no smoothing will be applied): -gen.add("sigma_noise", double_t, 0, "Error of the measurements provided by the tracker (0.0 = no error: no smoothing will be applied)", 0.20, 0.0, 1.0) -# How much the acceleration of a track can vary (small values = high smoothing): -gen.add("sigma_process", double_t, 0, "How much the acceleration of a track can vary (small values = high smoothing)", 0.15, 0.0001, 1.0) -# Time lapse (after last detection) that the position of a track is predicted and published: -gen.add("prediction_duration", double_t, 0, "Time lapse (after last detection) that the position of a track is predicted and published", 0.1, 0.0, 10.0) - -########### -## Debug ## -########### -# Dimension of the point cloud containing track trajectories: -gen.add("max_history_size", int_t, 0, "Dimension of the point cloud containing track trajectories", 500, 1, 10000) - -# First string value is node name, used only for generating documentation -# Second string value ("TrackerSmoother") is name of class and generated -# .h file, with "Config" added, so class TrackerSmootherConfig -exit(gen.generate(PACKAGE, "tracker_smoother", "TrackerSmoother")) diff --git a/tracking/launch/tracking/conf/moving_average_filter.yaml b/tracking/launch/tracking/conf/moving_average_filter.yaml deleted file mode 100644 index 7c0e001..0000000 --- a/tracking/launch/tracking/conf/moving_average_filter.yaml +++ /dev/null @@ -1,26 +0,0 @@ -##################### -## Node parameters ## -##################### -# Tracking output frame rate: -rate: 30.0 -# Time lapse (after last detection) before a track is removed: -track_lifetime_with_no_detections: 1.0 -# Interval between two heartbeat messages: -heartbeat_time: 5.0 -# Flag stating if empty tracking messages should be published or not: -publish_empty: true - -########################## -## Smoothing parameters ## -########################## -# Number of positions to take into account when calculating the average position: -window_size: 10 -# Time lapse before a position is removed from the position list. 
This option removes old data even if they are within the averaging window: -position_lifetime: 1.0 - -########### -## Debug ## -########### -# Max size of the history pointcloud: -max_history_size: 1000 - diff --git a/tracking/launch/tracking/conf/multicamera_tracking.rviz b/tracking/launch/tracking/conf/multicamera_tracking.rviz deleted file mode 100644 index 868f31a..0000000 --- a/tracking/launch/tracking/conf/multicamera_tracking.rviz +++ /dev/null @@ -1,180 +0,0 @@ -Panels: - - Class: rviz/Displays - Help Height: 78 - Name: Displays - Property Tree Widget: - Expanded: - - /Global Options1 - - /Status1 - - /PointCloud21 - - /MarkerArray1 - Splitter Ratio: 0.526846 - Tree Height: 775 - - Class: rviz/Selection - Name: Selection - - Class: rviz/Tool Properties - Expanded: - - /2D Pose Estimate1 - - /2D Nav Goal1 - - /Publish Point1 - Name: Tool Properties - Splitter Ratio: 0.588679 - - Class: rviz/Views - Expanded: - - /Current View1 - Name: Views - Splitter Ratio: 0.5 - - Class: rviz/Time - Experimental: false - Name: Time - SyncMode: 0 - SyncSource: "" -Visualization Manager: - Class: "" - Displays: - - Alpha: 0.5 - Cell Size: 1 - Class: rviz/Grid - Color: 160; 160; 164 - Enabled: true - Line Style: - Line Width: 0.03 - Value: Lines - Name: Grid - Normal Cell Count: 0 - Offset: - X: 0 - Y: 0 - Z: 0 - Plane: XY - Plane Cell Count: 10 - Reference Frame: - Value: true - - Alpha: 1 - Autocompute Intensity Bounds: true - Autocompute Value Bounds: - Max Value: 10 - Min Value: -10 - Value: true - Axis: Z - Channel Name: intensity - Class: rviz/PointCloud2 - Color: 255; 255; 255 - Color Transformer: RGB8 - Decay Time: 0 - Enabled: false - Invert Rainbow: false - Max Color: 255; 255; 255 - Max Intensity: 4096 - Min Color: 0; 0; 0 - Min Intensity: 0 - Name: PointCloud2 - Position Transformer: XYZ - Queue Size: 10 - Selectable: true - Size (Pixels): 5 - Size (m): 0.01 - Style: Points - Topic: /tracker/history_smoothed - Use Fixed Frame: true - Use rainbow: true - Value: false - - Class: rviz/MarkerArray - Enabled: true - Marker Topic: /tracker/markers_array_smoothed - Name: MarkerArray - Namespaces: - numbers: true - people: true - Queue Size: 100 - Value: true - - Class: rviz/TF - Enabled: true - Frame Timeout: 15 - Frames: - All Enabled: false - Kinect: - Value: true - Kinect_depth_frame: - Value: true - Kinect_depth_optical_frame: - Value: true - Kinect_link: - Value: true - Kinect_rgb_frame: - Value: true - Kinect_rgb_optical_frame: - Value: true - world: - Value: true - Marker Scale: 1 - Name: TF - Show Arrows: true - Show Axes: true - Show Names: true - Tree: - world: - Kinect: - Kinect_link: - Kinect_depth_frame: - Kinect_depth_optical_frame: - {} - Kinect_rgb_frame: - Kinect_rgb_optical_frame: - {} - Update Interval: 0 - Value: true - Enabled: true - Global Options: - Background Color: 48; 48; 48 - Fixed Frame: world - Frame Rate: 60 - Name: root - Tools: - - Class: rviz/Interact - Hide Inactive Objects: true - - Class: rviz/MoveCamera - - Class: rviz/Select - - Class: rviz/FocusCamera - - Class: rviz/Measure - - Class: rviz/SetInitialPose - Topic: /initialpose - - Class: rviz/SetGoal - Topic: /move_base_simple/goal - - Class: rviz/PublishPoint - Single click: true - Topic: /clicked_point - Value: true - Views: - Current: - Class: rviz/Orbit - Distance: 7.62407 - Focal Point: - X: -0.326729 - Y: 0.467386 - Z: 1.2193 - Name: Current View - Near Clip Distance: 0.01 - Pitch: 1.5698 - Target Frame: - Value: Orbit (rviz) - Yaw: 1.5704 - Saved: ~ -Window Geometry: - Displays: - collapsed: false - 
Height: 1056 - Hide Left Dock: false - Hide Right Dock: true - QMainWindow State: 000000ff00000000fd00000004000000000000013c00000396fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000002800000396000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f0000037afc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000280000037a000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000007800000003efc0100000002fb0000000800540069006d0065010000000000000780000002f600fffffffb0000000800540069006d006501000000000000045000000000000000000000063e0000039600000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 - Selection: - collapsed: false - Time: - collapsed: false - Tool Properties: - collapsed: false - Views: - collapsed: true - Width: 1920 - X: 0 - Y: 24 diff --git a/tracking/launch/tracking/conf/tracker.yaml b/tracking/launch/tracking/conf/tracker.yaml deleted file mode 100644 index c637686..0000000 --- a/tracking/launch/tracking/conf/tracker.yaml +++ /dev/null @@ -1,74 +0,0 @@ -######################### -## Tracking parameters ## -######################### -# Mininum confidence for track initialization: -min_confidence_initialization: 4.0 -# Tracking maximum frame rate per sensor: -rate: 30 -# Tracking reference frame: -world_frame_id: "/world" -# Voxel size used for people detection: -voxel_size: 0.06 -# Flag stating if extrinsic (multicamera) calibration has been performed or not: -extrinsic_calibration: false - -######################## -## Sensor orientation ## -######################## -# Flag stating if the sensor is vertically placed, or not: -orientation: - vertical: false - -####################### -## Motion parameters ## -####################### -# The higher is this, the higher motion variance is allowed -# it can assume these values: 0.5 0.75 0.8 0.9 0.95 0.98 0.99 0.995 0.998 0.999 -gate_distance_probability: 0.999 -# Acceleration variance in people motion model: -acceleration_variance: 100 -# Additional weight for position variance in people motion model: -position_variance_weight: 30 - -################################# -## Data association parameters ## -################################# -# Flag stating if detection likelihood should be used in data association: -detector_likelihood: true -# Flag stating if track velocity should be considered in data association: -velocity_in_motion_term: true -# Weight of detection likelihood in data association: -detector_weight: -0.25 -# Weight of motion likelihood in data association: -motion_weight: 0.25 - -################################ -## Tracking policy parameters ## -################################ -# Track duration (seconds) 
after last valid detection:
-sec_before_old: 8
-# Seconds within which a track should be validated (otherwise it is discarded):
-sec_before_fake: 2.4
-# Seconds after which a visible track obtains NORMAL status:
-sec_remain_new: 1.2
-# Minimum number of detection<->track associations needed for validating a track:
-detections_to_validate: 3
-
-###########
-## Debug ##
-###########
-# Flag activating debug/visualization options:
-debug_active: true
-# Flag stating if a point cloud containing track trajectories (visible in RViz) should be created:
-history_pointcloud: true
-# Dimension of the point cloud containing track trajectories:
-history_size: 500
-# Flag stating if a spherical marker (with number) for every track (visible in RViz) should be created:
-markers: true
-# Flag stating if tracking results should be sent over the network:
-tracking_results: true
-# Flag stating if markers and a point cloud containing detection trajectories (visible in RViz) should be created:
-detection_debug: true
-# Dimension of the point cloud containing detection trajectories:
-detection_history_size: 500
-
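The values above are plain ROS parameters. As a minimal sketch (not code from this patch) of how a tracking node would read a few of them on the C++ side, with defaults mirroring the tracker.yaml values:

  #include <ros/ros.h>

  // Illustrative only: read tracker.yaml values via the private node handle.
  void readTrackerParams(ros::NodeHandle& nh)
  {
    double min_confidence_initialization, gate_distance_probability;
    int detections_to_validate;
    bool velocity_in_motion_term;
    nh.param("min_confidence_initialization", min_confidence_initialization, 4.0);
    nh.param("gate_distance_probability", gate_distance_probability, 0.999);
    nh.param("detections_to_validate", detections_to_validate, 3);
    nh.param("velocity_in_motion_term", velocity_in_motion_term, true);
  }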
diff --git a/tracking/launch/tracking/conf/tracker_filter.yaml b/tracking/launch/tracking/conf/tracker_filter.yaml
deleted file mode 100644
index 54415e0..0000000
--- a/tracking/launch/tracking/conf/tracker_filter.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-#####################
-## Node parameters ##
-#####################
-# Tracking output frame rate:
-rate: 30.0
-# Time lapse (after last detection) before a track is removed:
-track_lifetime_with_no_detections: 1.0
-# Interval between two heartbeat messages:
-heartbeat_time: 5.0
-# Flag stating if empty tracking messages should be published or not:
-publish_empty: true
-
-##########################
-## Smoothing parameters ##
-##########################
-# Error of the measurements provided by the tracker (0.0 means no error, i.e. no smoothing will be applied):
-sigma_noise: 0.0 #0.30
-# How much the acceleration of a track can vary (small values = high smoothing):
-sigma_process: 0.05
-# Time lapse (after last detection) during which the position of a track is predicted and published:
-prediction_duration: 0.1
-
-###########
-## Debug ##
-###########
-# Max size of the history pointcloud:
-max_history_size: 1000
-
diff --git a/tracking/launch/tracking/conf/tracker_multicamera.yaml b/tracking/launch/tracking/conf/tracker_multicamera.yaml
deleted file mode 100644
index 2cb5a99..0000000
--- a/tracking/launch/tracking/conf/tracker_multicamera.yaml
+++ /dev/null
@@ -1,78 +0,0 @@
-#########################
-## Tracking parameters ##
-#########################
-# Minimum confidence for track initialization:
-min_confidence_initialization: 4.0
-# Tracking maximum frame rate per sensor:
-rate: 30
-# Tracking reference frame:
-world_frame_id: "/world"
-# Voxel size used for people detection:
-voxel_size: 0.06
-# Flag stating if extrinsic (multicamera) calibration has been performed or not:
-extrinsic_calibration: true
-# Maximum delay that a detection message can have in order to be considered for tracking:
-max_detection_delay: 2.0
-# Flag stating if the results of a calibration refinement procedure should be used to correct detection positions:
-calibration_refinement: true
-
-########################
-## Sensor orientation ##
-########################
-# Flag stating if the sensor is vertically placed, or not:
-orientation:
-  vertical: false
-
-#######################
-## Motion parameters ##
-#######################
-# The higher this value, the more motion variance is allowed;
-# it can assume these values: 0.5 0.75 0.8 0.9 0.95 0.98 0.99 0.995 0.998 0.999
-gate_distance_probability: 0.99
-# Acceleration variance in people motion model:
-acceleration_variance: 100
-# Additional weight for position variance in people motion model:
-position_variance_weight: 30
-
-#################################
-## Data association parameters ##
-#################################
-# Flag stating if detection likelihood should be used in data association:
-detector_likelihood: true
-# Flag stating if track velocity should be considered in data association:
-velocity_in_motion_term: true
-# Weight of detection likelihood in data association:
-detector_weight: -0.25
-# Weight of motion likelihood in data association:
-motion_weight: 0.25
-
-################################
-## Tracking policy parameters ##
-################################
-# Track duration (seconds) after last valid detection:
-sec_before_old: 8
-# Seconds within which a track should be validated (otherwise it is discarded):
-sec_before_fake: 2.4
-# Seconds after which a visible track obtains NORMAL status:
-sec_remain_new: 1.2
-# Minimum number of detection<->track associations needed for validating a track:
-detections_to_validate: 3
-
-###########
-## Debug ##
-###########
-# Flag activating debug/visualization options:
-debug_active: true
-# Flag stating if a point cloud containing track trajectories (visible in RViz) should be created:
-history_pointcloud: true
-# Dimension of the point cloud containing track trajectories:
-history_size: 500
-# Flag stating if a spherical marker (with number) for every track (visible in RViz) should be created:
-markers: true
-# Flag stating if tracking results should be sent over the network:
-tracking_results: true
-# Flag stating if markers and a point cloud containing detection trajectories (visible in RViz) should be created:
-detection_debug: true
-# Dimension of the point cloud containing detection trajectories:
-detection_history_size: 500
-
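Note that gate_distance_probability is a probability, not a distance. One plausible way to turn it into a squared-Mahalanobis gating threshold is a chi-square quantile; whether the tracker performs exactly this mapping is an assumption here, and the sketch uses Boost.Math purely for illustration:

  #include <boost/math/distributions/chi_squared.hpp>

  // Hypothetical: map a gating probability to a squared-Mahalanobis
  // threshold using the chi-square quantile for a 4-D state (x, y, vx, vy).
  double gateDistance(double gate_distance_probability)
  {
    boost::math::chi_squared chi2(4);
    return boost::math::quantile(chi2, gate_distance_probability);
  }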
diff --git a/tracking/launch/tracking/conf/tracker_sr.yaml b/tracking/launch/tracking/conf/tracker_sr.yaml
deleted file mode 100644
index c637686..0000000
--- a/tracking/launch/tracking/conf/tracker_sr.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-#########################
-## Tracking parameters ##
-#########################
-# Minimum confidence for track initialization:
-min_confidence_initialization: 4.0
-# Tracking maximum frame rate per sensor:
-rate: 30
-# Tracking reference frame:
-world_frame_id: "/world"
-# Voxel size used for people detection:
-voxel_size: 0.06
-# Flag stating if extrinsic (multicamera) calibration has been performed or not:
-extrinsic_calibration: false
-
-########################
-## Sensor orientation ##
-########################
-# Flag stating if the sensor is vertically placed, or not:
-orientation:
-  vertical: false
-
-#######################
-## Motion parameters ##
-#######################
-# The higher this value, the more motion variance is allowed;
-# it can assume these values: 0.5 0.75 0.8 0.9 0.95 0.98 0.99 0.995 0.998 0.999
-gate_distance_probability: 0.999
-# Acceleration variance in people motion model:
-acceleration_variance: 100
-# Additional weight for position variance in people motion model:
-position_variance_weight: 30
-
-#################################
-## Data association parameters ##
-#################################
-# Flag stating if detection likelihood should be used in data association:
-detector_likelihood: true
-# Flag stating if track velocity should be considered in data association:
-velocity_in_motion_term: true
-# Weight of detection likelihood in data association:
-detector_weight: -0.25
-# Weight of motion likelihood in data association:
-motion_weight: 0.25
-
-################################
-## Tracking policy parameters ##
-################################
-# Track duration (seconds) after last valid detection:
-sec_before_old: 8
-# Seconds within which a track should be validated (otherwise it is discarded):
-sec_before_fake: 2.4
-# Seconds after which a visible track obtains NORMAL status:
-sec_remain_new: 1.2
-# Minimum number of detection<->track associations needed for validating a track:
-detections_to_validate: 3
-
-###########
-## Debug ##
-###########
-# Flag activating debug/visualization options:
-debug_active: true
-# Flag stating if a point cloud containing track trajectories (visible in RViz) should be created:
-history_pointcloud: true
-# Dimension of the point cloud containing track trajectories:
-history_size: 500
-# Flag stating if a spherical marker (with number) for every track (visible in RViz) should be created:
-markers: true
-# Flag stating if tracking results should be sent over the network:
-tracking_results: true
-# Flag stating if markers and a point cloud containing detection trajectories (visible in RViz) should be created:
-detection_debug: true
-# Dimension of the point cloud containing detection trajectories:
-detection_history_size: 500
-
diff --git a/tracking/launch/tracking/conf/tracking.rviz b/tracking/launch/tracking/conf/tracking.rviz
deleted file mode 100644
index 72ff906..0000000
--- a/tracking/launch/tracking/conf/tracking.rviz
+++ /dev/null
@@ -1,249 +0,0 @@
-Panels: - - Class: rviz/Displays - Help Height: 78 - Name: Displays - Property Tree Widget: - Expanded: - - /Global Options1 - - /Status1 - - /PointCloud21 - - /Marker1 - - /MarkerArray1 - - /DepthCloud1/Auto
Size1 - Splitter Ratio: 0.526846 - Tree Height: 405 - - Class: rviz/Selection - Name: Selection - - Class: rviz/Tool Properties - Expanded: - - /2D Pose Estimate1 - - /2D Nav Goal1 - - /Publish Point1 - Name: Tool Properties - Splitter Ratio: 0.588679 - - Class: rviz/Views - Expanded: - - /Current View1 - Name: Views - Splitter Ratio: 0.5 - - Class: rviz/Time - Experimental: false - Name: Time - SyncMode: 0 - SyncSource: Image -Visualization Manager: - Class: "" - Displays: - - Alpha: 0.5 - Cell Size: 1 - Class: rviz/Grid - Color: 160; 160; 164 - Enabled: true - Line Style: - Line Width: 0.03 - Value: Lines - Name: Grid - Normal Cell Count: 0 - Offset: - X: 0 - Y: 0 - Z: 0 - Plane: XY - Plane Cell Count: 10 - Reference Frame: - Value: true - - Alpha: 1 - Autocompute Intensity Bounds: true - Autocompute Value Bounds: - Max Value: 10 - Min Value: -10 - Value: true - Axis: Z - Channel Name: intensity - Class: rviz/PointCloud2 - Color: 255; 255; 255 - Color Transformer: RGB8 - Decay Time: 0 - Enabled: true - Invert Rainbow: false - Max Color: 255; 255; 255 - Max Intensity: 4096 - Min Color: 0; 0; 0 - Min Intensity: 0 - Name: PointCloud2 - Position Transformer: XYZ - Queue Size: 10 - Selectable: true - Size (Pixels): 5 - Size (m): 0.01 - Style: Points - Topic: /tracker/history_smoothed - Unreliable: false - Use Fixed Frame: true - Use rainbow: true - Value: true - - Class: rviz/Marker - Enabled: true - Marker Topic: /tracker/markers - Name: Marker - Namespaces: - {} - Queue Size: 100 - Value: true - - Class: rviz/MarkerArray - Enabled: true - Marker Topic: /tracker/markers_array - Name: MarkerArray - Namespaces: - {} - Queue Size: 100 - Value: true - - Class: rviz/TF - Enabled: true - Frame Timeout: 15 - Frames: - All Enabled: false - map: - Value: true - world: - Value: true - zed_initial_frame: - Value: true - zed_tracked_frame: - Value: true - Marker Scale: 1 - Name: TF - Show Arrows: true - Show Axes: true - Show Names: true - Tree: - map: - zed_initial_frame: - zed_tracked_frame: - world: - {} - Update Interval: 0 - Value: true - - Alpha: 1 - Auto Size: - Auto Size Factor: 1 - Value: true - Autocompute Intensity Bounds: true - Autocompute Value Bounds: - Max Value: 10 - Min Value: -10 - Value: true - Axis: Z - Channel Name: intensity - Class: rviz/DepthCloud - Color: 255; 255; 255 - Color Image Topic: "" - Color Transformer: RGB8 - Color Transport Hint: raw - Decay Time: 0 - Depth Map Topic: /camera/depth/image_rect_color - Depth Map Transport Hint: raw - Enabled: false - Invert Rainbow: false - Max Color: 255; 255; 255 - Max Intensity: 4096 - Min Color: 0; 0; 0 - Min Intensity: 0 - Name: DepthCloud - Occlusion Compensation: - Occlusion Time-Out: 30 - Value: false - Position Transformer: XYZ - Queue Size: 5 - Selectable: true - Size (Pixels): 3 - Style: Flat Squares - Topic Filter: true - Use Fixed Frame: true - Use rainbow: true - Value: false - - Class: rviz/Image - Enabled: true - Image Topic: /camera/rgb/image_rect_color - Max Value: 1 - Median window: 5 - Min Value: 0 - Name: Image - Normalize Range: true - Queue Size: 2 - Transport Hint: raw - Unreliable: false - Value: true - - Class: rviz/Image - Enabled: true - Image Topic: /camera/depth/image_rect_color - Max Value: 1 - Median window: 5 - Min Value: 0 - Name: Image - Normalize Range: true - Queue Size: 2 - Transport Hint: raw - Unreliable: false - Value: true - Enabled: true - Global Options: - Background Color: 48; 48; 48 - Fixed Frame: world - Frame Rate: 60 - Name: root - Tools: - - Class: rviz/Interact - Hide Inactive 
Objects: true - - Class: rviz/MoveCamera - - Class: rviz/Select - - Class: rviz/FocusCamera - - Class: rviz/Measure - - Class: rviz/SetInitialPose - Topic: /initialpose - - Class: rviz/SetGoal - Topic: /move_base_simple/goal - - Class: rviz/PublishPoint - Single click: true - Topic: /clicked_point - Value: true - Views: - Current: - Class: rviz/Orbit - Distance: 11.4858 - Enable Stereo Rendering: - Stereo Eye Separation: 0.06 - Stereo Focal Distance: 1 - Swap Stereo Eyes: false - Value: false - Focal Point: - X: -0.222038 - Y: -0.00615923 - Z: 2.37807 - Name: Current View - Near Clip Distance: 0.01 - Pitch: 0.709796 - Target Frame: - Value: Orbit (rviz) - Yaw: 3.0004 - Saved: ~ -Window Geometry: - Displays: - collapsed: false - Height: 1056 - Hide Left Dock: false - Hide Right Dock: true - Image: - collapsed: false - QMainWindow State: 000000ff00000000fd00000004000000000000016a00000396fc020000000afb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000002800000224000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d00610067006501000002520000009c0000001600fffffffb0000000a0049006d00610067006501000002f4000000ca0000001600ffffff000000010000010f0000037afc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000280000037a000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000073f0000003efc0100000002fb0000000800540069006d006501000000000000073f000002f600fffffffb0000000800540069006d00650100000000000004500000000000000000000005cf0000039600000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 - Selection: - collapsed: false - Time: - collapsed: false - Tool Properties: - collapsed: false - Views: - collapsed: true - Width: 1855 - X: 65 - Y: 24 diff --git a/tracking/launch/tracking/include/open_ptrack/tracking/kalman_filter.h b/tracking/launch/tracking/include/open_ptrack/tracking/kalman_filter.h deleted file mode 100644 index f143ff8..0000000 --- a/tracking/launch/tracking/include/open_ptrack/tracking/kalman_filter.h +++ /dev/null @@ -1,317 +0,0 @@ -/* - * Software License Agreement (BSD License) - * - * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] - * Copyright (c) 2013-, Open Perception, Inc. - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the copyright holder(s) nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] - * - */ - -#ifndef OPEN_PTRACK_TRACKING_KALMAN_FILTER_H_ -#define OPEN_PTRACK_TRACKING_KALMAN_FILTER_H_ - -#include -#include -#include -#include -#include - -namespace open_ptrack -{ - namespace tracking - { - /** \brief PredictModel Prediction model (linear state predict model) */ - class PredictModel : public Bayesian_filter::Linear_predict_model - { - protected: - /* \brief time step */ - const double dt_; - - public: - /** \brief Constructor. */ - PredictModel(double dt, double acceleration_variance); - - /** \brief Destructor. */ - virtual ~PredictModel(); - }; - - /** \brief ObserveModel Observation model (linear observation is additive uncorrelated model) */ - class ObserveModel : public Bayesian_filter::Linear_uncorrelated_observe_model - { - protected: - /** \brief Position variance. */ - double position_variance_; - - public: - /** \brief Constructor. */ - ObserveModel(double position_variance, int ouput_dimension); - - /** \brief Destructor. */ - virtual ~ObserveModel(); - }; - - /** \brief MahalanobisParameters2d Contains variables for bayesian estimation with state dimension = 2. */ - class MahalanobisParameters2d - { - public: - - MahalanobisParameters2d() : SI(Bayesian_filter_matrix::Empty), x(0), y(0) - { - SI.resize(2, 2, false); - } - - /** \brief Innovation covariance matrix. */ - Bayesian_filter::FM::SymMatrix SI; - - /** \brief Position x component. */ - double x; - - /** \brief Position y component. */ - double y; - }; - - /** \brief MahalanobisParameters4d Contains variables for bayesian estimation with state dimension = 4. */ - class MahalanobisParameters4d - { - public: - - MahalanobisParameters4d() : SI(Bayesian_filter_matrix::Empty), x(0), y(0), vx(0), vy(0) - { - SI.resize(4, 4, false); - } - - /** \brief Innovation covariance matrix. */ - Bayesian_filter::FM::SymMatrix SI; - - /** \brief Position x component. */ - double x; - - /** \brief Position y component. */ - double y; - - /** \brief Velocity x component. */ - double vx; - - /** \brief Velocity y component. */ - double vy; - }; - - /** \brief KalmanFilter provides methods for bayesian estimation with Kalman Filter. 
*/ - class KalmanFilter - { - protected: - - /** \brief Time interval.*/ - double dt_; - - /** \brief Scale factor for computing depth noise variance.*/ - double depth_multiplier_; - - /** \brief Position variance. */ - double position_variance_; - - /** \brief Acceleration variance.*/ - double acceleration_variance_; - - /** \brief State/output dimension.*/ - int output_dimension_; - - Bayesian_filter::Unscented_scheme* filter_; - - /** \brief Prediction model. */ - PredictModel* predict_model_; - - /** \brief Observation model. */ - ObserveModel* observe_model_; - - public: - - /** \brief Constructor. */ - KalmanFilter(double dt, double position_variance, double acceleration_variance, int output_dimension); - - /** \brief Constructor initializing a new KalmanFilter with another one. */ - KalmanFilter(const KalmanFilter& orig); - - /** \brief Overload of = operator for copying KalmanFilter objects. */ - KalmanFilter& operator=(const KalmanFilter& orig); - - /** \brief Destructor. */ - virtual ~KalmanFilter(); - - /** - * \brief Filter initialization procedure. - * - * \param[in] x Position x component. - * \param[in] y Position y component. - * \param[in] distance Distance from the sensor. - * \param[in] velocity_in_motion_term If true, both target position and velocity constitute the output vector. - */ - void - init(double x, double y, double distance, bool velocity_in_motion_term); - - /** - * \brief Prediction step. - */ - void - predict(); - - /** - * \brief Prediction step. - * - * \param[out] x Position x component. - * \param[out] y Position y component. - * \param[out] vx Velocity x component. - * \param[out] vy Velocity y component. - */ - void - predict(double& x, double& y, double& vx, double& vy); - - /** - * \brief Update step. - */ - void - update(); - - /** - * \brief Update step. - * - * \param[in] x Position x component. - * \param[in] y Position y component. - * \param[in] distance Distance from the sensor. - */ - void - update(double x, double y, double distance); - - /** - * \brief Update step. - * - * \param[in] x Position x component. - * \param[in] y Position y component. - * \param[in] vx Velocity x component. - * \param[in] vy Velocity y component. - * \param[in] distance Distance from the sensor. - */ - void - update(double x, double y, double vx, double vy, double distance); - - /** - * \brief Get filter state. - * - * \param[out] x Position x component. - * \param[out] y Position y component. - * \param[out] vx Velocity x component. - * \param[out] vy Velocity y component. - */ - void - getState(double& x, double& y, double& vx, double& vy); - - /** - * \brief Get filter state. - * - * \param[out] x Position x component. - * \param[out] y Position y component. - */ - void - getState(double& x, double& y); - - /** - * \brief Obtain variables for bayesian estimation with output dimension = 2. - * - * \param[out] mp Object of class MahalanobisParameters2d. - */ - void - getMahalanobisParameters(MahalanobisParameters2d& mp); - - /** - * \brief Obtain variables for bayesian estimation with output dimension = 4. - * - * \param[out] mp Object of class MahalanobisParameters4d. - */ - void - getMahalanobisParameters(MahalanobisParameters4d& mp); - - /** - * \brief Compute Mahalanobis distance between measurement and target predicted state. - * - * \param[in] x Input x position. - * \param[in] y Input y position. - * \param[in] mp Object of class MahalanobisParameters2d. - * - * \return Mahalanobis distance between measurement (x,y) and target predicted state. 
- */ - static double - performMahalanobisDistance(double x, double y, const MahalanobisParameters2d& mp); - - /** - * \brief Compute Mahalanobis distance between measurement and target predicted state. - * - * \param[in] x Input x position. - * \param[in] y Input y position. - * \param[in] vx Input x velocity. - * \param[in] vy Input y velocity. - * \param[in] mp Object of class MahalanobisParameters4d. - * - * \return Mahalanobis distance between measurement (x,y,vx,vy) and target predicted state. - */ - static double - performMahalanobisDistance(double x, double y, double vx, double vy, const MahalanobisParameters4d& mp); - - /** - * \brief Get filter innovation covariance. - * - * \return innovation covariance matrix. - */ - Bayesian_filter::FM::SymMatrix - getInnovationCovariance(); - - /** - * \brief Set prediction model. - * - * \param[in] acceleration_variance Acceleration variance. - */ - void - setPredictModel (double acceleration_variance); - - /** - * \brief Set observation model. - * - * \param[in] position_variance Position variance. - */ - void - setObserveModel (double position_variance); - - }; - - } /* namespace tracking */ -} /* namespace open_ptrack */ -#endif /* OPEN_PTRACK_TRACKING_KALMAN_FILTER_H_ */ diff --git a/tracking/launch/tracking/include/open_ptrack/tracking/munkres.h b/tracking/launch/tracking/include/open_ptrack/tracking/munkres.h deleted file mode 100644 index 241de3c..0000000 --- a/tracking/launch/tracking/include/open_ptrack/tracking/munkres.h +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Software License Agreement (BSD License) - * - * Copyright (c) 2013-, Matteo Munaro [matteo.munaro@dei.unipd.it] - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the copyright holder(s) nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
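A usage sketch for the KalmanFilter interface removed above (the calling context is an illustrative assumption, not code from this patch; the munkres.h listing resumes below):

  #include <open_ptrack/tracking/kalman_filter.h>

  // Hypothetical per-detection cycle against the interface above.
  void stepFilter(open_ptrack::tracking::KalmanFilter& kf,
                  double x_meas, double y_meas, double distance)
  {
    kf.predict();                         // propagate the motion model by dt
    kf.update(x_meas, y_meas, distance);  // correct with the new detection
    double x, y, vx, vy;
    kf.getState(x, y, vx, vy);            // filtered position and velocity
  }

  // Construction, e.g. at 30 Hz; the variance values are illustrative:
  //   open_ptrack::tracking::KalmanFilter kf(1.0 / 30.0, 0.3, 100.0, 4);
  //   kf.init(x0, y0, distance0, true);  // velocity_in_motion_term = true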
- *
- */
-
-#ifndef OPEN_PTRACK_TRACKING_MUNKRES_H_
-#define OPEN_PTRACK_TRACKING_MUNKRES_H_
-
-#include
-#include
-#include
-#include
-
-namespace open_ptrack
-{
-  namespace tracking
-  {
-    /** \brief Munkres solves the Global Nearest Neighbor problem with the Hungarian (Munkres) algorithm
-     * Implementation of the algorithm described here: http://csclab.murraystate.edu/bob.pilgrim/445/munkres.html
-     **/
-    class Munkres {
-
-      public:
-
-        /**
-         * \brief Main method for solving GNN problems.
-         *
-         * \param[in] matrix Cost matrix for the GNN problem (rows are workers, columns are jobs)
-         * \param[in] max If true, higher values are considered to be better. If false, lower values are better (lower cost).
-         */
-        cv::Mat solve(cv::Mat& matrix, bool max);
-
-      private:
-
-        /* \brief Preprocessing for applying the Munkres algorithm. */
-        double**
-        preprocess(cv::Mat& matrix_in, bool max);
-
-        /* \brief First step of the Munkres algorithm. */
-        void
-        step_one(double** matrix, int rows, int cols, int& step);
-
-        /* \brief Second step of the Hungarian algorithm. */
-        void
-        step_two(double** matrix, int rows, int cols, int* rowCover, int* colCover, double** m, int& step);
-
-        /* \brief Third step of the Hungarian algorithm. */
-        void
-        step_three(int rows, int cols, int* colCover, double** m, int& step);
-
-        /* \brief Utility function for step 4. */
-        void
-        find_a_zero(double** matrix, int rows, int cols, int* rowCover, int* colCover, int& row, int& col);
-
-        /* \brief Utility function for step 4. */
-        bool
-        star_in_row(double** m, int rows, int cols, int row);
-
-        /* \brief Utility function for step 4. */
-        void
-        find_star_in_row(double** m, int rows, int cols, int row, int& col);
-
-        /* \brief Fourth step of the Hungarian algorithm. */
-        void
-        step_four(double** matrix, int rows, int cols, int* rowCover, int* colCover, double** m, int& path_row_0, int& path_col_0, int& step);
-
-        /* \brief Utility function for step 5. */
-        void
-        find_star_in_col(int rows, int cols, double** m, int c, int& r);
-
-        /* \brief Utility function for step 5. */
-        void
-        find_prime_in_row(int rows, int cols, double** m, int r, int& c);
-
-        /* \brief Utility function for step 5. */
-        void
-        augment_path(double** m, int path_count, int** path);
-
-        /* \brief Utility function for step 5. */
-        void
-        clear_covers(int rows, int cols, int* rowCover, int* colCover);
-
-        /* \brief Utility function for step 5. */
-        void
-        erase_primes(int rows, int cols, double** m);
-
-        /* \brief Fifth step of the Hungarian algorithm. */
-        void
-        step_five(int rows, int cols, int* rowCover, int* colCover, double** m, int& path_row_0, int& path_col_0, int& path_count, int** path, int& step);
-
-        /* \brief Utility function for step 6. */
-        void
-        find_smallest(double** matrix, int rows, int cols, int* rowCover, int* colCover, double& minval);
-
-        /* \brief Sixth step of the Hungarian algorithm.
*/ - void - step_six(double** matrix, int rows, int cols, int* rowCover, int* colCover, int& step); - - }; - } /* namespace tracking */ -} /* namespace open_ptrack */ -#endif /* !defined(OPEN_PTRACK_TRACKING_MUNKRES_H_) */ diff --git a/tracking/launch/tracking/include/open_ptrack/tracking/track.h b/tracking/launch/tracking/include/open_ptrack/tracking/track.h deleted file mode 100644 index 6d790eb..0000000 --- a/tracking/launch/tracking/include/open_ptrack/tracking/track.h +++ /dev/null @@ -1,402 +0,0 @@ -/* - * Software License Agreement (BSD License) - * - * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] - * Copyright (c) 2013-, Open Perception, Inc. - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the copyright holder(s) nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
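A usage sketch for the Munkres interface removed above (the encoding of the returned assignment matrix, with nonzero entries marking chosen pairs, is an assumption; the track.h listing resumes below):

  #include <opencv2/core/core.hpp>
  #include <open_ptrack/tracking/munkres.h>

  // Hypothetical data-association step: rows are tracks, columns are
  // detections, entries are association costs.
  cv::Mat associate()
  {
    cv::Mat cost = (cv::Mat_<double>(2, 3) << 1.0, 5.0, 3.0,
                                              4.0, 2.0, 6.0);
    open_ptrack::tracking::Munkres munkres;
    return munkres.solve(cost, false);  // false: minimize total cost
  }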
- * - * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] - * - */ - -#ifndef OPEN_PTRACK_TRACKING_TRACK_H_ -#define OPEN_PTRACK_TRACKING_TRACK_H_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace open_ptrack -{ - namespace tracking - { - /** \brief Track represents information about a track (or target) */ - class Track - { - public: - - /** \brief A track has Status NEW if it has been recently created, otherwise it has NORMAL Status */ - enum Status - { - NEW, - NORMAL - }; - - /** \brief Visibility states if the track is currently visible by the sensor or partially occluded or totally occluded */ - enum Visibility - { - VISIBLE = 0, // No occlusion - OCCLUDED = 1, // Partially occlusion - NOT_VISIBLE = 2 // Total occlusion - }; - - protected: - - /** \brief Dimension of a circular buffer which keep tracks of filter parameters along time */ - int MAX_SIZE; - - /** \brief Track ID */ - const int id_; - - /** \brief Track frame id (frame id of the last detection associated to the track */ - const std::string frame_id_; - - /** \brief Inverse of the frame rate */ - const double period_; - - /** \brief If true, the track is validated, meaning that it has been associated with a certain number of high confidence detections */ - bool validated_; - - /** \brief Kalman filter associated to the track */ - open_ptrack::tracking::KalmanFilter* filter_; - - /** \brief Temporary copy of the Kalman filter associated to the track (used for recovery filter information when a track is re-found) */ - open_ptrack::tracking::KalmanFilter* tmp_filter_; - - /** \brief First time a detection is associated to the track */ - ros::Time first_time_detected_; - - /** \brief Last time a detection is associated to the track */ - ros::Time last_time_detected_; - - /** \brief Last time a detection with high detection confidence is associated to the track */ - ros::Time last_time_detected_with_high_confidence_; - - /** \brief Last time a prediction has been performed for the track */ - ros::Time last_time_predicted_; - - /** \brief Index in the circular buffer corresponding to the last time a prediction has been performed */ - int last_time_predicted_index_; - - /** Variables used for computing the detection/track Mahalanobis distance */ - std::vector mahalanobis_map2d_; - - /** Variables used for computing the detection/track Mahalanobis distance */ - std::vector mahalanobis_map4d_; - - /** \brief Track Status*/ - Status status_; - - /** \brief Track Visibility */ - Visibility visibility_; - - /** \brief Number of high confidence detections associated to the track */ - int updates_with_enough_confidence_; - - /** \brief Track centroid z coordinate */ - double z_; - - /** \brief Track height */ - double height_; - - /** \brief Track distance from the camera */ - double distance_; - - /** \brief Track age (in seconds) */ - double age_; - - /** \brief Confidence of the last detection associated to the track */ - double last_detector_confidence_; - - /** \brief Last data association score obtained by this track */ - double data_association_score_; - - /** \brief Color associated to the track */ - Eigen::Vector3f color_; - - /** \brief DetectionSource which provided the last detection associated to the track */ - open_ptrack::detection::DetectionSource* detection_source_; - - /** \brief If true, both position and velocity are considered in computing detection<->track Mahalanobis distance */ - bool 
velocity_in_motion_term_; - - /** \brief Count the number of consecutive updates with low confidence detections */ - int low_confidence_consecutive_frames_; - - public: - - /** \brief Constructor. */ - Track( - int id, - std::string frame_id, - double position_variance, - double acceleration_variance, - double period, - bool velocity_in_motion_term); - - /** \brief Destructor. */ - virtual ~Track(); - - /** \brief Track initialization with an old track. */ - void - init(const Track& old_track); - - /** - * \brief Track initialization. - * - * \param[in] x Track centroid x coordinate - * \param[in] y Track centroid y coordinate - * \param[in] z Track centroid z coordinate - * \param[in] height Track height - * \param[in] distance Track distance from the sensor - * \param[in] detection_source DetectionSource which provided the last detection associated to the track - */ - void - init( - double x, - double y, - double z, - double height, - double distance, - open_ptrack::detection::DetectionSource* detection_source); - - /** - * \brief Update track with new detection information. - * - * \param[in] x Detection centroid x coordinate - * \param[in] y Detection centroid y coordinate - * \param[in] z Detection centroid z coordinate - * \param[in] height Detection height - * \param[in] distance Detection distance from the sensor - * \param[in] confidence Detection confidence - * \param[in] min_confidence Minimum confidence for track initialization - * \param[in] min_confidence_detections Minimum confidence for detection - * \param[in] detection_source DetectionSource which provided the detection - */ - void - update( - double x, - double y, - double z, - double height, - double distance, - double data_assocation_score, - double confidence, - double min_confidence, - double min_confidence_detections, - open_ptrack::detection::DetectionSource* detection_source, - bool first_update = false); - - /** - * \brief Compute Mahalanobis distance between detection with position (x,y) and track. - * - * \param[in] x Detection centroid x coordinate. - * \param[in] y Detection centroid y coordinate. - * \param[in] when Time instant. - * - * \return the Mahalanobis distance. - */ - double - getMahalanobisDistance(double x, double y, const ros::Time& when); - - /* Validate a track */ - void - validate(); - - /** - * \brief Get track validation flag - * - * \return true if the track has been validated, false otherwise. - */ - bool - isValidated(); - - /** - * \brief Get track ID - * - * \return track ID - */ - int - getId(); - - /** - * \brief Set track status to s - * - * \param[in] s status - */ - void - setStatus(Status s); - - /** - * \brief Get track status - * - * \return track status - */ - Status - getStatus(); - - /** - * \brief Set track Visibility. - * - * \param[in] v Visibility status. - */ - void - setVisibility(Visibility v); - - /** - * \brief Get track Visibility. - * - * \return track Visibility. - */ - Visibility - getVisibility(); - - /** - * \brief Get time passed from first detection-track association. - * - * \return time passed from first detection-track association. - */ - float - getSecFromFirstDetection(ros::Time current_time); - - /** - * \brief Get time passed from last detection-track association. - * - * \return time passed from last detection-track association. - */ - float - getSecFromLastDetection(ros::Time current_time); - - /** - * \brief Get time passed from last detection-track association with a high confidence detection. 
- * - * \return time passed from last detection-track association with a high confidence detection. - */ - float - getSecFromLastHighConfidenceDetection(ros::Time current_time); - - /** - * \brief Get the number of consecutive updates with low confidence detections. - * - * \return the number of consecutive updates with low confidence detections. - */ - float - getLowConfidenceConsecutiveFrames(); - - /** - * \brief Get the number of updates with enough confidence detections. - * - * \return the number of updates with enough confidence detections. - */ - int - getUpdatesWithEnoughConfidence(); - - /** - * \brief Draw track bounding box in the image. - * - * \param[in] vertical States if the camera is vertically oriented (true) or not (false). - */ - void - draw(bool vertical); - - /** - * \brief Create RViz visualization marker with the track position. - * - * \param[in/out] msg Array containing markers of every track. - */ - void - createMarker(visualization_msgs::MarkerArray::Ptr& msg); - - /** - * \brief Get a XYZRGB point from a point cloud. - * - * \param[in/out] p Point containing position information and to be filled with color. - * - * \return true if track is visible, false if not visible. - */ - bool - getPointXYZRGB(pcl::PointXYZRGB& p); - - /** - * \brief Create track ROS message. - * - * \param[in/out] track_msg Track ROS message. - * \param[in] vertical States if the camera is vertically oriented (true) or not (false). - */ - void - toMsg(opt_msgs::Track& track_msg, bool vertical); - - /** - * \brief Get the DetectionSource corresponding to the last associated detection. - * - * \return the DetectionSource corresponding to the last associated detection. - */ - open_ptrack::detection::DetectionSource* - getDetectionSource(); - - /** - * \brief Set flag stating if people velocity should be used in motion term for data association - * - * \param[in] velocity_in_motion_term If true, people velocity is also used in motion term for data association - * \param[in] acceleration_variance Acceleration variance (for Kalman Filter) - * \param[in] position_variance Position variance (for Kalman Filter) - */ - void - setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance); - - /** - * \brief Set acceleration variance (for Kalman Filter) - * - * \param[in] acceleration_variance Acceleration variance (for Kalman Filter) - */ - void - setAccelerationVariance (double acceleration_variance); - - /** - * \brief Set position variance (for Kalman Filter) - * - * \param[in] position_variance Position variance (for Kalman Filter) - */ - void - setPositionVariance (double position_variance); - }; - - } /* namespace tracking */ -} /* namespace open_ptrack */ -#endif /* OPEN_PTRACK_TRACKING_TRACK_H_ */ diff --git a/tracking/launch/tracking/include/open_ptrack/tracking/tracker.h b/tracking/launch/tracking/include/open_ptrack/tracking/tracker.h deleted file mode 100644 index a275acd..0000000 --- a/tracking/launch/tracking/include/open_ptrack/tracking/tracker.h +++ /dev/null @@ -1,332 +0,0 @@ -/* - * Software License Agreement (BSD License) - * - * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] - * Copyright (c) 2013-, Open Perception, Inc. - * - * All rights reserved. 
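A sketch exercising the Track interface removed above, tied to the policy parameters from the tracker configs (sec_remain_new, detections_to_validate); whether the tracker promotes tracks exactly this way is not shown in this header, so treat it as an assumption. The tracker.h listing resumes below.

  #include <ros/time.h>
  #include <open_ptrack/tracking/track.h>

  // Hypothetical promotion/validation policy; thresholds are the
  // tracker.yaml defaults.
  void updateTrackStatus(open_ptrack::tracking::Track& track, const ros::Time& now)
  {
    const double sec_remain_new = 1.2;
    const int detections_to_validate = 3;
    if (track.getStatus() == open_ptrack::tracking::Track::NEW &&
        track.getSecFromFirstDetection(now) > sec_remain_new)
      track.setStatus(open_ptrack::tracking::Track::NORMAL);
    if (!track.isValidated() &&
        track.getUpdatesWithEnoughConfidence() >= detections_to_validate)
      track.validate();
  }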
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the copyright holder(s) nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] - * - */ - -#ifndef OPEN_PTRACK_TRACKING_TRACKER_H_ -#define OPEN_PTRACK_TRACKING_TRACKER_H_ - -#include -#include -#include -#include -#include -#include - -namespace open_ptrack -{ - namespace tracking - { - /** \brief Tracker performs tracking-by-detection */ - class Tracker - { - protected: - /** \brief List of all active tracks */ - std::list tracks_; - - /** \brief List of lost tracks */ - std::list lost_tracks_; - - /** \brief List of tracks with Status = NEW */ - std::list new_tracks_; - - /** \brief List of current detections */ - std::vector detections_; - - /** \brief List of current detections not associated to any track */ - std::list unassociated_detections_; - - /** \brief Track ID counter */ - int tracks_counter_; - - /** \brief World reference frame used for tracking */ - std::string world_frame_id_; - - /** \brief Minimum confidence for track initialization */ - double min_confidence_; - - /** \brief Minimum confidence of detections sent to tracking */ - const double min_confidence_detections_; - - /** \brief Minimum number of detection<->track associations needed for validating a track */ - int detections_to_validate_; - - /** \brief Time after which a not visible track becomes old */ - double sec_before_old_; - - /** \brief Time after which a visible track obtain NORMAL status */ - double sec_remain_new_; - - /** \brief Time within which a track should be validated (otherwise it is discarded) */ - double sec_before_fake_; - - /** \brief Gate distance for joint likelihood in data association */ - double gate_distance_; - - /** \brief Flag stating if people detection confidence should be used in data association (true) or not (false) */ - bool detector_likelihood_; - - /** \brief Weights for the single terms of the joint likelihood */ - std::vector likelihood_weights_; - - /** \brief If true, people velocity is also used in motion term for data association */ - bool 
velocity_in_motion_term_; - - /** \brief Minimum time period between two detections messages */ - const double period_; - - /** \brief Position variance (for Kalman Filter) */ - double position_variance_; - - /** \brief Acceleration variance (for Kalman Filter) */ - double acceleration_variance_; - - /** \brief Flag enabling debug mode */ - const bool debug_mode_; - - /** \brief Detections<->tracks distance matrix for data association */ - cv::Mat_ distance_matrix_; - - /** \brief Detections<->tracks cost matrix to be used to solve the Global Nearest Neighbor problem */ - cv::Mat_ cost_matrix_; - - /** \brief if true, the sensor is considered to be vertically placed (portrait mode) */ - bool vertical_; - - /** \brief Create detections<->tracks distance matrix for data association */ - void - createDistanceMatrix(); - - /** \brief Create detections<->tracks cost matrix to be used to solve the Global Nearest Neighbor problem */ - void - createCostMatrix(); - - /** \brief Update tracks associated to a detection in the current frame */ - void - updateDetectedTracks(); - - /** \brief Fill list containing unassociated detections */ - void - fillUnassociatedDetections(); - - /** \brief Create new tracks with high confidence unassociated detections */ - void - createNewTracks(); - - /** \brief Create a new track with detection information */ - int - createNewTrack(open_ptrack::detection::Detection& detection); - - /** \brief Update lost tracks */ - void - updateLostTracks(); - - public: - /** \brief Constructor */ - Tracker(double gate_distance, bool detector_likelihood, std::vector likelihood_weights, bool velocity_in_motion_term, - double min_confidence, double min_confidence_detections, double sec_before_old, double sec_before_fake, - double sec_remain_new, int detections_to_validate, double period, double position_variance, - double acceleration_variance, std::string world_frame_id, bool debug_mode, bool vertical); - - /** \brief Destructor */ - virtual ~Tracker(); - - /** - * \brief Initialization when a new set of detections arrive. - * - * \param[in] detections Vector of current detections. - * - */ - void - newFrame(const std::vector& detections); - - /** - * \brief Update the list of tracks according to the current set of detections. - */ - void - updateTracks(); - -// /** -// * \brief Draw the tracks into the RGB image given by its sensor. -// */ -// void -// drawRgb(); - - /** - * \brief Fills the MarkerArray message with a marker for each visible track (in correspondance - * of its centroid) and its number. - * - * \param[in] msg The MarkerArray message to fill. - */ - void - toMarkerArray(visualization_msgs::MarkerArray::Ptr& msg); - - /** - * \brief Writes the state of each track into a TrackArray message. - * - * \param[in] msg The TrackArray message to fill. - */ - void - toMsg(opt_msgs::TrackArray::Ptr& msg); - - /** - * \brief Writes the state of tracks with a given frame id into a TrackArray message. - * - * \param[in] msg The TrackArray message to fill. - * \param[in] source_frame_id Frame id of tracks that have to be written to msg. - */ - void - toMsg(opt_msgs::TrackArray::Ptr& msg, std::string& source_frame_id); - - /** - * \brief Writes the ID of each alive track into an IDArray message. - * - * \param[in] msg The IDArray message to fill. 
- */ - void - getAliveIDs (opt_msgs::IDArray::Ptr& msg); - - /** - * \brief Appends the location of each track to a point cloud starting from starting_index (using - * a circular array) - * - * \param[in] pointcloud The point cloud where to append the locations. - * \param[in] starting_index The starting index of the array. - * \param[in] max_size The maximum size of the point cloud (when reached the points overwrite the initial ones) - * - * \return the new starting_index. - */ - size_t - appendToPointCloud(pcl::PointCloud::Ptr& pointcloud, - size_t starting_index, size_t max_size); - - /** - * \brief Set minimum confidence for track initialization - * - * \param[in] min_confidence Minimum confidence for track initialization - */ - void - setMinConfidenceForTrackInitialization (double min_confidence); - - /** - * \brief Set time after which a not visible track becomes old - * - * \param[in] sec_before_old Time after which a not visible track becomes old - */ - void - setSecBeforeOld (double sec_before_old); - - /** - * \brief Set time within which a track should be validated (otherwise it is discarded) - * - * \param[in] sec_before_fake Time within which a track should be validated (otherwise it is discarded) - */ - void - setSecBeforeFake (double sec_before_fake); - - /** - * \brief Set time after which a visible track obtain NORMAL status - * - * \param[in] sec_remain_new Time after which a visible track obtain NORMAL status - */ - void - setSecRemainNew (double sec_remain_new); - - /** - * \brief Set minimum number of detection<->track associations needed for validating a track - * - * \param[in] detections_to_validate Minimum number of detection<->track associations needed for validating a track - */ - void - setDetectionsToValidate (int detections_to_validate); - - /** - * \brief Set flag stating if people detection confidence should be used in data association (true) or not (false) - * - * \param[in] detector_likelihood Flag stating if people detection confidence should be used in data association (true) or not (false) - */ - void - setDetectorLikelihood (bool detector_likelihood); - - /** - * \brief Set likelihood weights for data association - * - * \param[in] detector_weight Weight for detector likelihood - * \param[in] motion_weight Weight for motion likelihood - */ - void - setLikelihoodWeights (double detector_weight, double motion_weight); - - /** - * \brief Set flag stating if people velocity should be used in motion term for data association - * - * \param[in] velocity_in_motion_term If true, people velocity is also used in motion term for data association - * \param[in] acceleration_variance Acceleration variance (for Kalman Filter) - * \param[in] position_variance Position variance (for Kalman Filter) - */ - void - setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance); - - /** - * \brief Set acceleration variance (for Kalman Filter) - * - * \param[in] acceleration_variance Acceleration variance (for Kalman Filter) - */ - void - setAccelerationVariance (double acceleration_variance); - - /** - * \brief Set position variance (for Kalman Filter) - * - * \param[in] position_variance Position variance (for Kalman Filter) - */ - void - setPositionVariance (double position_variance); - - /** - * \brief Set gate distance for joint likelihood in data association - * - * \param[in] gate_distance Gate distance for joint likelihood in data association. 
- */
-      void
-      setGateDistance (double gate_distance);
-    };
-
-  } /* namespace tracking */
-} /* namespace open_ptrack */
-
-#endif /* OPEN_PTRACK_TRACKING_TRACKER_H_ */
diff --git a/tracking/launch/tracking/launch/detection_and_tracking.launch b/tracking/launch/tracking/launch/detection_and_tracking.launch
deleted file mode 100644
index 39f7fae..0000000
--- a/tracking/launch/tracking/launch/detection_and_tracking.launch
+++ /dev/null
@@ -1,15 +0,0 @@
-[launch XML: 15 deleted lines whose element content was lost in extraction]
diff --git a/tracking/launch/tracking/launch/detection_and_tracking_kinect2.launch b/tracking/launch/tracking/launch/detection_and_tracking_kinect2.launch
deleted file mode 100644
index 317f7a8..0000000
--- a/tracking/launch/tracking/launch/detection_and_tracking_kinect2.launch
+++ /dev/null
@@ -1,15 +0,0 @@
-[launch XML: 15 deleted lines whose element content was lost in extraction]
diff --git a/tracking/launch/tracking/launch/detection_and_tracking_sr.launch b/tracking/launch/tracking/launch/detection_and_tracking_sr.launch
deleted file mode 100644
index b755a9c..0000000
--- a/tracking/launch/tracking/launch/detection_and_tracking_sr.launch
+++ /dev/null
@@ -1,15 +0,0 @@
-[launch XML: 15 deleted lines whose element content was lost in extraction]
diff --git a/tracking/launch/tracking/launch/detection_and_tracking_stereo.launch b/tracking/launch/tracking/launch/detection_and_tracking_stereo.launch
deleted file mode 100644
index 449f827..0000000
--- a/tracking/launch/tracking/launch/detection_and_tracking_stereo.launch
+++ /dev/null
@@ -1,15 +0,0 @@
-[launch XML: 15 deleted lines whose element content was lost in extraction]
diff --git a/tracking/launch/tracking/launch/detection_and_tracking_zed.launch b/tracking/launch/tracking/launch/detection_and_tracking_zed.launch
deleted file mode 100644
index e5be647..0000000
--- a/tracking/launch/tracking/launch/detection_and_tracking_zed.launch
+++ /dev/null
@@ -1,15 +0,0 @@
-[launch XML: 15 deleted lines whose element content was lost in extraction]
diff --git a/tracking/launch/tracking/launch/tracker.launch b/tracking/launch/tracking/launch/tracker.launch
deleted file mode 100644
index 57e1ccc..0000000
--- a/tracking/launch/tracking/launch/tracker.launch
+++ /dev/null
@@ -1,21 +0,0 @@
-[launch XML: 21 deleted lines whose element content was lost in extraction]
diff --git a/tracking/launch/tracking/launch/tracker_network.launch b/tracking/launch/tracking/launch/tracker_network.launch
deleted file mode 100644
index e8427d2..0000000
--- a/tracking/launch/tracking/launch/tracker_network.launch
+++ /dev/null
@@ -1,24 +0,0 @@
-[launch XML: 24 deleted lines whose element content was lost in extraction]
diff --git a/tracking/launch/tracking/launch/tracker_sr.launch b/tracking/launch/tracking/launch/tracker_sr.launch
deleted file mode 100644
index 88d56ca..0000000
--- a/tracking/launch/tracking/launch/tracker_sr.launch
+++ /dev/null
@@ -1,22 +0,0 @@
-[launch XML: 22 deleted lines whose element content was lost in extraction]
diff --git a/tracking/launch/tracking/launch/tracking_node.launch b/tracking/launch/tracking/launch/tracking_node.launch
deleted file mode 100644
index a7af869..0000000
--- a/tracking/launch/tracking/launch/tracking_node.launch
+++ /dev/null
@@ -1,15 +0,0 @@
-[launch XML: 15 deleted lines whose element content was lost in extraction]
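A per-frame driver sketch for the Tracker interface removed above (the publisher, the detection vector, and the message header name, which follows the usual ROS convention, are illustrative assumptions):

  #include <ros/ros.h>
  #include <opt_msgs/TrackArray.h>
  #include <open_ptrack/tracking/tracker.h>

  // Hypothetical tracking iteration using only methods declared above.
  void trackFrame(open_ptrack::tracking::Tracker& tracker,
                  std::vector<open_ptrack::detection::Detection>& detections,
                  ros::Publisher& pub)
  {
    tracker.newFrame(detections);   // ingest the current detections
    tracker.updateTracks();         // associate, update, create, and retire tracks
    opt_msgs::TrackArray::Ptr msg(new opt_msgs::TrackArray);
    tracker.toMsg(msg);             // write current track states into the message
    pub.publish(msg);
  }

A gate computed as in the earlier chi-square sketch could be applied beforehand via setGateDistance.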
diff --git a/tracking/launch/tracking/package.xml b/tracking/launch/tracking/package.xml
deleted file mode 100644
index feb082f..0000000
--- a/tracking/launch/tracking/package.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-[package.xml: 46 deleted lines; the XML tags were lost in extraction. Recoverable content: name "tracking", version 0.0.0, description "Module containing object/people tracking algorithms.", maintainer and author Matteo Munaro, license BSD, buildtool_depend catkin, build and run dependencies: cmake_modules, roscpp, sensor_msgs, std_msgs, opencv2, opt_msgs, rospy, tf, tf_conversions, visualization_msgs, bayes, detection, pcl_ros, opt_utils, dynamic_reconfigure]
diff --git a/tracking/launch/tracking/src/kalman_filter.cpp b/tracking/launch/tracking/src/kalman_filter.cpp
deleted file mode 100644
index 9efd8d1..0000000
--- a/tracking/launch/tracking/src/kalman_filter.cpp
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- * Software License Agreement (BSD License)
- *
- * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it]
- * Copyright (c) 2013-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer in the documentation and/or other materials provided
- *   with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- *   contributors may be used to endorse or promote products derived
- *   from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- * - * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] - * - */ - -#include - -#include - -namespace open_ptrack -{ - namespace tracking - { - - PredictModel::PredictModel(double dt, double acceleration_variance) : - Bayesian_filter::Linear_predict_model(4, 2), dt_(dt) - { - for(size_t i = 0; i < 4; i++) - for(size_t j = 0; j < 4; j++) - Fx(i, j) = 0.0; - - Fx(0, 0) = 1.0; - Fx(1, 1) = 1.0; - Fx(2, 2) = 1.0; - Fx(3, 3) = 1.0; - Fx(0, 2) = dt; - Fx(1, 3) = dt; - - q[0] = acceleration_variance; - q[1] = acceleration_variance; - - for(size_t i = 0; i < 4; i++) - for(size_t j = 0; j < 2; j++) - G(i, j) = 0.0; - - G(0, 0) = dt * dt / 2.0; - G(1, 1) = dt * dt / 2.0; - G(2, 0) = dt; - G(3, 1) = dt; - } - - PredictModel::~PredictModel() - { - - } - - ObserveModel::ObserveModel(double position_variance, int output_dimension) : - Bayesian_filter::Linear_uncorrelated_observe_model(4, output_dimension), position_variance_(position_variance) - { - - for(size_t i = 0; i < output_dimension; i++) - for(size_t j = 0; j < 4; j++) - Hx(i, j) = 0.0; - - Hx(0, 0) = 1.0; - Hx(1, 1) = 1.0; - - Zv[0] = position_variance_; - Zv[1] = position_variance_; - if (output_dimension == 4) - { - Hx(2, 2) = 1.0; - Hx(3, 3) = 1.0; - Zv[2] = 16 * position_variance_; - Zv[3] = 16 * position_variance_; - } - - } - - ObserveModel::~ObserveModel() - { - - } - - KalmanFilter::KalmanFilter(double dt, double position_variance, double acceleration_variance, int output_dimension) : - dt_(dt), position_variance_(position_variance), depth_multiplier_(std::pow(0.005 / 1.96, 2)), - acceleration_variance_(acceleration_variance), output_dimension_(output_dimension) - { - predict_model_ = new PredictModel(dt, acceleration_variance); - observe_model_ = new ObserveModel(position_variance, output_dimension); - filter_ = new Bayesian_filter::Unscented_scheme(4); - } - - KalmanFilter::KalmanFilter(const KalmanFilter& orig) - { - *this = orig; - } - - KalmanFilter& - KalmanFilter::operator=(const KalmanFilter& orig) - { - this->dt_ = orig.dt_; - this->position_variance_ = orig.position_variance_; - this->depth_multiplier_ = orig.depth_multiplier_; - this->acceleration_variance_ = orig.acceleration_variance_; - delete this->predict_model_; - delete this->observe_model_; - delete this->filter_; - this->predict_model_ = new PredictModel(dt_, acceleration_variance_); - this->observe_model_ = new ObserveModel(position_variance_, output_dimension_); - this->filter_ = new Bayesian_filter::Unscented_scheme(4, 2); - - this->filter_->s.resize(orig.filter_->s.size(), false); - for(size_t i = 0; i < orig.filter_->s.size(); i++) - { - this->filter_->s(i) = orig.filter_->s(i); - } - - this->filter_->S.resize(orig.filter_->S.size1(), orig.filter_->S.size2(), false); - for(size_t i = 0; i < orig.filter_->S.size1(); i++) - { - for(size_t j = 0; j < orig.filter_->S.size2(); j++) - { - this->filter_->S(i, j) = orig.filter_->S(i, j); - } - } - - this->filter_->SI.resize(orig.filter_->SI.size1(), orig.filter_->SI.size2(), false); - for(size_t i = 0; i < orig.filter_->SI.size1(); i++) - { - for(size_t j = 0; j < orig.filter_->SI.size2(); j++) - { - this->filter_->SI(i, j) = orig.filter_->SI(i, j); - } - } - - this->filter_->x.resize(orig.filter_->x.size(), false); - for(size_t i = 0; i < orig.filter_->x.size(); i++) - { - this->filter_->x(i) = orig.filter_->x(i); - } - - this->filter_->X.resize(orig.filter_->X.size1(), orig.filter_->X.size2(), false); - for(size_t i = 0; i < orig.filter_->X.size1(); i++) - { - for(size_t 
j = 0; j < orig.filter_->X.size2(); j++) - { - this->filter_->X(i, j) = orig.filter_->X(i, j); - } - } - - this->filter_->XX.resize(orig.filter_->XX.size1(), orig.filter_->XX.size2(), false); - for(size_t i = 0; i < orig.filter_->XX.size1(); i++) - { - for(size_t j = 0; j < orig.filter_->XX.size2(); j++) - { - this->filter_->XX(i, j) = orig.filter_->XX(i, j); - } - } - - return *this; - } - - KalmanFilter::~KalmanFilter() - { - delete predict_model_; - delete observe_model_; - delete filter_; - } - - void - KalmanFilter::init(double x, double y, double distance, bool velocity_in_motion_term) - { - - Bayesian_filter_matrix::Vec state(4); - Bayesian_filter_matrix::SymMatrix cov(4, 4); - - state[0] = x; - state[1] = y; - state[2] = 0.0; - state[3] = 0.0; - - for(size_t i = 0; i < 4; i++) - for(size_t j = 0; j < 4; j++) - cov(i, j) = 0.0; - - cov(2, 2) = 100; //1000.0; - cov(3, 3) = 100; //1000.0; - - // Filter initialization: - filter_->init_kalman(state, cov); - - // First update: - if (velocity_in_motion_term) - update(x, y, 0, 0, distance); - else - update(x, y, distance); - - } - - void - KalmanFilter::predict() - { - filter_->predict(*predict_model_); - } - - void - KalmanFilter::predict(double& x, double& y, double& vx, double& vy) - { - predict(); - - x = filter_->x[0]; - y = filter_->x[1]; - vx = filter_->x[2]; - vy = filter_->x[3]; - } - - void - KalmanFilter::update() - { - filter_->update(); - //filter_->update_XX(2.0); - } - - void - KalmanFilter::update(double x, double y, double distance) - { - - Bayesian_filter_matrix::Vec observation(2); - observation[0] = x; - observation[1] = y; - - //printf("%d %f %f %f ", _id, x, y, height); - - observe_model_->Zv[0] = position_variance_ + std::pow(distance, 4) * depth_multiplier_; - observe_model_->Zv[1] = position_variance_ + std::pow(distance, 4) * depth_multiplier_; - - filter_->observe(*observe_model_, observation); - filter_->update(); - //filter_->update_XX(2.0); - - } - - void - KalmanFilter::update(double x, double y, double vx, double vy, double distance) - { - - Bayesian_filter_matrix::Vec observation(4); - observation[0] = x; - observation[1] = y; - observation[2] = vx; - observation[3] = vy; - - //printf("%d %f %f %f ", _id, x, y, height); - - // observe_model_->Zv[0] = position_variance_ + std::pow(distance, 4) * depth_multiplier_; - // observe_model_->Zv[1] = position_variance_ + std::pow(distance, 4) * depth_multiplier_; - // observe_model_->Zv[2] = 16 * position_variance_ + std::pow(distance, 4) * depth_multiplier_; - // observe_model_->Zv[3] = 16 * position_variance_ + std::pow(distance, 4) * depth_multiplier_; - - filter_->observe(*observe_model_, observation); - filter_->update(); - //filter_->update_XX(2.0); - - } - - void - KalmanFilter::getMahalanobisParameters(MahalanobisParameters2d& mp) - { - mp.SI = filter_->SI; - mp.x = filter_->x[0]; - mp.y = filter_->x[1]; - } - - void - KalmanFilter::getMahalanobisParameters(MahalanobisParameters4d& mp) - { - mp.SI = filter_->SI; - mp.x = filter_->x[0]; - mp.y = filter_->x[1]; - mp.vx = filter_->x[2]; - mp.vy = filter_->x[3]; - } - - double - KalmanFilter::performMahalanobisDistance(double x, double y, const MahalanobisParameters2d& mp) - { - Bayesian_filter_matrix::Vec v(2); - v[0] = x - mp.x; - v[1] = y - mp.y; - return Bayesian_filter_matrix::prod_SPDT(v, mp.SI); - } - - double - KalmanFilter::performMahalanobisDistance(double x, double y, double vx, double vy, const MahalanobisParameters4d& mp) - { - Bayesian_filter_matrix::Vec v(4); - v[0] = x - mp.x; - v[1] = y - mp.y; 
- v[2] = vx - mp.vx; - v[3] = vy - mp.vy; - - // Symmetric Positive (Semi) Definite multiply: p = v'*(mp.SI)*v - return Bayesian_filter_matrix::prod_SPDT(v, mp.SI); - } - - Bayesian_filter::FM::SymMatrix - KalmanFilter::getInnovationCovariance() - { - return filter_->SI; - } - - void - KalmanFilter::getState(double& x, double& y, double& vx, double& vy) - { - x = filter_->x[0]; - y = filter_->x[1]; - vx = filter_->x[2]; - vy = filter_->x[3]; - } - - void - KalmanFilter::getState(double& x, double& y) - { - x = filter_->x[0]; - y = filter_->x[1]; - } - - void - KalmanFilter::setPredictModel (double acceleration_variance) - { - acceleration_variance_ = acceleration_variance; - predict_model_ = new PredictModel(dt_, acceleration_variance_); - } - - void - KalmanFilter::setObserveModel (double position_variance) - { - position_variance_ = position_variance; - observe_model_ = new ObserveModel(position_variance_, output_dimension_); - } - } /* namespace tracking */ -} /* namespace open_ptrack */ diff --git a/tracking/launch/tracking/src/munkres.cpp b/tracking/launch/tracking/src/munkres.cpp deleted file mode 100644 index 5445ad6..0000000 --- a/tracking/launch/tracking/src/munkres.cpp +++ /dev/null @@ -1,410 +0,0 @@ -/* - * Software License Agreement (BSD License) - * - * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it] - * Copyright (c) 2013-, Open Perception, Inc. - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the copyright holder(s) nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
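For context on the deleted munkres.cpp below: it implements the classic Hungarian/Munkres assignment steps over an OpenCV matrix, padding rectangular inputs to square with a very large sentinel cost before solving. A hedged usage sketch with hypothetical costs (solve() returns the star matrix minus one, so assigned pairs read 0 and everything else -1):

    #include <opencv2/core/core.hpp>
    #include "open_ptrack/tracking/munkres.h"

    int main()
    {
      // Hedged usage sketch, not part of the deleted file.
      cv::Mat_<double> cost(2, 3);             // rows: tracks, cols: detections
      cost << 1.0, 2.0, 6.0,
              4.0, 3.0, 5.0;

      open_ptrack::tracking::Munkres munkres;
      cv::Mat assignment = munkres.solve(cost, false);   // false: minimize cost

      // The cheapest assignment is track 0 -> detection 0 and track 1 ->
      // detection 1 (total cost 4.0), so assignment holds 0 at (0,0) and
      // (1,1) and -1 everywhere else.
      return 0;
    }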
- * - * Author: Matteo Munaro [matteo.munaro@dei.unipd.it] - * - */ - -#include "open_ptrack/tracking/munkres.h" - -namespace open_ptrack -{ - namespace tracking - { - using namespace std; - - void - Munkres::step_one(double** matrix, int rows, int cols, int& step) { - double min_in_row; - - for (int r = 0; r < rows; r++) { - min_in_row = matrix[r][0]; - for (int c = 0; c < cols; c++) - if (matrix[r][c] < min_in_row) - min_in_row = matrix[r][c]; - for (int c = 0; c < cols; c++) - matrix[r][c] -= min_in_row; - } - step = 2; - } - - void - Munkres::step_two(double** matrix, int rows, int cols, int* rowCover, int* colCover, double** m, - int& step) { - for (int r = 0; r < rows; r++) - for (int c = 0; c < cols; c++) { - if (matrix[r][c] == 0 && rowCover[r] == 0 && colCover[c] == 0) { - m[r][c] = 1; - rowCover[r] = 1; - colCover[c] = 1; - } - } - for (int r = 0; r < rows; r++) - rowCover[r] = 0; - for (int c = 0; c < cols; c++) - colCover[c] = 0; - step = 3; - } - - void - Munkres::step_three(int rows, int cols, int* colCover, double** m, int& step) { - int colcount; - for (int r = 0; r < rows; r++) - for (int c = 0; c < cols; c++) - if (m[r][c] == 1) - colCover[c] = 1; - - colcount = 0; - for (int c = 0; c < cols; c++) - if (colCover[c] == 1) - colcount += 1; - if (colcount >= rows || colcount >= cols) - step = 7; - else - step = 4; - } - - void - Munkres::find_a_zero(double** matrix, int rows, int cols, int* rowCover, int* colCover, int& row, - int& col) { - int r = 0; - int c; - bool done; - row = -1; - col = -1; - done = false; - while (!done) { - c = 0; - while (true) { - if (matrix[r][c] == 0 && rowCover[r] == 0 && colCover[c] == 0) { - row = r; - col = c; - done = true; - } - c += 1; - if (c >= cols || done) - break; - } - r += 1; - if (r >= rows) - done = true; - } - } - - bool - Munkres::star_in_row(double** m, int rows, int cols, int row) { - bool tmp = false; - for (int c = 0; c < cols; c++) - if (m[row][c] == 1) - tmp = true; - return tmp; - } - - void - Munkres::find_star_in_row(double** m, int rows, int cols, int row, int& col) { - col = -1; - for (int c = 0; c < cols; c++) - if (m[row][c] == 1) - col = c; - } - - void - Munkres::step_four(double** matrix, int rows, int cols, int* rowCover, int* colCover, double** m, - int& path_row_0, int& path_col_0, int& step) { - int row = -1; - int col = -1; - bool done; - - done = false; - while (!done) { - find_a_zero(matrix, rows, cols, rowCover, colCover, row, col); - if (row == -1) { - done = true; - step = 6; - } else { - m[row][col] = 2; - if (star_in_row(m, rows, cols, row)) { - find_star_in_row(m, rows, cols, row, col); - rowCover[row] = 1; - colCover[col] = 0; - } else { - done = true; - step = 5; - path_row_0 = row; - path_col_0 = col; - } - } - } - } - - void - Munkres::find_star_in_col(int rows, int cols, double** m, int c, int& r) { - r = -1; - for (int i = 0; i < rows; i++) - if (m[i][c] == 1) - r = i; - } - - void - Munkres::find_prime_in_row(int rows, int cols, double** m, int r, int& c) { - for (int j = 0; j < cols; j++) - if (m[r][j] == 2) - c = j; - } - - void - Munkres::augment_path(double** m, int path_count, int** path) { - for (int p = 0; p < path_count; p++) - if (m[path[p][0]][path[p][1]] == 1) - m[path[p][0]][path[p][1]] = 0; - else - m[path[p][0]][path[p][1]] = 1; - } - - void - Munkres::clear_covers(int rows, int cols, int* rowCover, int* colCover) { - for (int r = 0; r < rows; r++) - rowCover[r] = 0; - for (int c = 0; c < cols; c++) - colCover[c] = 0; - } - - void - Munkres::erase_primes(int rows, int cols, 
double** m) { - for (int r = 0; r < rows; r++) - for (int c = 0; c < cols; c++) - if (m[r][c] == 2) - m[r][c] = 0; - } - - void - Munkres::step_five(int rows, int cols, int* rowCover, int* colCover, double** m, int& path_row_0, - int& path_col_0, int& path_count, int** path, int& step) { - bool done; - int r = -1; - int c = -1; - - path_count = 1; - path[path_count - 1][0] = path_row_0; - path[path_count - 1][1] = path_col_0; - done = false; - while (!done) { - find_star_in_col(rows, cols, m, path[path_count - 1][1], r); - if (r > -1) { - path_count += 1; - path[path_count - 1][0] = r; - path[path_count - 1][1] = path[path_count - 2][1]; - } else - done = true; - if (!done) { - find_prime_in_row(rows, cols, m, path[path_count - 1][0], c); - path_count += 1; - path[path_count - 1][0] = path[path_count - 2][0]; - path[path_count - 1][1] = c; - } - } - augment_path(m, path_count, path); - clear_covers(rows, cols, rowCover, colCover); - erase_primes(rows, cols, m); - step = 3; - } - - void - Munkres::find_smallest(double** matrix, int rows, int cols, int* rowCover, int* colCover, - double& minval) { - for (int r = 0; r < rows; r++) - for (int c = 0; c < cols; c++) - if (rowCover[r] == 0 && colCover[c] == 0) - if (minval > matrix[r][c]) - minval = matrix[r][c]; - } - - void - Munkres::step_six(double** matrix, int rows, int cols, int* rowCover, int* colCover, int& step) { - double minval = INFINITY; - find_smallest(matrix, rows, cols, rowCover, colCover, minval); - for (int r = 0; r < rows; r++) - for (int c = 0; c < cols; c++) { - if (rowCover[r] == 1) - matrix[r][c] += minval; - if (colCover[c] == 0) - matrix[r][c] -= minval; - } - step = 4; - } - - double** - Munkres::preprocess(cv::Mat& matrix_in, bool max) - { - // Pad input matrix in order to make it square: - int max_dim = MAX(matrix_in.rows, matrix_in.cols); - cv::Mat matrix_in_padded(max_dim, max_dim, CV_64F, 1000000.0); // padding values are very high numbers (not valid for association) - for (int i = 0; i < matrix_in.rows; i++) - { - for (int j = 0; j < matrix_in.cols; j++) - { - matrix_in_padded.at(i,j) = matrix_in.at(i,j); - } - } - - // Conversion from cv::Mat to array: - int rows = matrix_in_padded.rows; - int cols = matrix_in_padded.cols; - double** matrix = new double*[rows]; - for (int i = 0; i < rows; i++) - { - matrix[i] = new double[cols]; - for (int j = 0; j < cols; j++) - { - matrix[i][j] = matrix_in_padded.at(i,j); - } - } - - // Change cost matrix according to the type of optimum to find (minimum or maximum): - if (max == true) { - double maxValue = matrix[0][0]; - for (int i = 0; i < rows; i++) { - for (int j = 0; j < cols; j++) { - if (matrix[i][j] > maxValue) { - maxValue = matrix[i][j]; - } - } - } - - for (int i = 0; i < rows; i++) { - for (int j = 0; j < cols; j++) { - matrix[i][j] = maxValue - matrix[i][j]; - } - } - } - - return matrix; - } - - cv::Mat - Munkres::solve(cv::Mat& matrix_in, bool max) - { - // Preprocessing: - double** matrix = preprocess(matrix_in, max); - - int max_dim = MAX(matrix_in.rows, matrix_in.cols); - int rows = max_dim; - int cols = max_dim; - - bool done = false; - int step = 1; - int* rowCover = new int[rows]; - int* colCover = new int[cols]; - double** m = new double*[rows]; - int path_row_0 = 0; - int path_col_0 = 0; - int path_count = 0; - for (int j = 0; j < cols; j++) { - colCover[j] = 0; - } - for (int i = 0; i < rows; i++) { - rowCover[i] = 0; - m[i] = new double[cols]; - for (int j = 0; j < cols; j++) { - m[i][j] = 0; - } - } - int** path = new int*[rows*cols]; - for (int i = 
0; i < rows*cols; i++) { - path[i] = new int[2]; - } - - // Main loop: - while (!done) { - switch (step) { - case 1: - step_one(matrix, rows, cols, step); - break; - case 2: - step_two(matrix, rows, cols, rowCover, colCover, m, step); - break; - case 3: - step_three(rows, cols, colCover, m, step); - break; - case 4: - step_four(matrix, rows, cols, rowCover, colCover, m, path_row_0, path_col_0, - step); - break; - case 5: - step_five(rows, cols, rowCover, colCover, m, path_row_0, path_col_0, - path_count, path, step); - break; - case 6: - step_six(matrix, rows, cols, rowCover, colCover, step); - break; - case 7: - done = true; - break; - } - } - - // Conversion from array to cv::Mat: - cv::Mat matrix_out(matrix_in.rows, matrix_in.cols, CV_64F); - for (int i = 0; i < matrix_in.rows; i++) - { - for (int j = 0; j < matrix_in.cols; j++) - { - matrix_out.at(i,j) = m[i][j] - 1; - } - } - - // Releasing the memory - for(int r = 0; r < max_dim; ++r) - { - delete []matrix[r]; - } - delete []matrix; - delete []rowCover; - delete []colCover; - for (int i = 0; i < rows; i++) { - delete []m[i]; - } - delete []m; - for (int i = 0; i < rows*cols; i++) { - delete []path[i]; - } - delete []path; - - return matrix_out; - } - - } /* namespace tracking */ -} /* namespace open_ptrack */ - diff --git a/tracking/launch/tracking/src/track.cpp b/tracking/launch/tracking/src/track.cpp deleted file mode 100644 index 840c659..0000000 --- a/tracking/launch/tracking/src/track.cpp +++ /dev/null @@ -1,637 +0,0 @@ -/* - * Software License Agreement (BSD License) - * - * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] - * Copyright (c) 2013-, Open Perception, Inc. - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the copyright holder(s) nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
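A reading aid for the Track class deleted below: each track keeps a ring buffer of MAX_SIZE = 90 past filter states and maps a signed offset, measured in filter periods, onto a buffer slot. MAX_SIZE is added before the modulo so that negative offsets stay non-negative (C++ % keeps the sign of the dividend). A self-contained sketch of that indexing, with hypothetical values:

    #include <cmath>
    #include <cstdio>

    int main()
    {
      // Hedged sketch of the ring-buffer indexing used by Track below;
      // MAX_SIZE and the index formula come from the deleted code.
      const int MAX_SIZE = 90;                 // history length hard-coded below
      double period = 1.0 / 30.0;              // assumed 30 Hz filter period
      int last_time_predicted_index = 5;       // slot of the latest prediction

      double dt = -2.0 * period;               // look two filter periods back
      int difference = int(std::round(dt / period));                    // -2
      int index = (MAX_SIZE + last_time_predicted_index + difference) % MAX_SIZE;

      std::printf("slot %d\n", index);         // prints "slot 3"
      return 0;
    }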
- * - * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] - * - */ - -#include - -#include - -namespace open_ptrack -{ - namespace tracking - { - - Track::Track( - int id, - std::string frame_id, - double position_variance, - double acceleration_variance, - double period, - bool velocity_in_motion_term) : - id_(id), - frame_id_(frame_id), - period_(period), - velocity_in_motion_term_(velocity_in_motion_term) - { - color_ = Eigen::Vector3f( - float(rand() % 256) / 255, - float(rand() % 256) / 255, - float(rand() % 256) / 255); - - MAX_SIZE = 90; //XXX create a parameter!!! - if (velocity_in_motion_term_) - { - filter_ = new open_ptrack::tracking::KalmanFilter(period, position_variance, acceleration_variance, 4); - tmp_filter_ = new open_ptrack::tracking::KalmanFilter(period, position_variance, acceleration_variance, 4); - mahalanobis_map4d_.resize(MAX_SIZE, MahalanobisParameters4d()); - } - else - { - filter_ = new open_ptrack::tracking::KalmanFilter(period, position_variance, acceleration_variance, 2); - tmp_filter_ = new open_ptrack::tracking::KalmanFilter(period, position_variance, acceleration_variance, 2); - mahalanobis_map2d_.resize(MAX_SIZE, MahalanobisParameters2d()); - } - - } - - Track::~Track() - { - delete filter_; - delete tmp_filter_; - } - - void - Track::init(const Track& old_track) - { - double x, y; - old_track.filter_->getState(x, y); - - filter_->init(x, y, 10, old_track.velocity_in_motion_term_); - - *tmp_filter_ = *filter_; - visibility_ = old_track.visibility_; - - ROS_INFO("%d -> %d", old_track.id_, id_); - - z_ = old_track.z_; - height_ = old_track.height_; - distance_ = old_track.distance_; - age_ = old_track.age_; - - detection_source_ = old_track.detection_source_; - velocity_in_motion_term_ = old_track.velocity_in_motion_term_; - validated_ = validated_ || old_track.validated_; - low_confidence_consecutive_frames_ = old_track.low_confidence_consecutive_frames_; - - first_time_detected_ = old_track.first_time_detected_; - last_time_detected_ = old_track.last_time_detected_; - last_time_detected_with_high_confidence_ = old_track.last_time_detected_with_high_confidence_; - last_time_predicted_ = old_track.last_time_predicted_; - last_time_predicted_index_ = old_track.last_time_predicted_index_; - - data_association_score_ = old_track.data_association_score_; - } - - void - Track::init(double x, double y, double z, double height, double distance, - open_ptrack::detection::DetectionSource* detection_source) - { - //Init Kalman filter - filter_->init(x, y, distance, velocity_in_motion_term_); - z_ = z; - height_ = height; - distance_ = distance; - status_ = NEW; - visibility_ = VISIBLE; - validated_ = false; - updates_with_enough_confidence_ = low_confidence_consecutive_frames_ = 0; - detection_source_ = detection_source; - first_time_detected_ = detection_source->getTime(); - last_time_predicted_ = last_time_detected_ = last_time_detected_with_high_confidence_ = detection_source->getTime(); - last_time_predicted_index_ = 0; - age_ = 0.0; - } - - void - Track::update( - double x, - double y, - double z, - double height, - double distance, - double data_assocation_score, - double confidence, - double min_confidence, - double min_confidence_detections, - open_ptrack::detection::DetectionSource* detection_source, - bool first_update) - { - //Update Kalman filter - int difference; - double vx, vy; - if (velocity_in_motion_term_) - { - ros::Duration d(1.0); - ros::Duration d2(2.0); - - double t = 
std::max(first_time_detected_.toSec(), (detection_source->getTime() - d).toSec()); - t = std::min(t, last_time_detected_.toSec()); - t = std::max(t, (detection_source->getTime() - d2).toSec()); - double dt = t - last_time_predicted_.toSec(); - - difference = int(round(dt / period_)); - int vIndex = (MAX_SIZE + last_time_predicted_index_ + difference) % MAX_SIZE; - - if(difference != 0) - { - vx = - (x - mahalanobis_map4d_[vIndex].x) / dt; - vy = - (y - mahalanobis_map4d_[vIndex].y) / dt; - } - else - { - vx = mahalanobis_map4d_[vIndex].vx; - vy = mahalanobis_map4d_[vIndex].vy; - } - } - - // Update Kalman filter from the last time the track was visible: - int framesLost = int(round((detection_source->getTime() - last_time_detected_).toSec() / period_)) - 1; - - for(int i = 0; i < framesLost; i++) - { - filter_->predict(); - filter_->update(); - } - - filter_->predict(); - if (velocity_in_motion_term_) - { - filter_->update(x, y, vx, vy, distance); - } - else - { - filter_->update(x, y, distance); - } - - *tmp_filter_ = *filter_; - difference = int(round((detection_source->getTime() - last_time_predicted_).toSec() / period_)); - last_time_predicted_index_ = (MAX_SIZE + last_time_predicted_index_ + difference) % MAX_SIZE; - last_time_predicted_ = last_time_detected_ = detection_source->getTime(); - if (velocity_in_motion_term_) - filter_->getMahalanobisParameters(mahalanobis_map4d_[last_time_predicted_index_]); - else - filter_->getMahalanobisParameters(mahalanobis_map2d_[last_time_predicted_index_]); - - // Update z_ and height_ with a weighted combination of current and new values: - z_ = z_ * 0.9 + z * 0.1; - height_ = height_ * 0.9 + height * 0.1; - distance_ = distance; - - if(confidence > min_confidence) - { - updates_with_enough_confidence_++; - last_time_detected_with_high_confidence_ = last_time_detected_; - } - -// if (((confidence - 0.5) < min_confidence_detections) && ((last_detector_confidence_ - 0.5) < min_confidence_detections)) - if ((confidence < (min_confidence + min_confidence_detections)/2) && (last_detector_confidence_ < (min_confidence + min_confidence_detections)/2)) - { - low_confidence_consecutive_frames_++; - } - else - { - low_confidence_consecutive_frames_ = 0; - } - last_detector_confidence_ = confidence; - - data_association_score_ = data_assocation_score; - - // Compute track age: - age_ = (detection_source->getTime() - first_time_detected_).toSec(); - - detection_source_ = detection_source; - } - - void - Track::validate() - { - validated_ = true; - } - - bool - Track::isValidated() - { - return validated_; - } - - int - Track::getId() - { - return id_; - } - - void - Track::setStatus(Track::Status s) - { - status_ = s; - } - - Track::Status - Track::getStatus() - { - return status_; - } - - void - Track::setVisibility(Track::Visibility v) - { - visibility_ = v; - } - - Track::Visibility - Track::getVisibility() - { - return visibility_; - } - - float - Track::getSecFromFirstDetection(ros::Time current_time) - { - return (current_time - first_time_detected_).toSec(); - } - - float - Track::getSecFromLastDetection(ros::Time current_time) - { - return (current_time - last_time_detected_).toSec(); - } - - float - Track::getSecFromLastHighConfidenceDetection(ros::Time current_time) - { - return (current_time - last_time_detected_with_high_confidence_).toSec(); - } - - float - Track::getLowConfidenceConsecutiveFrames() - { - return low_confidence_consecutive_frames_; - } - - int - Track::getUpdatesWithEnoughConfidence() - { - return updates_with_enough_confidence_;
} - - double - Track::getMahalanobisDistance(double x, double y, const ros::Time& when) - { - int difference = int(round((when - last_time_predicted_).toSec() / period_)); -// std::cout << "time difference from last detection: " << difference << std::endl; - int index; - if(difference <= 0) - { - index = (MAX_SIZE + last_time_predicted_index_ + difference) % MAX_SIZE; - } - else - { - for(int i = 0; i < difference; i++) - { - tmp_filter_->predict(); - last_time_predicted_index_ = (last_time_predicted_index_ + 1) % MAX_SIZE; - if (velocity_in_motion_term_) - tmp_filter_->getMahalanobisParameters(mahalanobis_map4d_[last_time_predicted_index_]); - else - tmp_filter_->getMahalanobisParameters(mahalanobis_map2d_[last_time_predicted_index_]); - tmp_filter_->update(); - } - last_time_predicted_ = when; - index = last_time_predicted_index_; - } - - if (velocity_in_motion_term_) - { - ros::Duration d(1.0); - ros::Duration d2(2.0); - - double t = std::max(first_time_detected_.toSec(), (when - d).toSec()); - t = std::min(t, last_time_detected_.toSec()); - t = std::max(t, (last_time_predicted_ - d2).toSec()); - double dt = t - last_time_predicted_.toSec(); - - difference = int(round(dt / period_)); - int vIndex = (MAX_SIZE + last_time_predicted_index_ + difference) % MAX_SIZE; - -// std::cout << "dt: " << dt << std::endl; -// std::cout << "vIndex: " << vIndex << std::endl; - - double vx, vy; - if(difference != 0) - { - vx = - (x - mahalanobis_map4d_[vIndex].x) / dt; - vy = - (y - mahalanobis_map4d_[vIndex].y) / dt; - } - else - { - vx = mahalanobis_map4d_[vIndex].vx; - vy = mahalanobis_map4d_[vIndex].vy; - } - -// std::cout << "vx: " << vx << ", vy: " << vy<< std::endl; - - return open_ptrack::tracking::KalmanFilter::performMahalanobisDistance(x, y, vx, vy, mahalanobis_map4d_[index]); - } - else - { - return open_ptrack::tracking::KalmanFilter::performMahalanobisDistance(x, y, mahalanobis_map2d_[index]); - } - - } - - void - Track::draw(bool vertical) - { - cv::Scalar color(int(255.0 * color_(0)), int(255.0 * color_(1)), int(255.0 * color_(2))); - - double _x2, _y2; - tmp_filter_->getState(_x2, _y2); - Eigen::Vector3d centroid2(_x2, _y2, z_); - centroid2 = detection_source_->transformToCam(centroid2); - - if(visibility_ == Track::NOT_VISIBLE) - return; - - double _x, _y; - filter_->getState(_x, _y); - - cv::Scalar darkBlue(130,0,0); - cv::Scalar white(255,255,255); - Eigen::Vector3d centroid(_x, _y, z_ ); - Eigen::Vector3d top(_x, _y, z_ + (height_/2)); - Eigen::Vector3d bottom(_x, _y, z_ - (height_/2)); - - std::vector<Eigen::Vector3d> points; - double delta = height_ / 5.0; - points.push_back(Eigen::Vector3d(_x - delta, _y - delta, z_ - (height_/2))); - points.push_back(Eigen::Vector3d(_x + delta, _y - delta, z_ - (height_/2))); - points.push_back(Eigen::Vector3d(_x + delta, _y + delta, z_ - (height_/2))); - points.push_back(Eigen::Vector3d(_x - delta, _y + delta, z_ - (height_/2))); - points.push_back(Eigen::Vector3d(_x - delta, _y - delta, z_ + (height_/2))); - points.push_back(Eigen::Vector3d(_x + delta, _y - delta, z_ + (height_/2))); - points.push_back(Eigen::Vector3d(_x + delta, _y + delta, z_ + (height_/2))); - points.push_back(Eigen::Vector3d(_x - delta, _y + delta, z_ + (height_/2))); - - //TODO loop for each detection source - - centroid = detection_source_->transformToCam(centroid); - cv::circle(detection_source_->getImage(), cv::Point(centroid(0), centroid(1)), 5, color, 1); - top = detection_source_->transformToCam(top); - bottom = detection_source_->transformToCam(bottom); - for(std::vector<Eigen::Vector3d>::iterator it
= points.begin(); it != points.end(); it++) - *it = detection_source_->transformToCam(*it); - - // Draw a parallelepiped around the person: - for(size_t i = 0; i < 4; i++) - { - cv::line(detection_source_->getImage(), cv::Point(points[i](0), points[i](1)), - cv::Point(points[(i + 1) % 4](0), points[(i + 1) % 4](1)), color, - visibility_ == VISIBLE ? 2 : 1, CV_AA); - cv::line(detection_source_->getImage(), cv::Point(points[i + 4](0), points[i + 4](1)), - cv::Point(points[(i + 1) % 4 + 4](0), points[(i + 1) % 4 + 4](1)), color, - visibility_ == VISIBLE ? 2 : 1, CV_AA); - cv::line(detection_source_->getImage(), cv::Point(points[i](0), points[i](1)), - cv::Point(points[i + 4](0), points[i + 4](1)), color, - visibility_ == VISIBLE ? 2 : 1, CV_AA); - } - - std::stringstream ss; - float distance_to_display = float(int(distance_*100))/100; - ss << id_ << ": " << distance_to_display; - - float id_half_number_of_digits = (float)(ss.str().size())/2; - - // Draw white id_ on a blue background: - if (not vertical) - { - cv::rectangle(detection_source_->getImage(), cv::Point(top(0)-8*id_half_number_of_digits, top(1)-12), - cv::Point(top(0)+12*id_half_number_of_digits, top(1) +2), darkBlue, CV_FILLED, - visibility_ == VISIBLE ? 8 : 1);//, 0); - cv::putText(detection_source_->getImage(), ss.str(), cv::Point(top(0)-8*id_half_number_of_digits, - top(1)), cv::FONT_HERSHEY_SIMPLEX, 0.5, white, 1.7, CV_AA); // white id_ - } - else - { - cv::Mat rotated_image = detection_source_->getImage(); - cv::flip(rotated_image.t(), rotated_image, -1); - cv::flip(rotated_image, rotated_image, 1); - cv::rectangle(rotated_image, cv::Point(top(1)-8*id_half_number_of_digits, (rotated_image.rows - top(0)+1)-12), - cv::Point(top(1) +12*id_half_number_of_digits, (rotated_image.rows - top(0)+1)+2), darkBlue, CV_FILLED, - visibility_ == VISIBLE ?
8 : 1);//, 0); - cv::putText(rotated_image, ss.str(), cv::Point(top(1)-8*id_half_number_of_digits, rotated_image.rows - top(0)+1), - cv::FONT_HERSHEY_SIMPLEX, 0.5, white, 1.7, CV_AA); // white id_ - cv::flip(rotated_image, rotated_image, -1); - cv::flip(rotated_image, rotated_image, 1); - rotated_image = rotated_image.t(); - detection_source_->setImage(rotated_image); - } - //TODO end loop - } - - void - Track::createMarker(visualization_msgs::MarkerArray::Ptr& msg) - { - if(visibility_ == Track::NOT_VISIBLE) - return; - - double _x, _y; - filter_->getState(_x, _y); - - visualization_msgs::Marker marker; - - marker.header.frame_id = frame_id_; - marker.header.stamp = ros::Time::now(); - - marker.ns = "people"; - marker.id = id_; - - marker.type = visualization_msgs::Marker::SPHERE; - marker.action = visualization_msgs::Marker::ADD; - - marker.pose.position.x = _x; - marker.pose.position.y = _y; - marker.pose.position.z = z_; - marker.pose.orientation.x = 0.0; - marker.pose.orientation.y = 0.0; - marker.pose.orientation.z = 0.0; - marker.pose.orientation.w = 1.0; - - marker.scale.x = 0.1; - marker.scale.y = 0.1; - marker.scale.z = 0.1; - - marker.color.r = color_(2); - marker.color.g = color_(1); - marker.color.b = color_(0); - marker.color.a = 1.0; - - marker.lifetime = ros::Duration(0.2); - - msg->markers.push_back(marker); - - /////////////////////////////////// - - visualization_msgs::Marker text_marker; - - text_marker.header.frame_id = frame_id_; - text_marker.header.stamp = ros::Time::now(); - - text_marker.ns = "numbers"; - text_marker.id = id_; - - text_marker.type = visualization_msgs::Marker::TEXT_VIEW_FACING; - text_marker.action = visualization_msgs::Marker::ADD; - - std::stringstream ss; - ss << id_; - text_marker.text = ss.str(); - - text_marker.pose.position.x = _x; - text_marker.pose.position.y = _y; - text_marker.pose.position.z = z_ + (height_/2) + 0.1; - text_marker.pose.orientation.x = 0.0; - text_marker.pose.orientation.y = 0.0; - text_marker.pose.orientation.z = 0.0; - text_marker.pose.orientation.w = 1.0; - - text_marker.scale.x = 0.2; - text_marker.scale.y = 0.2; - text_marker.scale.z = 0.2; - - text_marker.color.r = color_(2); - text_marker.color.g = color_(1); - text_marker.color.b = color_(0); - text_marker.color.a = 1.0; - - text_marker.lifetime = ros::Duration(0.2); - - msg->markers.push_back(text_marker); - - } - - bool - Track::getPointXYZRGB(pcl::PointXYZRGB& p) - { - if(visibility_ == Track::NOT_VISIBLE) - return false; - - double _x, _y; - filter_->getState(_x, _y); - - p.x = float(_x); - p.y = float(_y); - p.z = float(z_); - uchar* rgb_ptr = (uchar*)&p.rgb; - *rgb_ptr++ = uchar(color_(0) * 255.0f); - *rgb_ptr++ = uchar(color_(1) * 255.0f); - *rgb_ptr++ = uchar(color_(2) * 255.0f); - return true; - } - - void - Track::toMsg(opt_msgs::Track& track_msg, bool vertical) - { - - double _x, _y; - filter_->getState(_x, _y); - - track_msg.id = id_; - track_msg.x = _x; - track_msg.y = _y; - track_msg.height = height_; - track_msg.distance = distance_; - track_msg.age = age_; - track_msg.confidence = - data_association_score_; // minus for transforming distance into a sort of confidence - track_msg.visibility = visibility_; - - Eigen::Vector3d top(_x, _y, z_ + (height_/2)); - Eigen::Vector3d bottom(_x, _y, z_ - (height_/2)); - top = detection_source_->transformToCam(top); - bottom = detection_source_->transformToCam(bottom); - if (not vertical) - { - track_msg.box_2D.height = int(std::abs((top - bottom)(1))); - track_msg.box_2D.width = track_msg.box_2D.height / 2; - 
track_msg.box_2D.x = int(top(0)) - track_msg.box_2D.height / 4; - track_msg.box_2D.y = int(top(1)); - } - else - { - track_msg.box_2D.width = int(std::abs((top - bottom)(0))); - track_msg.box_2D.height = track_msg.box_2D.width / 2; - track_msg.box_2D.x = int(top(0)) - track_msg.box_2D.width; - track_msg.box_2D.y = int(top(1)) - track_msg.box_2D.width / 4; - } - } - - open_ptrack::detection::DetectionSource* - Track::getDetectionSource() - { - return detection_source_; - } - - void - Track::setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance) - { - velocity_in_motion_term_ = velocity_in_motion_term; - - // Re-initialize Kalman filter - filter_->setPredictModel (acceleration_variance); - filter_->setObserveModel (position_variance); - double x, y; - filter_->getState(x, y); - filter_->init(x, y, distance_, velocity_in_motion_term_); - - *tmp_filter_ = *filter_; - } - - void - Track::setAccelerationVariance (double acceleration_variance) - { - filter_->setPredictModel (acceleration_variance); - tmp_filter_->setPredictModel (acceleration_variance); - } - - void - Track::setPositionVariance (double position_variance) - { - filter_->setObserveModel (position_variance); - tmp_filter_->setObserveModel (position_variance); - } - } /* namespace tracking */ -} /* namespace open_ptrack */ diff --git a/tracking/launch/tracking/src/tracker.cpp b/tracking/launch/tracking/src/tracker.cpp deleted file mode 100644 index e8e7b27..0000000 --- a/tracking/launch/tracking/src/tracker.cpp +++ /dev/null @@ -1,566 +0,0 @@ -/* - * Software License Agreement (BSD License) - * - * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] - * Copyright (c) 2013-, Open Perception, Inc. - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the copyright holder(s) nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
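For context on the deleted tracker.cpp below: data association builds a track-by-detection matrix from a weighted sum of detector and motion (Mahalanobis) likelihoods, replaces NaN/inf entries with 2 * gate_distance_, prices every pair beyond gate_distance_ out with a 1000000.0 sentinel, and hands the result to the Munkres solver. A hedged sketch of just the gating step, with hypothetical numbers:

    #include <opencv2/core/core.hpp>

    int main()
    {
      // Hedged sketch of the gating that createCostMatrix() performs below;
      // the sentinel value matches the deleted code, the data do not.
      double gate_distance = 30.0;
      cv::Mat_<double> distance(1, 3);
      distance << 12.5, 45.0, 28.0;          // one track, three detections

      cv::Mat_<double> cost = distance.clone();
      for (int j = 0; j < cost.cols; j++)
        if (distance(0, j) > gate_distance)
          cost(0, j) = 1000000.0;
      // cost is now [12.5, 1000000.0, 28.0]: detection 1 can no longer be
      // chosen by the assignment solver.
      return 0;
    }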
- * - * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] - * - */ - -#include - -#include - -namespace open_ptrack -{ - namespace tracking - { - - Tracker::Tracker( - double gate_distance, - bool detector_likelihood, - std::vector likelihood_weights, - bool velocity_in_motion_term, - double min_confidence, - double min_confidence_detections, - double sec_before_old, - double sec_before_fake, - double sec_remain_new, - int detections_to_validate, - double period, - double position_variance, - double acceleration_variance, - std::string world_frame_id, - bool debug_mode, - bool vertical) : - gate_distance_(gate_distance), - detector_likelihood_(detector_likelihood), - likelihood_weights_(likelihood_weights), - velocity_in_motion_term_(velocity_in_motion_term), - min_confidence_(min_confidence), - min_confidence_detections_(min_confidence_detections), - sec_before_old_(sec_before_old), - sec_before_fake_(sec_before_fake), - sec_remain_new_(sec_remain_new), - detections_to_validate_(detections_to_validate), - period_(period), - position_variance_(position_variance), - acceleration_variance_(acceleration_variance), - world_frame_id_(world_frame_id), - debug_mode_(debug_mode), - vertical_(vertical) - { - tracks_counter_ = 0; - } - - Tracker::~Tracker() - { - // TODO Auto-generated destructor stub - } - - void - Tracker::newFrame(const std::vector& detections) - { - detections_.clear(); - unassociated_detections_.clear(); - lost_tracks_.clear(); - new_tracks_.clear(); - detections_ = detections; - - ros::Time current_detections_time = detections_[0].getSource()->getTime(); - - for(std::list::iterator it = tracks_.begin(); it != tracks_.end();) - { - open_ptrack::tracking::Track* t = *it; - bool deleted = false; - - if(((t->getVisibility() == Track::NOT_VISIBLE && (t->getSecFromLastHighConfidenceDetection(current_detections_time)) >= sec_before_old_) - || (!t->isValidated() && t->getSecFromFirstDetection(current_detections_time) >= sec_before_fake_))) - { - if (debug_mode_) - { - std::cout << "Track " << t->getId() << " DELETED" << std::endl; - } - delete t; - it = tracks_.erase(it); - deleted = true; - } - else if(!t->isValidated() && t->getUpdatesWithEnoughConfidence() == detections_to_validate_) - { - t->validate(); - if (debug_mode_) - { - std::cout << "Track " << t->getId() << " VALIDATED" << std::endl; - } - } - else if(t->getStatus() == Track::NEW && t->getSecFromFirstDetection(current_detections_time) >= sec_remain_new_) - { - t->setStatus(Track::NORMAL); - if (debug_mode_) - { - std::cout << "Track " << t->getId() << " set to NORMAL" << std::endl; - } - } - - if(!deleted) - { - if(t->getStatus() == Track::NEW && t->getVisibility() == Track::VISIBLE) - new_tracks_.push_back(t); - if(t->getVisibility() == Track::NOT_VISIBLE) - lost_tracks_.push_back(t); - it++; - } - - } - } - - void - Tracker::updateTracks() - { - createDistanceMatrix(); - createCostMatrix(); - - // Solve Global Nearest Neighbor problem: - Munkres munkres; - cost_matrix_ = munkres.solve(cost_matrix_, false); // rows: targets (tracks), cols: detections - - updateDetectedTracks(); - fillUnassociatedDetections(); - updateLostTracks(); - createNewTracks(); - } - -// void Tracker::drawRgb() -// { -// for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) -// (*it)->draw(vertical_); -// } - - void - Tracker::toMarkerArray(visualization_msgs::MarkerArray::Ptr& msg) - { - for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) - { - 
open_ptrack::tracking::Track* t = *it; - t->createMarker(msg); - } - } - - void - Tracker::toMsg(opt_msgs::TrackArray::Ptr& msg) - { - for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) - { - open_ptrack::tracking::Track* t = *it; - - opt_msgs::Track track; - t->toMsg(track, vertical_); - msg->tracks.push_back(track); - } - } - - void - Tracker::toMsg(opt_msgs::TrackArray::Ptr& msg, std::string& source_frame_id) - { - for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) - { - open_ptrack::tracking::Track* t = *it; - if (strcmp(t->getDetectionSource()->getFrameId().c_str(), source_frame_id.c_str()) == 0) // if strings are equal - { - opt_msgs::Track track; - t->toMsg(track, vertical_); - -// // For publishing only not occluded tracks: -// if (track.visibility < 2) -// msg->tracks.push_back(track); - - // For publishing all tracks: - msg->tracks.push_back(track); - } - } - } - - void - Tracker::getAliveIDs (opt_msgs::IDArray::Ptr& msg) - { - for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) - { - open_ptrack::tracking::Track* t = *it; - msg->ids.push_back ((*it)->getId()); - } - msg->max_ID = tracks_counter_; - } - - size_t - Tracker::appendToPointCloud(pcl::PointCloud::Ptr& pointcloud, size_t starting_index, size_t max_size) - { - for(size_t i = 0; i < tracks_.size() && pointcloud->size() < max_size; i++) - { - pcl::PointXYZRGB point; - pointcloud->push_back(point); - } - - for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) - { - open_ptrack::tracking::Track* t = *it; - if(t->getPointXYZRGB(pointcloud->points[starting_index])) - starting_index = (starting_index + 1) % max_size; - - } - return starting_index; - } - - /************************ protected methods ************************/ - - int - Tracker::createNewTrack(open_ptrack::detection::Detection& detection) - { - if(detection.getConfidence() < min_confidence_) - return -1; - - open_ptrack::tracking::Track* t; - t = new open_ptrack::tracking::Track( - ++tracks_counter_, - world_frame_id_, - position_variance_, - acceleration_variance_, - period_, - velocity_in_motion_term_ ); - - t->init(detection.getWorldCentroid()(0), detection.getWorldCentroid()(1),detection.getWorldCentroid()(2), - detection.getHeight(), detection.getDistance(), detection.getSource()); - - bool first_update = true; - t->update(detection.getWorldCentroid()(0), detection.getWorldCentroid()(1), detection.getWorldCentroid()(2), - detection.getHeight(), detection.getDistance(), 0.0, - detection.getConfidence(), min_confidence_, min_confidence_detections_, detection.getSource(), first_update); - - ROS_INFO("Created %d", t->getId()); - - tracks_.push_back(t); - return tracks_counter_; - } - - void - Tracker::createDistanceMatrix() - { - distance_matrix_ = cv::Mat_(tracks_.size(), detections_.size()); - int track = 0; - for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) - { - //double x, y, height, vx, vz; - Track* t = *it; - //t->predict(x, y, height, vx, vz); - int measure = 0; - for(std::vector::iterator dit = detections_.begin(); dit != detections_.end(); dit++) - { - double detector_likelihood; - - // Compute detector likelihood: - if (detector_likelihood_) - { - detector_likelihood = dit->getConfidence(); - // detector_likelihood = log((dit->getConfidence() + 3) / 6); - } - else - { - detector_likelihood = 0; - } - - // Compute motion likelihood: - double motion_likelihood = t->getMahalanobisDistance( - dit->getWorldCentroid()(0), - dit->getWorldCentroid()(1), 
- dit->getSource()->getTime()); - - // Compute joint likelihood and put it in the distance matrix: - - distance_matrix_(track, measure++) = likelihood_weights_[0] * detector_likelihood + likelihood_weights_[1] * motion_likelihood; - - // Remove NaN and inf: - if (isnan(distance_matrix_(track, measure-1)) | (not std::isfinite(distance_matrix_(track, measure-1)))) - distance_matrix_(track, measure-1) = 2*gate_distance_; - -// std::cout << (*it)->getId() << ": " << "Motion likelihood: " << likelihood_weights_[0] * motion_likelihood << std::endl; -// if (detector_likelihood_) -// std::cout << (*it)->getId() << ": " << "Detector likelihood: " << likelihood_weights_[1] * dit->getConfidence() << std::endl; -// std::cout << (*it)->getId() << ": " << "JOINT LIKELIHOOD: " << distance_matrix_(track, measure-1) << std::endl; - - /*ROS_INFO("%d(%f, %f) = %f", t->getId(), - dit->getWorldCentroid()(0), - dit->getWorldCentroid()(1), - distance_matrix_(track, measure - 1));*/ - } - track++; - } - -// std::cout << "Distance matrix:" << std::endl; -// for(int row = 0; row < distance_matrix_.rows; row++) -// { -// for(int col = 0; col < distance_matrix_.cols; col++) -// { -// std::cout.width(8); -// std::cout << distance_matrix_(row,col) << ","; -// } -// std::cout << std::endl; -// } -// std::cout << std::endl; - } - - void - Tracker::createCostMatrix() - { - cost_matrix_ = distance_matrix_.clone(); - for(int i = 0; i < distance_matrix_.rows; i++) - { - for(int j = 0; j < distance_matrix_.cols; j++) - { - if(distance_matrix_(i, j) > gate_distance_) - cost_matrix_(i, j) = 1000000.0; - } - } - - -// std::cout << "Munkres input matrix:" << std::endl; -// for(int row = 0; row < cost_matrix_.rows; row++) -// { -// for(int col = 0; col < cost_matrix_.cols; col++) -// { -// std::cout.width(8); -// std::cout << cost_matrix_(row,col) << ","; -// } -// std::cout << std::endl; -// } -// std::cout << std::endl; - } - - void - Tracker::updateDetectedTracks() - { -// std::cout << "Munkres output matrix:" << std::endl; -// for(int row = 0; row < cost_matrix_.rows; row++) { -// for(int col = 0; col < cost_matrix_.cols; col++) { -// std::cout.width(1); -// std::cout << cost_matrix_(row,col) << ","; -// } -// std::cout << std::endl; -// } -// std::cout << std::endl; - - // Iterate over every track: - int track = 0; - for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) - { - bool updated = false; - open_ptrack::tracking::Track* t = *it; - - for(int measure = 0; measure < cost_matrix_.cols; measure++) - { - // If a detection<->track association has been found: - if(cost_matrix_(track, measure) == 0.0 && distance_matrix_(track, measure) <= gate_distance_) - { - open_ptrack::detection::Detection& d = detections_[measure]; - - // If the detection has enough confidence in the current frame or in a recent past: -// if ((t->getLowConfidenceConsecutiveFrames() < 10) || ((d.getConfidence() - 0.5) > min_confidence_detections_)) - if ((t->getLowConfidenceConsecutiveFrames() < 10) || (d.getConfidence() > ((min_confidence_ + min_confidence_detections_)/2))) - { - // Update track with the associated detection: - bool first_update = false; - t->update(d.getWorldCentroid()(0), d.getWorldCentroid()(1), d.getWorldCentroid()(2),d.getHeight(), - d.getDistance(), distance_matrix_(track, measure), - d.getConfidence(), min_confidence_, min_confidence_detections_, - d.getSource(), first_update); - - t->setVisibility(d.isOccluded() ? 
Track::OCCLUDED : Track::VISIBLE); - updated = true; - break; - } - else - { - //std::cout << "Id: " << t->getId() << ", lowConfConsFrames: " << t->getLowConfidenceConsecutiveFrames() << ", newConf: " << d.getConfidence()<< std::endl; - } - } - } - if(!updated) - { - if(t->getVisibility() != Track::NOT_VISIBLE) - { - t->setVisibility(Track::NOT_VISIBLE); - //t->update(); - } - } - track++; - } - // std::cout << std::endl; - } - - void - Tracker::fillUnassociatedDetections() - { - // Fill a list with detections not associated to any track: - if(cost_matrix_.cols == 0 && detections_.size() > 0) - { - for(size_t measure = 0; measure < detections_.size(); measure++) - unassociated_detections_.push_back(detections_[measure]); - } - else - { - for(int measure = 0; measure < cost_matrix_.cols; measure++) - { - bool associated = false; - for(int track = 0; track < cost_matrix_.rows; track++) - { - if(cost_matrix_(track, measure) == 0.0) - { - if(distance_matrix_(track, measure) > gate_distance_) - break; - associated = true; - } - } - if(!associated/* && detections_[measure].getConfidence() > min_confidence_*/) - { - unassociated_detections_.push_back(detections_[measure]); - } - } - } - } - - void - Tracker::updateLostTracks() - { - //for(std::list::iterator it = lost_tracks_.begin(); it != lost_tracks_.end(); it++) - // (*it)->update(); - } - - void - Tracker::createNewTracks() - { - for(std::list::iterator dit = unassociated_detections_.begin(); - dit != unassociated_detections_.end(); dit++) - { - createNewTrack(*dit); - } - } - - void - Tracker::setMinConfidenceForTrackInitialization (double min_confidence) - { - min_confidence_ = min_confidence; - } - - void - Tracker::setSecBeforeOld (double sec_before_old) - { - sec_before_old_ = sec_before_old; - } - - void - Tracker::setSecBeforeFake (double sec_before_fake) - { - sec_before_fake_ = sec_before_fake; - } - - void - Tracker::setSecRemainNew (double sec_remain_new) - { - sec_remain_new_ = sec_remain_new; - } - - void - Tracker::setDetectionsToValidate (int detections_to_validate) - { - detections_to_validate_ = detections_to_validate; - } - - void - Tracker::setDetectorLikelihood (bool detector_likelihood) - { - detector_likelihood_ = detector_likelihood; - } - - void - Tracker::setLikelihoodWeights (double detector_weight, double motion_weight) - { - likelihood_weights_[0] = detector_weight; - likelihood_weights_[1] = motion_weight; - } - - void - Tracker::setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance) - { - velocity_in_motion_term_ = velocity_in_motion_term; - acceleration_variance_ = acceleration_variance; - position_variance_ = position_variance; - - // Update all existing tracks: - for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) - { - open_ptrack::tracking::Track* t = *it; - t->setVelocityInMotionTerm (velocity_in_motion_term_, acceleration_variance_, position_variance_); - } - } - - void - Tracker::setAccelerationVariance (double acceleration_variance) - { - acceleration_variance_ = acceleration_variance; - - // Update all existing tracks: - for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) - { - open_ptrack::tracking::Track* t = *it; - t->setAccelerationVariance (acceleration_variance_); - } - } - - void - Tracker::setPositionVariance (double position_variance) - { - position_variance_ = position_variance; - - // Update all existing tracks: - for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) - { - 
open_ptrack::tracking::Track* t = *it; - t->setPositionVariance (position_variance_); - } - } - - void - Tracker::setGateDistance (double gate_distance) - { - gate_distance_ = gate_distance; - } - } /* namespace tracking */ -} /* namespace open_ptrack */ diff --git a/zed_ros_wrapper/CMakeLists.txt b/zed_ros_wrapper/CMakeLists.txt deleted file mode 100644 index ea6f1ec..0000000 --- a/zed_ros_wrapper/CMakeLists.txt +++ /dev/null @@ -1,100 +0,0 @@ -cmake_minimum_required(VERSION 2.8.7) -project(zed_wrapper) - -# if CMAKE_BUILD_TYPE is not specified, take 'Release' as default -IF(NOT CMAKE_BUILD_TYPE) - SET(CMAKE_BUILD_TYPE Release) -ENDIF(NOT CMAKE_BUILD_TYPE) - -find_package(ZED 1.0 REQUIRED) - -##For Jetson, OpenCV4Tegra is based on OpenCV2.4 -exec_program(uname ARGS -p OUTPUT_VARIABLE CMAKE_SYSTEM_NAME2) -if ( CMAKE_SYSTEM_NAME2 MATCHES "aarch64" ) # X1 - SET(OCV_VERSION "2.4") - SET(CUDA_VERSION "7.0") -elseif(CMAKE_SYSTEM_NAME2 MATCHES "armv7l" ) # K1 - SET(OCV_VERSION "2.4") - SET(CUDA_VERSION "6.5") -else() # Desktop - SET(OCV_VERSION "3.1") - SET(CUDA_VERSION "7.5") -endif() - -find_package(OpenCV ${OCV_VERSION} COMPONENTS core highgui imgproc REQUIRED) -find_package(CUDA ${CUDA_VERSION} REQUIRED) - -find_package(PCL REQUIRED) - -find_package(catkin REQUIRED COMPONENTS - image_transport - roscpp - rosconsole - sensor_msgs - dynamic_reconfigure - tf2_ros - pcl_conversions -) - -generate_dynamic_reconfigure_options( - cfg/Zed.cfg -) - -catkin_package( - CATKIN_DEPENDS - roscpp - rosconsole - sensor_msgs - opencv - image_transport - dynamic_reconfigure - tf2_ros - pcl_conversions -) - -############################################################################### -# INCLUDES - -# Specify locations of header files. -include_directories( - ${catkin_INCLUDE_DIRS} - ${CUDA_INCLUDE_DIRS} - ${ZED_INCLUDE_DIRS} - ${OpenCV_INCLUDE_DIRS} - ${PCL_INCLUDE_DIRS} -) - -link_directories(${ZED_LIBRARY_DIR}) -link_directories(${CUDA_LIBRARY_DIRS}) -link_directories(${OpenCV_LIBRARY_DIRS}) -link_directories(${PCL_LIBRARY_DIRS}) - -############################################################################### - -############################################################################### -# EXECUTABLE - -add_definitions(-std=c++11)# -m64) #-Wall) - - -add_executable( - zed_wrapper_node - src/zed_wrapper_node.cpp -) - -target_link_libraries( - zed_wrapper_node - ${catkin_LIBRARIES} - ${ZED_LIBRARIES} - ${CUDA_LIBRARIES} ${CUDA_nppi_LIBRARY} ${CUDA_npps_LIBRARY} - ${OpenCV_LIBS} - ${PCL_LIBRARIES} - ) - -add_dependencies(zed_wrapper_node ${PROJECT_NAME}_gencfg) -############################################################################### - -#Add all files in subdirectories of the project in -# a dummy_target so qtcreator have access to all files -FILE(GLOB_RECURSE extra_files ${CMAKE_SOURCE_DIR}/*) -add_custom_target(dummy_${PROJECT_NAME} SOURCES ${extra_files}) diff --git a/zed_ros_wrapper/LICENSE b/zed_ros_wrapper/LICENSE deleted file mode 100644 index f7581bc..0000000 --- a/zed_ros_wrapper/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2015, Stereolabs -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. 
-
-* Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-* Neither the name of zed-ros-wrapper nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/zed_ros_wrapper/README.md b/zed_ros_wrapper/README.md
deleted file mode 100644
index 93dda36..0000000
--- a/zed_ros_wrapper/README.md
+++ /dev/null
@@ -1,105 +0,0 @@
-# zed-ros-wrapper
-ROS wrapper for the ZED Stereo Camera SDK
-
-**This sample is designed to work with the ZED stereo camera only and requires the ZED SDK. For more information: https://www.stereolabs.com**
-
-**This wrapper also requires the PCL library**
-
-This sample is a wrapper for the ZED library in order to use the ZED Camera with ROS. It can provide the camera images, the depth map, a 3D textured point cloud, and the odometry given by the ZED tracking.
-Published topics:
-
- - /camera/point_cloud/cloud
- - /camera/depth/camera_info
- - /camera/depth/image_rect_color
- - /camera/left/camera_info
- - /camera/left/image_rect_color
- - /camera/rgb/camera_info
- - /camera/rgb/image_rect_color
- - /camera/odom
-
-A set of parameters can be specified in the launch file provided in the launch directory.
-
- - zed.launch
-
-The zed_ros_wrapper is a catkin package made to run on ROS Indigo, and depends
-on the following ROS packages:
-
- - tf2_ros
- - nav_msgs
- - roscpp
- - rosconsole
- - sensor_msgs
- - opencv
- - image_transport
- - dynamic_reconfigure
-
-## Build the program
-
-Place the package folder "zed_wrapper" in your catkin workspace source folder "~/catkin_ws/src"
-
-Open a terminal:
-
- $ cd ~/catkin_ws
- $ catkin_make
- $ source ./devel/setup.bash
-
-
-## Run the program
-
- Open a terminal to launch the wrapper:
-
- $ roslaunch zed_wrapper zed.launch
-
- Open another terminal to display images:
-
- $ rosrun image_view image_view image:=/camera/rgb/image_rect_color
-
- If you want to see the point cloud, launch rviz with the following command. Then click on **Add** (bottom left), select the **By Topic** tab, select **point_cloud->cloud->PointCloud2** and click **OK**.
-
- $ rosrun rviz rviz
-
- Note that rviz is not very good at displaying a camera feed and a point cloud at the same time. You should use another instance of rviz or the `rosrun` command.
-
- To visualize the odometry in rviz, click the **Add** button and select the **odom** topic under the **By topic** tab.
-
- *Important: by default rviz displays the odometry poorly. Set it up correctly by opening the newly created Odometry object in the left list and setting **Position Tolerance** and **Angle Tolerance** to **0**, and **Keep** to **1**.*
-
- You can also see the point cloud fused with the odometry by subscribing to the 'odom' topic (even in another rviz instance or another node).
-
- To change the reference frame, use the 'Fixed Frame' parameter at the top left of rviz.
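 You can also check a stream programmatically. Below is a minimal subscriber sketch (a hypothetical node, not part of this wrapper, assuming a catkin package that depends on roscpp and sensor_msgs):

    #include <ros/ros.h>
    #include <sensor_msgs/PointCloud2.h>

    // Print basic information about each incoming cloud.
    void cloudCallback(const sensor_msgs::PointCloud2ConstPtr& msg) {
      ROS_INFO("cloud: %u x %u points in frame %s",
               msg->width, msg->height, msg->header.frame_id.c_str());
    }

    int main(int argc, char** argv) {
      ros::init(argc, argv, "cloud_listener");
      ros::NodeHandle nh;
      ros::Subscriber sub =
          nh.subscribe("/camera/point_cloud/cloud", 1, cloudCallback);
      ros::spin();
      return 0;
    }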
-
-## Launch file parameters
-
- Parameter | Description | Value
------------------------------|--------------------------------------------------------------|-------------------------
- svo_file | SVO filename | path to an SVO file
- resolution | ZED Camera resolution | '0': HD2K
- _ | _ | '1': HD1080
- _ | _ | '2': HD720
- _ | _ | '3': VGA
- quality | Disparity Map quality | '0': NONE
- _ | _ | '1': PERFORMANCE
- _ | _ | '2': MEDIUM
- _ | _ | '3': QUALITY
- sensing_mode | Depth sensing mode | '0': FILL
- _ | _ | '1': STANDARD
- openni_depth_mode | Convert depth to 16-bit millimeters | '0': 32-bit float, meters
- _ | _ | '1': 16-bit uchar, millimeters
- frame_rate | Rate at which images are published | int
- rgb_topic | Topic to which rgb (= left by default) images are published | string
- rgb_cam_info_topic | Topic to which rgb (= left) camera info is published | string
- rgb_frame_id | ID specified in the rgb (= left) image message header | string
- left_topic | Topic to which left images are published | string
- left_cam_info_topic | Topic to which left camera info is published | string
- left_frame_id | ID specified in the left image message header | string
- right_topic | Topic to which right images are published | string
- right_cam_info_topic | Topic to which right camera info is published | string
- right_frame_id | ID specified in the right image message header | string
- depth_topic | Topic to which depth map images are published | string
- depth_cam_info_topic | Topic to which depth camera info is published | string
- depth_frame_id | ID specified in the depth image message header | string
- point_cloud_topic | Topic to which point clouds are published | string
- cloud_frame_id | ID specified in the point cloud message header | string
- odometry_topic | Topic to which odometry is published | string
- odometry_frame_id | ID specified in the odometry message header | string
- odometry_transform_frame_id | Name of the transformation following the odometry | string
diff --git a/zed_ros_wrapper/cfg/Zed.cfg b/zed_ros_wrapper/cfg/Zed.cfg
deleted file mode 100644
index cad5bc2..0000000
--- a/zed_ros_wrapper/cfg/Zed.cfg
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env python
-PACKAGE = "zed_ros_wrapper"
-
-from dynamic_reconfigure.parameter_generator_catkin import *
-
-gen = ParameterGenerator()
-
-gen.add("confidence", int_t, 0, "Confidence threshold, the lower the better", 80, 1, 100)
-
-exit(gen.generate(PACKAGE, "zed_ros_wrapper", "Zed"))
diff --git a/zed_ros_wrapper/launch/zed.launch b/zed_ros_wrapper/launch/zed.launch
deleted file mode 100644
index 24ced3c..0000000
--- a/zed_ros_wrapper/launch/zed.launch
+++ /dev/null
@@ -1,43 +0,0 @@
-[the 43 deleted lines of zed.launch were XML launch markup; the tag content was lost when this patch was converted to plain text]
diff --git a/zed_ros_wrapper/launch/zed.launch~ b/zed_ros_wrapper/launch/zed.launch~
deleted file mode 100644
index c335e98..0000000
--- a/zed_ros_wrapper/launch/zed.launch~
+++ /dev/null
@@ -1,43 +0,0 @@
-[editor backup of zed.launch; its 43 deleted lines of XML were likewise lost in text extraction]
diff --git a/zed_ros_wrapper/launch/zed_tf.launch b/zed_ros_wrapper/launch/zed_tf.launch
deleted file mode 100644
index bbd6323..0000000
--- a/zed_ros_wrapper/launch/zed_tf.launch
+++ /dev/null
@@ -1,15 +0,0 @@
-[the 15 deleted lines of zed_tf.launch were XML launch markup; the tag content was lost when this patch was converted to plain text]
diff --git a/zed_ros_wrapper/package.xml b/zed_ros_wrapper/package.xml
deleted file mode 100644
index 0bfa159..0000000
--- a/zed_ros_wrapper/package.xml
+++ /dev/null
@@ -1,34 +0,0 @@
-[the XML tags of package.xml were lost in text extraction; the surviving element values follow]
- zed_wrapper
- 1.0.0
- zed_wrapper is a ROS wrapper for the ZED library
- for interfacing with the ZED camera.
- STEREOLABS
- BSD
- catkin
- build and run dependencies: tf2_ros, nav_msgs, roscpp, rosconsole, sensor_msgs, opencv, image_transport, dynamic_reconfigure, pcl_conversions
diff --git a/zed_ros_wrapper/records/record_depth.sh b/zed_ros_wrapper/records/record_depth.sh
deleted file mode 100644
index b7f73c3..0000000
--- a/zed_ros_wrapper/records/record_depth.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-rosbag record /camera/rgb/camera_info /camera/depth_registered/image_raw/compressedDepth /camera/rgb/image_rect_color/compressed /tf
diff --git a/zed_ros_wrapper/records/record_stereo.sh b/zed_ros_wrapper/records/record_stereo.sh
deleted file mode 100644
index 97d4cfc..0000000
--- a/zed_ros_wrapper/records/record_stereo.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-rosbag record /camera/left/camera_info /camera/right/camera_info /camera/left/image_raw/compressed /camera/right/image_raw/compressed /tf
diff --git a/zed_ros_wrapper/src/zed_wrapper_node.cpp b/zed_ros_wrapper/src/zed_wrapper_node.cpp
deleted file mode 100644
index e494709..0000000
--- a/zed_ros_wrapper/src/zed_wrapper_node.cpp
+++ /dev/null
@@ -1,707 +0,0 @@
-///////////////////////////////////////////////////////////////////////////
-//
-// Copyright (c) 2015, STEREOLABS.
-//
-// All rights reserved.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-///////////////////////////////////////////////////////////////////////////
-
-
-
-
-/****************************************************************************************************
- ** This sample is a wrapper for the ZED library in order to use the ZED Camera with ROS. **
- ** You can publish Left+Depth or Left+Right images and camera info on the topics of your choice. **
- ** **
- ** A set of parameters can be specified in the launch file. **
- ****************************************************************************************************/
-
-//standard includes / ROS includes / OpenCV includes / Boost includes / PCL includes / ZED includes
-// [the <header> targets of every #include directive in this block were lost
-// when the patch was converted to plain text; judging from the code below
-// they covered the C++ standard library (threads, chrono, memory,
-// sys/stat.h), ROS core and message headers (ros/ros.h, sensor_msgs,
-// nav_msgs, image_transport, dynamic_reconfigure, tf2_ros), OpenCV, Boost,
-// PCL, and the ZED SDK]
-
-using namespace sl::zed;
-using namespace std;
-
-
-int confidence;
-bool computeDepth;
-int openniDepthMode = 0; // 16 bit UC data in mm else 32F in m, for more info http://www.ros.org/reps/rep-0118.html
-
-// Point cloud thread variables
-float* cloud;
-bool pointCloudThreadRunning = true;
-bool point_cloud_data_processing = false;
-string point_cloud_frame_id = "";
-ros::Time point_cloud_time;
-
-/* \brief Test if a file exists
- * \param name : the path to the file
- */
-bool file_exist(const std::string& name) {
- struct stat buffer;
- return (stat(name.c_str(), &buffer) == 0);
-}
-
-/* \brief Image to ROS message conversion
- * \param img : the image to publish
- * \param encodingType : the sensor_msgs::image_encodings encoding type
- * \param frameId : the id of the reference frame of the image
- * \param t : the ros::Time to stamp the image
-*/
-sensor_msgs::ImagePtr imageToROSmsg(cv::Mat img, const std::string encodingType, std::string frameId, ros::Time t){
- sensor_msgs::ImagePtr ptr = boost::make_shared<sensor_msgs::Image>();
- sensor_msgs::Image& imgMessage = *ptr;
- imgMessage.header.stamp = t;
- imgMessage.header.frame_id = frameId;
- imgMessage.height = img.rows;
- imgMessage.width = img.cols;
- imgMessage.encoding = encodingType;
- int num = 1; //for endianness detection
- imgMessage.is_bigendian = !(*(char *)&num == 1);
- imgMessage.step = img.cols * img.elemSize();
- size_t size = imgMessage.step * img.rows;
- imgMessage.data.resize(size);
-
- if (img.isContinuous())
- memcpy((char*)(&imgMessage.data[0]), img.data, size);
- else {
- // Copy row by row when the cv::Mat rows are not contiguous in memory
- uchar* opencvData = img.data;
- uchar* rosData = (uchar*)(&imgMessage.data[0]);
- for (unsigned int i = 0; i < img.rows; i++) {
- memcpy(rosData, opencvData, imgMessage.step);
- rosData += imgMessage.step;
- opencvData += img.step;
- }
- }
- return ptr;
-}
-
-/* \brief Publish the pose of the camera with a ros Publisher
- * \param Path : the 4x4 matrix representing the camera pose
- * \param pub_odom : the publisher object to use
- * \param odom_frame_id : the id of the reference frame of the pose
- * \param t : the ros::Time to stamp the odometry message
- */
-void publishOdom(Eigen::Matrix4f Path, ros::Publisher &pub_odom, string odom_frame_id, ros::Time t) {
- nav_msgs::Odometry odom;
- odom.header.stamp = t;
- odom.header.frame_id = odom_frame_id;
- //odom.child_frame_id = "zed_optical_frame";
-
- odom.pose.pose.position.y = -Path(0, 3);
- odom.pose.pose.position.z = Path(1, 3);
- odom.pose.pose.position.x = -Path(2, 3);
- Eigen::Quaternionf quat(Path.block<3, 3>(0, 0));
- odom.pose.pose.orientation.x = -quat.z();
- odom.pose.pose.orientation.y = -quat.x();
- odom.pose.pose.orientation.z = quat.y();
- odom.pose.pose.orientation.w = quat.w();
- pub_odom.publish(odom);
-}
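// Editor's note: the assignments above remap the ZED tracking frame into the
// ROS convention (x forward, y left, z up): ros_x = -Path(2,3),
// ros_y = -Path(0,3), ros_z = Path(1,3), with the quaternion components
// permuted the same way. A standalone sketch of the translation remapping
// (hypothetical helper, not part of the original wrapper):

#include <Eigen/Dense>  // only to keep the sketch self-contained

static inline void zedPathToRosXYZ(const Eigen::Matrix4f& Path,
                                   double& x, double& y, double& z) {
  x = -Path(2, 3);  // ROS x  <- -ZED z
  y = -Path(0, 3);  // ROS y  <- -ZED x
  z =  Path(1, 3);  // ROS z  <-  ZED y
}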
-/* \brief Publish the pose of the camera as a transformation
- * \param Path : the 4x4 matrix representing the camera pose
- * \param trans_br : the TransformBroadcaster object to use
- * \param odometry_transform_frame_id : the id of the transformation
- * \param t : the ros::Time to stamp the transform
- */
-void publishTrackedFrame(Eigen::Matrix4f Path, tf2_ros::TransformBroadcaster &trans_br, string odometry_transform_frame_id, ros::Time t) {
-
- geometry_msgs::TransformStamped transformStamped;
- transformStamped.header.stamp = ros::Time::now(); // note: stamps with now() rather than the t argument
- transformStamped.header.frame_id = "zed_initial_frame";
- transformStamped.child_frame_id = odometry_transform_frame_id;
- transformStamped.transform.translation.x = -Path(2, 3);
- transformStamped.transform.translation.y = -Path(0, 3);
- transformStamped.transform.translation.z = Path(1, 3);
- Eigen::Quaternionf quat(Path.block<3, 3>(0, 0));
- transformStamped.transform.rotation.x = -quat.z();
- transformStamped.transform.rotation.y = -quat.x();
- transformStamped.transform.rotation.z = quat.y();
- transformStamped.transform.rotation.w = quat.w();
- trans_br.sendTransform(transformStamped);
-}
-
-/* \brief Publish a cv::Mat image with a ros Publisher
- * \param img : the image to publish
- * \param pub_img : the publisher object to use
- * \param img_frame_id : the id of the reference frame of the image
- * \param t : the ros::Time to stamp the image
- */
-void publishImage(cv::Mat img, image_transport::Publisher &pub_img, string img_frame_id, ros::Time t) {
- pub_img.publish(imageToROSmsg(img
- , sensor_msgs::image_encodings::BGR8
- , img_frame_id
- , t ));
-}
-
-/* \brief Publish a cv::Mat depth image with a ros Publisher
- * \param depth : the depth image to publish
- * \param pub_depth : the publisher object to use
- * \param depth_frame_id : the id of the reference frame of the depth image
- * \param t : the ros::Time to stamp the depth image
- */
-void publishDepth(cv::Mat depth, image_transport::Publisher &pub_depth, string depth_frame_id, ros::Time t) {
- string encoding;
- if(openniDepthMode){
- depth *= 1000.0f;
- depth.convertTo(depth, CV_16UC1); // in mm, rounded
- encoding = sensor_msgs::image_encodings::TYPE_16UC1;
- }
- else {
- encoding = sensor_msgs::image_encodings::TYPE_32FC1;
- }
- pub_depth.publish(imageToROSmsg(depth
- , encoding
- , depth_frame_id
- , t ));
-}
-
-/* \brief Point cloud publishing loop (runs in its own thread)
- * \param width : the width of the point cloud
- * \param height : the height of the point cloud
- * \param pub_cloud : the publisher object to use
- * (the raw data pointer, frame id and timestamp arrive through the globals
- * cloud, point_cloud_frame_id and point_cloud_time)
- */
-void publishPointCloud(int width, int height, ros::Publisher &pub_cloud) {
- while (pointCloudThreadRunning) { // check if the thread has to continue
- if (!point_cloud_data_processing) { // check if data is available
- std::this_thread::sleep_for(std::chrono::milliseconds(2)); // No data, we just wait
- continue;
- }
- pcl::PointCloud<pcl::PointXYZRGB> point_cloud;
- point_cloud.width = width;
- point_cloud.height = height;
- int size = width*height;
- point_cloud.points.resize(size);
- int index4 = 0;
- float color;
- for (int i = 0; i < size; i++) {
- if (cloud[index4 + 2] > 0) { // Skip invalid points (depth greater than 0)
- index4 += 4;
- continue;
- }
- point_cloud.points[i].x = cloud[index4++];
- point_cloud.points[i].y = -cloud[index4++];
- point_cloud.points[i].z = -cloud[index4++];
- color = cloud[index4++];
- uint32_t color_uint = *(uint32_t*) & color; // Convert the color
- unsigned char* color_uchar = (unsigned char*) &color_uint;
- color_uint = ((uint32_t) color_uchar[0] << 16 | (uint32_t) color_uchar[1] << 8 | (uint32_t) color_uchar[2]);
- point_cloud.points[i].rgb = *reinterpret_cast<float*> (&color_uint);
- }
-
- //pcl::PointCloud<pcl::PointXYZRGB> point_cloud_output;
- //std::vector<int> indices;
- //pcl::removeNaNFromPointCloud(point_cloud, point_cloud_output, indices);
-
- sensor_msgs::PointCloud2 output;
- pcl::toROSMsg(point_cloud, output); // Convert the point cloud to a ROS message
- output.header.frame_id = point_cloud_frame_id; // Set the header values of the ROS message
- output.header.stamp = point_cloud_time;
- pub_cloud.publish(output);
- point_cloud_data_processing = false;
- }
-}
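// Editor's note on the colour decode above: the ZED XYZRGBA measure stores
// each point's colour as a float whose raw bits are the four RGBA bytes; the
// loop reinterprets those bits and repacks them into the 0x00RRGGBB layout
// that pcl::PointXYZRGB expects. A self-contained sketch of the same decode
// (hypothetical helper, mirroring the loop above):

#include <cstdint>
#include <cstring>  // includes here only to keep the sketch self-contained

static inline std::uint32_t packedFloatToPclRgb(float packed) {
  std::uint32_t bits;
  std::memcpy(&bits, &packed, sizeof bits);  // reinterpret the float's bits
  const unsigned char* c = reinterpret_cast<const unsigned char*>(&bits);
  return (std::uint32_t(c[0]) << 16) |  // R
         (std::uint32_t(c[1]) << 8)  |  // G
          std::uint32_t(c[2]);          // B
}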
-
-/* \brief Publish the information of a camera with a ros Publisher
- * \param cam_info_msg : the information message to publish
- * \param pub_cam_info : the publisher object to use
- * \param t : the ros::Time to stamp the message
- */
-void publishCamInfo(sensor_msgs::CameraInfoPtr cam_info_msg, ros::Publisher pub_cam_info, ros::Time t) {
- static int seq = 0;
- cam_info_msg->header.stamp = t;
- cam_info_msg->header.seq = seq;
- pub_cam_info.publish(cam_info_msg);
- seq++;
-}
-
-/* \brief Get the information of the ZED cameras and store it in information messages
- * \param zed : the sl::zed::Camera* pointer to an instance
- * \param left_cam_info_msg : the information message to fill with the left camera information
- * \param right_cam_info_msg : the information message to fill with the right camera information
- * \param left_frame_id : the id of the reference frame of the left camera
- * \param right_frame_id : the id of the reference frame of the right camera
- */
-void fillCamInfo(Camera* zed, sensor_msgs::CameraInfoPtr left_cam_info_msg, sensor_msgs::CameraInfoPtr right_cam_info_msg,
- string left_frame_id, string right_frame_id) {
-
- int width = zed->getImageSize().width;
- int height = zed->getImageSize().height;
-
- sl::zed::StereoParameters* zedParam = zed->getParameters();
-
- float baseline = zedParam->baseline * 0.001; // baseline converted to meters
-
- float fx = zedParam->LeftCam.fx;
- float fy = zedParam->LeftCam.fy;
- float cx = zedParam->LeftCam.cx;
- float cy = zedParam->LeftCam.cy;
-
- // There are no distortions since the images are rectified
- double k1 = 0;
- double k2 = 0;
- double k3 = 0;
- double p1 = 0;
- double p2 = 0;
-
- left_cam_info_msg->distortion_model = sensor_msgs::distortion_models::PLUMB_BOB;
- right_cam_info_msg->distortion_model = sensor_msgs::distortion_models::PLUMB_BOB;
-
- left_cam_info_msg->D.resize(5);
- right_cam_info_msg->D.resize(5);
- left_cam_info_msg->D[0] = right_cam_info_msg->D[0] = k1;
- left_cam_info_msg->D[1] = right_cam_info_msg->D[1] = k2;
- left_cam_info_msg->D[2] = right_cam_info_msg->D[2] = k3;
- left_cam_info_msg->D[3] = right_cam_info_msg->D[3] = p1;
- left_cam_info_msg->D[4] = right_cam_info_msg->D[4] = p2;
-
- left_cam_info_msg->K.fill(0.0);
- right_cam_info_msg->K.fill(0.0);
- left_cam_info_msg->K[0] = right_cam_info_msg->K[0] = fx;
- left_cam_info_msg->K[2] = right_cam_info_msg->K[2] = cx;
- left_cam_info_msg->K[4] = right_cam_info_msg->K[4] = fy;
- left_cam_info_msg->K[5] = right_cam_info_msg->K[5] = cy;
- left_cam_info_msg->K[8] = right_cam_info_msg->K[8] = 1.0;
-
-
-
- left_cam_info_msg->R.fill(0.0);
- right_cam_info_msg->R.fill(0.0);
-
- left_cam_info_msg->P.fill(0.0);
- right_cam_info_msg->P.fill(0.0);
- left_cam_info_msg->P[0] = right_cam_info_msg->P[0] = fx;
- left_cam_info_msg->P[2] = right_cam_info_msg->P[2] = cx;
- left_cam_info_msg->P[5] = right_cam_info_msg->P[5] = fy;
- left_cam_info_msg->P[6] = right_cam_info_msg->P[6] = cy;
- left_cam_info_msg->P[10] = right_cam_info_msg->P[10] = 1.0;
- right_cam_info_msg->P[3] = (-1 * fx * baseline);
-
- left_cam_info_msg->width = right_cam_info_msg->width = width;
- left_cam_info_msg->height = right_cam_info_msg->height = height;
-
- left_cam_info_msg->header.frame_id = left_frame_id;
- right_cam_info_msg->header.frame_id = right_frame_id;
-}
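// Editor's note: sensor_msgs/CameraInfo's P is the 3x4 projection matrix
//   [fx  0 cx Tx]
//   [ 0 fy cy Ty]
//   [ 0  0  1  0]
// stored row-major, so P[0]=fx, P[2]=cx, P[3]=Tx, P[5]=fy, P[6]=cy, P[10]=1.
// For a rectified stereo pair Tx = -fx * baseline on the right camera and 0
// on the left, which is exactly what fillCamInfo() sets above.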
-
-void callback(zed_ros_wrapper::ZedConfig &config, uint32_t level) {
- ROS_INFO("Reconfigure confidence : %d", config.confidence);
- confidence = config.confidence;
-}
-
-int main(int argc, char **argv) {
- // Launch file parameters
- int resolution = sl::zed::HD720;
- int quality = sl::zed::MODE::PERFORMANCE;
- int sensing_mode = sl::zed::SENSING_MODE::STANDARD;
- int rate = 30;
- string odometry_DB = "";
-
- std::string img_topic = "image_rect";
-
- // Set the default topic names
- string rgb_topic = "rgb/" + img_topic;
- string rgb_cam_info_topic = "rgb/camera_info";
- string rgb_frame_id = "/zed_tracked_frame";
-
- string left_topic = "left/" + img_topic;
- string left_cam_info_topic = "left/camera_info";
- string left_frame_id = "/zed_tracked_frame";
-
- string right_topic = "right/" + img_topic;
- string right_cam_info_topic = "right/camera_info";
- string right_frame_id = "/zed_tracked_frame";
-
- string depth_topic = "depth/";
- if (openniDepthMode)
- depth_topic += "image_raw";
- else
- depth_topic += img_topic;
-
- string depth_cam_info_topic = "depth/camera_info";
- string depth_frame_id = "/zed_depth_optical_frame";
-
- string point_cloud_topic = "point_cloud/" + img_topic;
- string cloud_frame_id = "/zed_tracked_frame";
-
- string odometry_topic = "odom";
- string odometry_frame_id = "/zed_initial_frame";
- string odometry_transform_frame_id = "/zed_tracked_frame";
-
- ros::init(argc, argv, "zed_depth_stereo_wrapper_node");
- ROS_INFO("ZED_WRAPPER Node initialized");
-
- ros::NodeHandle nh;
- ros::NodeHandle nh_ns("~");
-
- // Get parameters from launch file
- nh_ns.getParam("resolution", resolution);
- nh_ns.getParam("quality", quality);
- nh_ns.getParam("sensing_mode", sensing_mode);
- nh_ns.getParam("frame_rate", rate);
- nh_ns.getParam("odometry_DB", odometry_DB);
- nh_ns.getParam("openni_depth_mode", openniDepthMode);
- if (openniDepthMode)
- ROS_INFO_STREAM("Openni depth mode activated");
-
- nh_ns.getParam("rgb_topic", rgb_topic);
- nh_ns.getParam("rgb_cam_info_topic", rgb_cam_info_topic);
- nh_ns.getParam("rgb_frame_id", rgb_frame_id);
-
- nh_ns.getParam("left_topic", left_topic);
- nh_ns.getParam("left_cam_info_topic", left_cam_info_topic);
- nh_ns.getParam("left_frame_id", left_frame_id);
-
- nh_ns.getParam("right_topic", right_topic);
- nh_ns.getParam("right_cam_info_topic", right_cam_info_topic);
- nh_ns.getParam("right_frame_id", right_frame_id);
-
- nh_ns.getParam("depth_topic", depth_topic);
- nh_ns.getParam("depth_cam_info_topic", depth_cam_info_topic);
- nh_ns.getParam("depth_frame_id", depth_frame_id);
-
- nh_ns.getParam("point_cloud_topic", point_cloud_topic);
- nh_ns.getParam("cloud_frame_id", cloud_frame_id);
-
- nh_ns.getParam("odometry_topic", odometry_topic);
- nh_ns.getParam("odometry_frame_id", odometry_frame_id);
- nh_ns.getParam("odometry_transform_frame_id", odometry_transform_frame_id);
-
- // Create the ZED object
- std::unique_ptr<sl::zed::Camera> zed;
- if (argc == 2) {
- zed.reset(new sl::zed::Camera(argv[1])); // Argument "svo_file" in launch file
- ROS_INFO_STREAM("Reading SVO file : " << argv[1]);
- } else {
- zed.reset(new sl::zed::Camera(static_cast<sl::zed::ZEDResolution_mode> (resolution), rate));
- ROS_INFO_STREAM("Using ZED Camera");
- }
Camera"); - } - - // Try to initialize the ZED - sl::zed::InitParams param; - param.unit = UNIT::METER; - param.coordinate = COORDINATE_SYSTEM::RIGHT_HANDED; - param.mode = static_cast (quality); - param.verbose = true; - - ERRCODE err = ERRCODE::ZED_NOT_AVAILABLE; - while (err != SUCCESS) { - err = zed->init(param); - ROS_INFO_STREAM(errcode2str(err)); - std::this_thread::sleep_for(std::chrono::milliseconds(2000)); - } - - zed->grab(static_cast (sensing_mode), true, true, true); //call the first grab - - //Tracking variables - sl::zed::TRACKING_STATE track_state; - sl::zed::TRACKING_FRAME_STATE frame_state; - Eigen::Matrix4f Path; - Path.setIdentity(4, 4); - - //ERRCODE display - dynamic_reconfigure::Server server; - dynamic_reconfigure::Server::CallbackType f; - - f = boost::bind(&callback, _1, _2); - server.setCallback(f); - //confidence was previously 80 - confidence = 80; - - // Get the parameters of the ZED images - int width = zed->getImageSize().width; - int height = zed->getImageSize().height; - ROS_DEBUG_STREAM("Image size : " << width << "x" << height); - - cv::Size cvSize(width, height); - cv::Mat leftImRGB(cvSize, CV_8UC3); - cv::Mat rightImRGB(cvSize, CV_8UC3); - - - // Create all the publishers - // Image publishers - image_transport::ImageTransport it_zed(nh); - image_transport::Publisher pub_rgb = it_zed.advertise(rgb_topic, 1); //rgb - ROS_INFO_STREAM("Advertized on topic " << rgb_topic); - image_transport::Publisher pub_left = it_zed.advertise(left_topic, 1); //left - ROS_INFO_STREAM("Advertized on topic " << left_topic); - image_transport::Publisher pub_right = it_zed.advertise(right_topic, 1); //right - ROS_INFO_STREAM("Advertized on topic " << right_topic); - image_transport::Publisher pub_depth = it_zed.advertise(depth_topic, 1); //depth - ROS_INFO_STREAM("Advertized on topic " << depth_topic); - - //PointCloud publisher - ros::Publisher pub_cloud = nh.advertise (point_cloud_topic, 1); - ROS_INFO_STREAM("Advertized on topic " << point_cloud_topic); - - // Camera info publishers - ros::Publisher pub_rgb_cam_info = nh.advertise(rgb_cam_info_topic, 1); //rgb - ROS_INFO_STREAM("Advertized on topic " << rgb_cam_info_topic); - ros::Publisher pub_left_cam_info = nh.advertise(left_cam_info_topic, 1); //left - ROS_INFO_STREAM("Advertized on topic " << left_cam_info_topic); - ros::Publisher pub_right_cam_info = nh.advertise(right_cam_info_topic, 1); //right - ROS_INFO_STREAM("Advertized on topic " << right_cam_info_topic); - ros::Publisher pub_depth_cam_info = nh.advertise(depth_cam_info_topic, 1); //depth - ROS_INFO_STREAM("Advertized on topic " << depth_cam_info_topic); - - //Odometry publisher - ros::Publisher pub_odom = nh.advertise(odometry_topic, 1); - tf2_ros::TransformBroadcaster transform_odom_broadcaster; - ROS_INFO_STREAM("Advertized on topic " << odometry_topic); - - // Create and fill the camera information messages - sensor_msgs::CameraInfoPtr rgb_cam_info_msg(new sensor_msgs::CameraInfo()); - sensor_msgs::CameraInfoPtr left_cam_info_msg(new sensor_msgs::CameraInfo()); - sensor_msgs::CameraInfoPtr right_cam_info_msg(new sensor_msgs::CameraInfo()); - sensor_msgs::CameraInfoPtr depth_cam_info_msg(new sensor_msgs::CameraInfo()); - fillCamInfo(zed.get(), left_cam_info_msg, right_cam_info_msg, left_frame_id, right_frame_id); - rgb_cam_info_msg = depth_cam_info_msg = left_cam_info_msg; // the reference camera is the Left one (next to the ZED logo) - - ros::Rate loop_rate(rate); - ros::Time old_t = ros::Time::now(); - bool old_image = false; - std::unique_ptr 
- std::unique_ptr<std::thread> pointCloudThread = nullptr;
- pointCloudThread.reset(new std::thread(&publishPointCloud, width, height, std::ref(pub_cloud)));
- bool tracking_activated = false;
-
- try
- {
- // Main loop
- while (ros::ok())
- {
- // Check for subscribers
- int rgb_SubNumber = pub_rgb.getNumSubscribers();
- int left_SubNumber = pub_left.getNumSubscribers();
- int right_SubNumber = pub_right.getNumSubscribers();
- int depth_SubNumber = pub_depth.getNumSubscribers();
- int cloud_SubNumber = pub_cloud.getNumSubscribers();
- int odom_SubNumber = pub_odom.getNumSubscribers();
- bool runLoop = (rgb_SubNumber + left_SubNumber + right_SubNumber + depth_SubNumber + cloud_SubNumber + odom_SubNumber) > 0;
- // Run the loop only if there are subscribers
- if (true) // note: the runLoop subscriber gate is bypassed here, so the body always runs
- {
- if (odom_SubNumber > 0 && !tracking_activated)
- { //Start the tracking
- if (odometry_DB != "" && !file_exist(odometry_DB))
- {
- odometry_DB = "";
- ROS_WARN("odometry_DB path doesn't exist or is unreachable.");
- }
- zed->enableTracking(Path, true, odometry_DB);
- tracking_activated = true;
- }
- else if (odom_SubNumber == 0 && tracking_activated)
- { //Stop the tracking
- zed->stopTracking();
- tracking_activated = false;
- }
- computeDepth = (depth_SubNumber + cloud_SubNumber + odom_SubNumber) > 0; // Detect if one of the subscribers needs the depth information
- ros::Time t = ros::Time::now(); // Get current time
-
- if (computeDepth)
- {
- int actual_confidence = zed->getConfidenceThreshold();
- if (actual_confidence != confidence)
- {
- zed->setConfidenceThreshold(confidence);
- }
-
- old_image = zed->grab(static_cast<sl::zed::SENSING_MODE> (sensing_mode), true, true, cloud_SubNumber > 0); // Ask to compute the depth
- }
- else
- {
- old_image = zed->grab(static_cast<sl::zed::SENSING_MODE> (sensing_mode), false, false); // Ask to not compute the depth
- }
-
-
- if (old_image)
- { // Detect if an error occurred (for example, the ZED has been disconnected) and re-initialize the ZED
- ROS_DEBUG("Wait for a new image to proceed");
- std::this_thread::sleep_for(std::chrono::milliseconds(2));
- if ((t - old_t).toSec() > 5)
- {
- // delete the old object before constructing a new one
- zed.reset();
- if (argc == 2)
- {
- zed.reset(new sl::zed::Camera(argv[1])); // Argument "svo_file" in launch file
- ROS_INFO_STREAM("Reading SVO file : " << argv[1]);
- }
- else
- {
- zed.reset(new sl::zed::Camera(static_cast<sl::zed::ZEDResolution_mode> (resolution), rate));
- ROS_INFO_STREAM("Using ZED Camera");
- }
-
- ROS_INFO("Reinit camera");
- ERRCODE err = ERRCODE::ZED_NOT_AVAILABLE;
-
- while (err != SUCCESS)
- {
- err = zed->init(param); // Try to initialize the ZED
- ROS_INFO_STREAM(errcode2str(err));
- std::this_thread::sleep_for(std::chrono::milliseconds(2000));
- }
-
- zed->grab(static_cast<sl::zed::SENSING_MODE> (sensing_mode), true, true, cloud_SubNumber > 0); //call the first grab
- tracking_activated = false;
-
- if (odom_SubNumber > 0) { //Start the tracking
- if (odometry_DB != "" && !file_exist(odometry_DB))
- {
- odometry_DB = "";
- ROS_WARN("odometry_DB path doesn't exist or is unreachable.");
- }
-
- zed->enableTracking(Path, true, odometry_DB);
- tracking_activated = true;
- }
- }
-
- continue;
- }
-
- old_t = ros::Time::now();
-
- // Publish the left (= rgb) image if someone has subscribed to it
- //if (left_SubNumber > 0 || rgb_SubNumber > 0) {
- // Retrieve RGBA Left image
- cv::cvtColor(slMat2cvMat(zed->retrieveImage(sl::zed::SIDE::LEFT)), leftImRGB, CV_RGBA2RGB); // Convert to RGB
- // if (left_SubNumber > 0) {
- //cout << "Fx: " << left_cam_info_msg->K[0] << "Cx: " << left_cam_info_msg->K[2] << "Fy: " << left_cam_info_msg->K[4] << "Cy: " << left_cam_info_msg->K[5] << endl;
- publishCamInfo(left_cam_info_msg, pub_left_cam_info, t);
- publishImage(leftImRGB, pub_left, left_frame_id, t);
- // }
- // if (rgb_SubNumber > 0) {
- publishCamInfo(rgb_cam_info_msg, pub_rgb_cam_info, t);
- publishImage(leftImRGB, pub_rgb, rgb_frame_id, t); // rgb is the left image
- //}
- //}
-
- // Publish the right image if someone has subscribed to it
- if (right_SubNumber > 0)
- {
- // Retrieve RGBA Right image
- cv::cvtColor(slMat2cvMat(zed->retrieveImage(sl::zed::SIDE::RIGHT)), rightImRGB, CV_RGBA2RGB); // Convert to RGB
- publishCamInfo(right_cam_info_msg, pub_right_cam_info, t);
- publishImage(rightImRGB, pub_right, right_frame_id, t);
- }
-
- // Publish the depth image if someone has subscribed to it
- if (depth_SubNumber > 0)
- {
- publishCamInfo(depth_cam_info_msg, pub_depth_cam_info, t);
- publishDepth(slMat2cvMat(zed->retrieveMeasure(sl::zed::MEASURE::DEPTH)), pub_depth, depth_frame_id, t); // in meters
- }
-
- // Publish the point cloud if someone has subscribed to it
- if ( point_cloud_data_processing == false) //cloud_SubNumber > 0 &&
- {
- // Run the point cloud conversion asynchronously to avoid slowing down the whole program
- // Retrieve raw pointCloud data
- cloud = (float*) zed->retrieveMeasure(sl::zed::MEASURE::XYZRGBA).data;
- point_cloud_frame_id = cloud_frame_id;
- point_cloud_time = t;
- point_cloud_data_processing = true;
- }
-
- // Publish the odometry if someone has subscribed to it
- if (odom_SubNumber > 0)
- {
- track_state = zed->getPosition(Path, MAT_TRACKING_TYPE::PATH);
- publishOdom(Path, pub_odom, odometry_frame_id, t);
- }
-
- //Note, the frame is published, but its values will only change if someone has subscribed to odom
- publishTrackedFrame(Path, transform_odom_broadcaster, odometry_transform_frame_id, t); //publish the tracked Frame
-
- ros::spinOnce();
- loop_rate.sleep();
- }
- else
- {
-
- publishTrackedFrame(Path, transform_odom_broadcaster, odometry_transform_frame_id, ros::Time::now()); //publish the tracked Frame before the sleep
- std::this_thread::sleep_for(std::chrono::milliseconds(10)); // No subscribers, we just wait
- }
- }
- }
- catch (...)
- {
- if (pointCloudThread && pointCloudThreadRunning)
- {
- pointCloudThreadRunning = false;
- pointCloudThread->join();
- }
- ROS_ERROR("Unknown error.");
- return 1;
- }
-
- if (pointCloudThread && pointCloudThreadRunning)
- {
- pointCloudThreadRunning = false;
- pointCloudThread->join();
- }
-
- ROS_INFO("Quitting zed_depth_stereo_wrapper_node ...\n");
-
- return 0;
-}
From 141b0ae5bb7d4a5e6fda444566919e839aa12355 Mon Sep 17 00:00:00 2001
From: remap
Date: Mon, 9 Jan 2017 10:25:32 -0800
Subject: [PATCH 25/28] Updated README with ZED ROS Wrapper Instructions

---
 ...
INSTALL ONLY FOLLOW THIS IF USING ZED.txt | 7 +- ...INSTALL ONLY FOLLOW THIS IF USING ZED.txt~ | 119 ++++++++++++++++++ 2 files changed, 125 insertions(+), 1 deletion(-) create mode 100644 README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt~ diff --git a/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt b/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt index 758a9b9..61eb9e9 100644 --- a/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt +++ b/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt @@ -90,6 +90,11 @@ First make sure ZED Camera is plugged into a POWERED USB 3.0 Slot $ ./ZED_SDK_Linux_x86_64_v1.1.0.run Press q to exit license agreement and accept all defaults +Next you need to get the proper ZED ROS Wrapper: +$ cd +$ cd workspace/ros/catkin/src +$ git clone https://github.com/chanbrown007/zed_ros_wrapper_OPT_update.git + Finally Time To Install OPT: $ cd $ cd open_ptrack/scripts @@ -107,7 +112,7 @@ $ roslaunch tracking detection_and_tracking_zed.launch In a 2nd terminal, tracking can be fine tuned using: $ rosrun rqt_reconfigure rqt_reconfigure -ALSO ZED Camera Settings Can Be Adjusted in open_ptrack/zed_ros_wrapper/launch/zed.launch: +ALSO ZED Camera Settings Can Be Adjusted in workspace/ros/catkin/src/zed_ros_wrapper_OPT_update/launch/zed.launch: Resolution Parameter: 0 = 2K, 1 = 1080HD, 2 = 720HD, 3 = VGA Quality Parameter (Disparity Map Quality): 0 = none, 1 = performance, 2 = medium, 3 = high Frame Rate: 2K has max frame rate of 15fps, 1080 - 30fps and 720 - 60fps diff --git a/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt~ b/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt~ new file mode 100644 index 0000000..2147f4e --- /dev/null +++ b/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt~ @@ -0,0 +1,119 @@ +Built for Ubuntu 14.04 with Ros Indigo + +---------------------------------INSTALLING OPT WITH ZED-------------------------------------------------------- + +First Install Cuda 7.5 Toolkit: + +Using below link: +https://developer.nvidia.com/cuda-75-downloads-archive + +For Options Select: +Linux +x86_64 +Ubuntu +14.04 +deb (local) + +download cuda-repo-ubuntu1404-7-5-local package + +After downloaded: +In terminal go to folder containing cuda package +$ sudo dpkg -i cuda-repo-ubuntu1404-7-5-local_7.5-18_amd64.deb +$ sudo apt-get update +$ sudo apt-get install cuda-7-5 + +After install, reboot computer +Verify Nvidia installed: +$ nvidia-settings + +and make sure there is an nvidia driver number + +Next make sure enviroment path is set: +$ export CUDA_HOME=/usr/local/cuda-7.5 +$ export LD_LIBRARY_PATH=${CUDA_HOME}/lib64 + +$ PATH=${CUDA_HOME}/bin:${PATH} +$ export PATH + +Finally verify install: +$ cuda-install-samples-7.5.sh ~ +$ cd ~/NVIDIA_CUDA-7.5_Samples +$ cd 1_Utilities/deviceQuery +$ make +$ ./deviceQuery + +The terminal then should output the cuda version info + +Next it is necessary to install ROS/OpenCV 2.0 (included in ROS): +from directory where OPT is copied currently +$ cd open_ptrack/scripts +$ chmod +x *.sh +$ ./ros_install.sh +$ source /opt/ros/indigo/setup.bash +$ ./ros_configure.sh + +OpenCV 3.1 is required for ZED to work (however it cannot conflict with OpenCV 2!! 
Therefore we will create a separate install path): +$ cd +$ sudo apt-get update + +$ sudo apt-get install libopencv-dev build-essential checkinstall cmake pkg-config yasm libtiff4-dev libjpeg-dev libjasper-dev libavcodec-dev libavformat-dev libswscale-dev libdc1394-22-dev libxine-dev libgstreamer0.10-dev libgstreamer-plugins-base0.10-dev libv4l-dev python-dev python-numpy libtbb-dev libqt4-dev libgtk2.0-dev libfaac-dev libmp3lame-dev libopencore-amrnb-dev libopencore-amrwb-dev libtheora-dev libvorbis-dev libxvidcore-dev x264 v4l-utils + +$ sudo add-apt-repository ppa:joyard-nicolas/ffmpeg +$ sudo apt-get update +$ sudo apt-get install ffmpeg +$ sudo apt-get install frei0r-plugins + +$ sudo mkdir /usr/local/opencv3 + +$ mkdir OpenCV +$ cd OpenCV +$ git clone https://github.com/Itseez/opencv.git + +$ cd opencv +$ mkdir release +$ cd release +$ cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local/opencv3 -D WITH_TBB=ON -D BUILD_NEW_PYTHON_SUPPORT=ON -D WITH_V4L=ON -D INSTALL_C_EXAMPLES=ON -D INSTALL_PYTHON_EXAMPLES=ON -D BUILD_EXAMPLES=ON -D WITH_QT=ON -D WITH_OPENGL=ON -D ENABLE_FAST_MATH=1 -D CUDA_FAST_MATH=1 -D WITH_CUBLAS=1 .. + +Note adjust the -j7 to the number of available cores on computer +$ make -j7 +$ sudo make install +This process WILL TAKE FOREVER... + +$ printf '# OpenCV\nPKG_CONFIG_PATH=$PKG_CONFIG_PATH:/usr/local/opencv3/lib/pkgconfig\nexport PKG_CONFIG_PATH\n' >> ~/.bashrc +$ source ~/.bashrc + +Now Time to Install ZED Camera Drivers: +Download ZED v1.1.0 SDK from: https://www.stereolabs.com/developers/release/archives/ +It is very important v1.1.0 is downloaded and not any other version + +First make sure ZED Camera is plugged into a POWERED USB 3.0 Slot +$ ./ZED_SDK_Linux_x86_64_v1.1.0.run +Press q to exit license agreement and accept all defaults + +Next you need to get the proper ZED ROS Wrapper: +$ cd +$ cd workspace/ros/catkin/src +$ git clone https://github.com/chanbrown007/zed_ros_wrapper_OPT_update.git + +Finally Time To Install OPT: +$ cd +$ cd open_ptrack/scripts +$ ./openptrack_install.sh +Note sometimes you have to run ./openptrack_install.sh twice for it to fully install +If there is a persistent error that appears to be linked with OpenCV it sometimes is then necessary to install OpenCV 2 in /usr/local (following same procedure as before) and make sure to add package configure (/usr/local/lib/pkgconfig) to path + +It is suggested to delete openp_track folder in home and then: +$ ln -s ~/workspace/ros/catkin/src/open_ptrack ~/open_ptrack + +------------------RUNNING OPT WITH ZED----------------------------------------------------- +To Run make Sure ZED IS PLUGGED IN: +$ roslaunch tracking detection_and_tracking_zed.launch + +In a 2nd terminal, tracking can be fine tuned using: +$ rosrun rqt_reconfigure rqt_reconfigure + +ALSO ZED Camera Settings Can Be Adjusted in open_ptrack/zed_ros_wrapper/launch/zed.launch: +Resolution Parameter: 0 = 2K, 1 = 1080HD, 2 = 720HD, 3 = VGA +Quality Parameter (Disparity Map Quality): 0 = none, 1 = performance, 2 = medium, 3 = high +Frame Rate: 2K has max frame rate of 15fps, 1080 - 30fps and 720 - 60fps +--------------------------------------------------------------------------------------------- From 37a10b51bd5ca50f704bb58c6695238b2b865dfd Mon Sep 17 00:00:00 2001 From: chanbrown007 Date: Mon, 9 Jan 2017 10:27:39 -0800 Subject: [PATCH 26/28] Delete README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt~ --- ...INSTALL ONLY FOLLOW THIS IF USING ZED.txt~ | 119 ------------------ 1 file changed, 119 
deletions(-) delete mode 100644 README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt~ diff --git a/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt~ b/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt~ deleted file mode 100644 index 2147f4e..0000000 --- a/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt~ +++ /dev/null @@ -1,119 +0,0 @@ -Built for Ubuntu 14.04 with Ros Indigo - ----------------------------------INSTALLING OPT WITH ZED-------------------------------------------------------- - -First Install Cuda 7.5 Toolkit: - -Using below link: -https://developer.nvidia.com/cuda-75-downloads-archive - -For Options Select: -Linux -x86_64 -Ubuntu -14.04 -deb (local) - -download cuda-repo-ubuntu1404-7-5-local package - -After downloaded: -In terminal go to folder containing cuda package -$ sudo dpkg -i cuda-repo-ubuntu1404-7-5-local_7.5-18_amd64.deb -$ sudo apt-get update -$ sudo apt-get install cuda-7-5 - -After install, reboot computer -Verify Nvidia installed: -$ nvidia-settings - -and make sure there is an nvidia driver number - -Next make sure enviroment path is set: -$ export CUDA_HOME=/usr/local/cuda-7.5 -$ export LD_LIBRARY_PATH=${CUDA_HOME}/lib64 - -$ PATH=${CUDA_HOME}/bin:${PATH} -$ export PATH - -Finally verify install: -$ cuda-install-samples-7.5.sh ~ -$ cd ~/NVIDIA_CUDA-7.5_Samples -$ cd 1_Utilities/deviceQuery -$ make -$ ./deviceQuery - -The terminal then should output the cuda version info - -Next it is necessary to install ROS/OpenCV 2.0 (included in ROS): -from directory where OPT is copied currently -$ cd open_ptrack/scripts -$ chmod +x *.sh -$ ./ros_install.sh -$ source /opt/ros/indigo/setup.bash -$ ./ros_configure.sh - -OpenCV 3.1 is required for ZED to work (however it cannot conflict with OpenCV 2!! Therefore we will create a separate install path): -$ cd -$ sudo apt-get update - -$ sudo apt-get install libopencv-dev build-essential checkinstall cmake pkg-config yasm libtiff4-dev libjpeg-dev libjasper-dev libavcodec-dev libavformat-dev libswscale-dev libdc1394-22-dev libxine-dev libgstreamer0.10-dev libgstreamer-plugins-base0.10-dev libv4l-dev python-dev python-numpy libtbb-dev libqt4-dev libgtk2.0-dev libfaac-dev libmp3lame-dev libopencore-amrnb-dev libopencore-amrwb-dev libtheora-dev libvorbis-dev libxvidcore-dev x264 v4l-utils - -$ sudo add-apt-repository ppa:joyard-nicolas/ffmpeg -$ sudo apt-get update -$ sudo apt-get install ffmpeg -$ sudo apt-get install frei0r-plugins - -$ sudo mkdir /usr/local/opencv3 - -$ mkdir OpenCV -$ cd OpenCV -$ git clone https://github.com/Itseez/opencv.git - -$ cd opencv -$ mkdir release -$ cd release -$ cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local/opencv3 -D WITH_TBB=ON -D BUILD_NEW_PYTHON_SUPPORT=ON -D WITH_V4L=ON -D INSTALL_C_EXAMPLES=ON -D INSTALL_PYTHON_EXAMPLES=ON -D BUILD_EXAMPLES=ON -D WITH_QT=ON -D WITH_OPENGL=ON -D ENABLE_FAST_MATH=1 -D CUDA_FAST_MATH=1 -D WITH_CUBLAS=1 .. - -Note adjust the -j7 to the number of available cores on computer -$ make -j7 -$ sudo make install -This process WILL TAKE FOREVER... 
- -$ printf '# OpenCV\nPKG_CONFIG_PATH=$PKG_CONFIG_PATH:/usr/local/opencv3/lib/pkgconfig\nexport PKG_CONFIG_PATH\n' >> ~/.bashrc -$ source ~/.bashrc - -Now Time to Install ZED Camera Drivers: -Download ZED v1.1.0 SDK from: https://www.stereolabs.com/developers/release/archives/ -It is very important v1.1.0 is downloaded and not any other version - -First make sure ZED Camera is plugged into a POWERED USB 3.0 Slot -$ ./ZED_SDK_Linux_x86_64_v1.1.0.run -Press q to exit license agreement and accept all defaults - -Next you need to get the proper ZED ROS Wrapper: -$ cd -$ cd workspace/ros/catkin/src -$ git clone https://github.com/chanbrown007/zed_ros_wrapper_OPT_update.git - -Finally Time To Install OPT: -$ cd -$ cd open_ptrack/scripts -$ ./openptrack_install.sh -Note sometimes you have to run ./openptrack_install.sh twice for it to fully install -If there is a persistent error that appears to be linked with OpenCV it sometimes is then necessary to install OpenCV 2 in /usr/local (following same procedure as before) and make sure to add package configure (/usr/local/lib/pkgconfig) to path - -It is suggested to delete openp_track folder in home and then: -$ ln -s ~/workspace/ros/catkin/src/open_ptrack ~/open_ptrack - -------------------RUNNING OPT WITH ZED----------------------------------------------------- -To Run make Sure ZED IS PLUGGED IN: -$ roslaunch tracking detection_and_tracking_zed.launch - -In a 2nd terminal, tracking can be fine tuned using: -$ rosrun rqt_reconfigure rqt_reconfigure - -ALSO ZED Camera Settings Can Be Adjusted in open_ptrack/zed_ros_wrapper/launch/zed.launch: -Resolution Parameter: 0 = 2K, 1 = 1080HD, 2 = 720HD, 3 = VGA -Quality Parameter (Disparity Map Quality): 0 = none, 1 = performance, 2 = medium, 3 = high -Frame Rate: 2K has max frame rate of 15fps, 1080 - 30fps and 720 - 60fps ---------------------------------------------------------------------------------------------- From ae41c972ed1f8384d0e3d1933ac935faf9faf5b7 Mon Sep 17 00:00:00 2001 From: chanbrown007 Date: Wed, 11 Jan 2017 17:09:42 -0800 Subject: [PATCH 27/28] Updated OpenCV 3.1 Download --- README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt b/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt index 61eb9e9..14ea9df 100644 --- a/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt +++ b/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt @@ -67,7 +67,8 @@ $ sudo mkdir /usr/local/opencv3 $ mkdir OpenCV $ cd OpenCV -$ git clone https://github.com/Itseez/opencv.git +$ git clone https://github.com/opencv/opencv.git +$ git checkout tags/3.1.0 $ cd opencv $ mkdir release From 161a5fb43eb94d859fee45ff33229592adb7edb7 Mon Sep 17 00:00:00 2001 From: chanbrown007 Date: Wed, 11 Jan 2017 17:12:21 -0800 Subject: [PATCH 28/28] Fixed tag --- README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt b/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt index 14ea9df..bdd0fbb 100644 --- a/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt +++ b/README-ZED WITH OPT INSTALL ONLY FOLLOW THIS IF USING ZED.txt @@ -68,9 +68,8 @@ $ sudo mkdir /usr/local/opencv3 $ mkdir OpenCV $ cd OpenCV $ git clone https://github.com/opencv/opencv.git +$ cd opencv $ git checkout tags/3.1.0 - -$ cd opencv $ mkdir release $ 
cd release $ cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local/opencv3 -D WITH_TBB=ON -D BUILD_NEW_PYTHON_SUPPORT=ON -D WITH_V4L=ON -D INSTALL_C_EXAMPLES=ON -D INSTALL_PYTHON_EXAMPLES=ON -D BUILD_EXAMPLES=ON -D WITH_QT=ON -D WITH_OPENGL=ON -D ENABLE_FAST_MATH=1 -D CUDA_FAST_MATH=1 -D WITH_CUBLAS=1 ..
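
As a quick sanity check that the OpenCV 3.1 build installed in /usr/local/opencv3 is the one being picked up (a minimal sketch by the editor, not part of the install scripts; it assumes the PKG_CONFIG_PATH line added to ~/.bashrc earlier is in effect), the following program can be compiled and run:

#include <iostream>
#include <opencv2/core.hpp>  // defines CV_VERSION

int main() {
  // Should print 3.1.0 when linked against the /usr/local/opencv3 install
  std::cout << "OpenCV version: " << CV_VERSION << std::endl;
  return 0;
}

$ g++ -std=c++11 cv_version.cpp -o cv_version $(pkg-config --cflags --libs opencv)
$ ./cv_version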