Hi there, in this snippet of code:
// Collected scan points; returned empty if either TF lookup below fails.
std::vector<scan_point_t> points;
// Transform from the sensor frame (scan->header.frame_id) into the robot base frame.
tf::StampedTransform sensor_to_base;
try {
// ros::Time(0) requests the latest available transform — presumably the sensor
// mount is static relative to the base, so no exact time sync is needed here.
m_tf.lookupTransform(m_base_frame, scan->header.frame_id, ros::Time(0), sensor_to_base);
} catch(const std::exception& ex) {
// NOTE(review): the message names the frames in the opposite order of the
// actual call above (target = m_base_frame, source = scan frame) — confirm intent.
ROS_WARN_STREAM("NeoLocalizationNode: lookupTransform(scan->header.frame_id, m_base_frame) failed: " << ex.what());
return points;
}
// Transform from the base frame into the odom frame at the scan timestamp.
tf::StampedTransform base_to_odom;
try {
// Block up to m_transform_timeout seconds for odometry TF to become available
// at the scan's exact timestamp, then look it up; both calls throw (caught
// below) on timeout or extrapolation failure.
m_tf.waitForTransform(m_odom_frame, m_base_frame, scan->header.stamp, ros::Duration(m_transform_timeout));
m_tf.lookupTransform(m_odom_frame, m_base_frame, scan->header.stamp, base_to_odom);
} catch(const std::exception& ex) {
ROS_WARN_STREAM("NeoLocalizationNode: lookupTransform(m_base_frame, m_odom_frame) failed: " << ex.what());
return points;
}
// Convert the TF transforms into 4x4 homogeneous matrices. The helpers are
// defined elsewhere; "_3" vs "_25" presumably distinguish a full 3D conversion
// from a 2.5D (planar) projection — TODO confirm against their definitions.
const Matrix<double, 4, 4> S = convert_transform_3(sensor_to_base);
const Matrix<double, 4, 4> L = convert_transform_25(base_to_odom);
// precompute transformation matrix from sensor to requested base
// Chained right-to-left: S maps sensor -> base, L maps base -> odom, and
// odom_to_base (defined outside this snippet) maps odom -> the requested base
// pose. So T maps a point from sensor coordinates into the requested base frame.
const Matrix<double, 4, 4> T = odom_to_base * L * S;
Hi there, I assume we are working with a mobile robot and a 2D sensor, so why do we need to perform the transformation in 3D instead of setting all z-axis values to 0 and working with a 2D transformation? Also, can anyone explain the mathematical basis of this line:
// precompute transformation matrix from sensor to requested base
const Matrix<double, 4, 4> T = odom_to_base * L * S;
Thanks a lot
Hi there, in this snippet of code:
Hi there, I assume we are working with a mobile robot and a 2D sensor, so why do we need to perform the transformation in 3D instead of setting all z-axis values to 0 and working with a 2D transformation? Also, can anyone explain the mathematical basis of this line:
Thanks a lot