Skip to content

Commit 732eaf4

Browse files
committed
fix get_world_pose() method for sensor
1 parent 454d69c commit 732eaf4

1 file changed

Lines changed: 37 additions & 46 deletions

File tree

src/physics_simulator/sensor/rgb_camera.py

Lines changed: 37 additions & 46 deletions
Original file line number | Diff line number | Diff line change
@@ -409,42 +409,39 @@ def get_world_pose(self, camera_axes="world"):
409409
"""Get the camera's pose in world coordinates.
410410
411411
Args:
412-
camera_axes: Coordinate system for the returned pose ("world" or "camera")
412+
camera_axes: Coordinate system for the returned pose ("world" or "ros")
413413
414414
Returns:
415-
tuple: (position, orientation) where position is a 3D vector and
415+
np.ndarray: Camera pose as [position, orientation] where position is a 3D vector and
416416
orientation is a quaternion in [x, y, z, w] format (scipy `as_quat` scalar-last convention)
417417
"""
418-
from physics_simulator.utils.camera_utils import get_camera_transform_matrix
418+
from physics_simulator.utils.camera_utils import get_camera_extrinsic_matrix
419419
from scipy.spatial.transform import Rotation as R
420420

421421
with self.simulator.lock:
422-
world_to_camera_matrix = get_camera_transform_matrix(
422+
# Get camera extrinsic matrix (camera pose in world frame)
423+
camera_extrinsic = get_camera_extrinsic_matrix(
423424
sim=self.simulator,
424-
camera_name=self.name,
425-
camera_height=self.height,
426-
camera_width=self.width,
425+
camera_name=self.name
427426
)
428-
camera_to_world_matrix = np.linalg.inv(world_to_camera_matrix)
429-
# Extract position from the last column of the matrix
430-
position = camera_to_world_matrix[:3, 3]
431-
# Extract rotation matrix and convert to quaternion
432-
rotation_matrix = camera_to_world_matrix[:3, :3]
427+
428+
# Extract position and rotation from the extrinsic matrix
429+
position = camera_extrinsic[:3, 3]
430+
rotation_matrix = camera_extrinsic[:3, :3]
433431
quaternion = R.from_matrix(rotation_matrix).as_quat()
434432

435-
inverse_transform = {
436-
"world": MUJOCO_TO_WORLD_TRANSFORM,
437-
"ros": MUJOCO_TO_ROS_TRANSFORM
438-
}.get(camera_axes, np.eye(4))
439-
440-
# Transform position
441-
homog_pos = np.append(position, 1)
442-
position = (inverse_transform @ homog_pos)[:3]
443-
444-
# Transform rotation
445-
rotation_matrix = R.from_quat(quaternion).as_matrix()
446-
transformed_rot = rotation_matrix @ inverse_transform[:3, :3].T
447-
quaternion = R.from_matrix(transformed_rot).as_quat()
433+
# Apply coordinate system transformation if needed
434+
if camera_axes == "ros":
435+
# Transform from MuJoCo camera frame to ROS camera frame
436+
ros_transform = ROS_TO_MUJOCO_TRANSFORM
437+
438+
# Transform position
439+
homog_pos = np.append(position, 1)
440+
position = (ros_transform @ homog_pos)[:3]
441+
442+
# Transform rotation
443+
transformed_rot = rotation_matrix @ ros_transform[:3, :3].T
444+
quaternion = R.from_matrix(transformed_rot).as_quat()
448445

449446
return np.concatenate([position, quaternion])
450447

@@ -455,47 +452,41 @@ def get_pose_wrt_robot(self, robot):
455452
robot: The robot to calculate the relative pose to
456453
457454
Returns:
458-
tuple: (position, orientation) relative to the robot's frame
455+
np.ndarray: Camera pose relative to robot as [position, orientation] where
456
position is a 3D vector and orientation is a quaternion in [x, y, z, w] format (scipy `as_quat` scalar-last convention)
459457
"""
458+
from scipy.spatial.transform import Rotation as R
459+
460460
with self.simulator.lock:
461461
camera_pose_wrt_world = self.get_world_pose(camera_axes="world")
462-
463462
robot_position, robot_orientation = robot.get_world_pose()
464-
robot_pose_wrt_world = position_and_orientation_to_pose(
465-
robot_position, robot_orientation
466-
)
467-
# Extract camera's position and orientation (as a quaternion)
463+
464+
# Extract camera's position and orientation
468465
camera_position = camera_pose_wrt_world[:3]
469466
camera_orientation = camera_pose_wrt_world[3:]
470467

471468
# Create transformation matrices
472469
robot_rotation_matrix = R.from_quat(robot_orientation).as_matrix()
473470
camera_rotation_matrix = R.from_quat(camera_orientation).as_matrix()
474471

475-
# Create the transformation matrix for the robot in world frame
472+
# Create transformation matrices
476473
robot_transform = np.eye(4)
477474
robot_transform[:3, :3] = robot_rotation_matrix
478475
robot_transform[:3, 3] = robot_position
479476

480-
# Create the transformation matrix for the camera in world frame
481477
camera_transform = np.eye(4)
482478
camera_transform[:3, :3] = camera_rotation_matrix
483479
camera_transform[:3, 3] = camera_position
484480

485-
# Calculate the inverse of the robot transform to get world-to-robot transform
481+
# Calculate world-to-robot transform
486482
world_to_robot_transform = np.linalg.inv(robot_transform)
487483

488-
# Calculate the camera pose relative to the robot
489-
camera_pose2robot_transform = np.dot(world_to_robot_transform, camera_transform)
484+
# Calculate camera pose relative to robot
485+
camera_to_robot_transform = world_to_robot_transform @ camera_transform
490486

491-
# Extract the position and rotation from the resulting transformation matrix
492-
camera_position2robot = camera_pose2robot_transform[:3, 3]
493-
camera_rotation2robot = R.from_matrix(
494-
camera_pose2robot_transform[:3, :3]
495-
).as_quat()
487+
# Extract position and orientation
488+
relative_position = camera_to_robot_transform[:3, 3]
489+
relative_rotation_matrix = camera_to_robot_transform[:3, :3]
490+
relative_orientation = R.from_matrix(relative_rotation_matrix).as_quat()
496491

497-
# Combine position and quaternion into a single array
498-
camera_pose2robot = np.concatenate(
499-
[camera_position2robot, camera_rotation2robot]
500-
)
501-
return camera_pose2robot
492+
return np.concatenate([relative_position, relative_orientation])

0 commit comments

Comments
 (0)