Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 17 additions & 13 deletions egomimic/hydra_configs/data/eva_bc_s3.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,30 +4,34 @@ train_datasets:
dataset1:
_target_: egomimic.rldb.utils.S3RLDBDataset
bucket_name: "rldb"
mode: train
embodiment: "eva_bimanual"
cache_root: "/coc/flash7/rpunamiya6/.cache"
filters:
task: "fold clothes"
debug_no_threadpool: True
temp_root: "/coc/flash7/scratch/egoverseS3Dataset"
mode: total
embodiment: "eva_right_arm"
filters:
task: "object in container"
episode_hash: "2025-11-27-03-39-50-378000"
local_files_only: True

valid_datasets:
dataset1:
_target_: egomimic.rldb.utils.S3RLDBDataset
bucket_name: "rldb"
mode: valid
embodiment: "eva_bimanual"
cache_root: "/coc/flash7/rpunamiya6/.cache"
filters:
task: "fold clothes"
debug_no_threadpool: True
temp_root: "/coc/flash7/scratch/egoverseS3Dataset"
mode: total
embodiment: "eva_right_arm"
filters:
task: "object in container"
episode_hash: "2025-11-27-03-39-50-378000"
local_files_only: True

train_dataloader_params:
dataset1:
batch_size: 32
batch_size: 2
num_workers: 10

valid_dataloader_params:
dataset1:
batch_size: 32
num_workers: 10
batch_size: 2
num_workers: 10
18 changes: 9 additions & 9 deletions egomimic/hydra_configs/model/pi0.5.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,23 +4,23 @@ robomimic_model:
data_schematic: _${data.dataset.data_schematic}
camera_transforms:
_target_: egomimic.utils.egomimicUtils.CameraTransforms
intrinsics_key: "base" # change to base_half if using half res
intrinsics_key: "base" # change to base_half if using half res
extrinsics_key: "ariaJun7"
ac_keys:
aria_bimanual: "actions_cartesian"
domains: ["aria_bimanual"]
eva_right_arm: "actions_cartesian"
domains: ["eva_right_arm"]

action_converters:
rules:
ARIA_BIMANUAL:
_target_: egomimic.utils.action_utils.HumanBimanualCartesianEuler
eva_right_arm:
_target_: egomimic.utils.action_utils.RobotRightCartesianEuler
# optional fallback if no match is found
fallback:
_target_: egomimic.utils.action_utils.BaseActionConverter
config:

config:
pytorch_training_precision: bfloat16
pytorch_weight_path: /storage/home/hcoda1/5/rpunamiya6/cedar-dx/rpunamiya6/Projects/EgoVerse/egomimic/algo/pi_checkpoints/pi05_base_pytorch
pytorch_weight_path: /coc/cedarp-dxu345-0/acheluva3/EgoVerse/egomimic/algo/pi_checkpoints/pi05_base_pytorch
model:
pi05: true
action_dim: 32
Expand Down Expand Up @@ -53,4 +53,4 @@ scheduler:
_partial_: true
num_warmup_steps: 1000
num_training_steps: 100000
num_cycles: 0.5
num_cycles: 0.5
22 changes: 19 additions & 3 deletions egomimic/hydra_configs/train.yaml
Original file line number Diff line number Diff line change
@@ -1,7 +1,11 @@
defaults:
- model: hpt_bc_flow_eva
- model: pi0.5
- paths: default
- trainer: ddp_pi
- debug: null
- logger: wandb
- data: eva_bc_s3
Expand All @@ -16,7 +20,7 @@ train: true
eval: false

eval_class:
_target_ : egomimic.scripts.evaluation.Eve
_target_: egomimic.scripts.evaluation.Eve
mode: real
arm: both
eval_path: "./logs/eval/${name}_${now:%Y-%m-%d_%H-%M-%S}"
Expand All @@ -36,11 +40,16 @@ data_schematic: # Dynamically fill in these shapes from the dataset
_target_: egomimic.rldb.utils.DataSchematic
norm_mode: quantile
schematic_dict:
eva_right_arm:
base_0_rgb: #batch key
key_type: camera_keys # key type
lerobot_key: observations.images.front_img_1 # dataset key
right_wrist_img:
right_wrist_0_rgb:
key_type: camera_keys
lerobot_key: observations.images.right_wrist_img
left_wrist_img:
Expand Down Expand Up @@ -93,10 +102,17 @@ data_schematic: # Dynamically fill in these shapes from the dataset
embodiment:
key_type: metadata_keys
lerobot_key: metadata.embodiment
viz_img_key:
eva_right_arm: base_0_rgb
aria_right_arm: front_img_1
mecka_bimanual: base_0_rgb
29 changes: 28 additions & 1 deletion egomimic/rldb/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
from multiprocessing.dummy import connection
from pathlib import Path
from unittest import result
from tqdm import tqdm


import boto3
Expand Down Expand Up @@ -580,7 +581,8 @@ def __init__(
f"Found {len(subdirs)} subfolders. Attempting to load valid RLDB datasets..."
)

for subdir in subdirs:
print("Creating subdatasets from folder...")
for subdir in tqdm(subdirs):
info_json = subdir / "meta" / "info.json"
if not info_json.exists():
logger.warning(f"Skipping {subdir.name}: missing meta/info.json")
Expand Down Expand Up @@ -679,6 +681,7 @@ def __init__(
filters={},
**kwargs,
):
logger.info("Instantiating S3RLDBDataset...")
temp_root += "/S3_rldb_data"
filters["robot_name"] = embodiment
filters["is_deleted"] = False
Expand Down Expand Up @@ -863,6 +866,30 @@ def _submit_arg(p: Path):
kwargs=kwargs,
)

# Debugging aid: optionally run dataset loading sequentially to make hangs reproducible
# and stack traces easier to interpret.
# Enable with: RLDB_LOAD_SEQUENTIAL=1
# for p in tqdm(all_paths, total=len(all_paths), desc="Loading RLDBDataset (sequential)"):
# repo_id, ds_obj, reason, err = cls._load_rldb_dataset_one(**_submit_arg(p))

# if ds_obj is not None:
# datasets[repo_id] = ds_obj
# continue

# if reason == "not_a_dir":
# continue

# skipped.append(repo_id)

# if reason == "not_in_filtered_paths":
# logger.warning(f"Skipping {repo_id}: not in filtered S3 paths")
# elif reason and reason.startswith("embodiment_mismatch"):
# logger.warning(f"Skipping {repo_id}: {reason}")
# else:
# logger.error(f"Failed to load {repo_id} as RLDBDataset:\n{err}")

# return datasets, skipped

with ThreadPoolExecutor(max_workers=max_workers) as ex:
futures = [
ex.submit(cls._load_rldb_dataset_one, **_submit_arg(p))
Expand Down
98 changes: 98 additions & 0 deletions egomimic/robot/eva/eva_ws/src/eva/robot_interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
import pytorch_kinematics as pk
from scipy.spatial.transform import Rotation as R
from abc import ABC, abstractmethod
from egomimic.rldb.utils import RLDBDataset
from stream_aria import AriaRecorder, update_iptables
from stream_d405 import RealSenseRecorder
from egomimic.robot.eva.eva_kinematics import EvaMinkKinematicsSolver
Expand Down Expand Up @@ -284,6 +285,103 @@ def set_home(self):
self.controller[arm].reset_to_home()



class dummyArxInterface(Robot_Interface):
    """In-memory stand-in for the real Arx robot interface.

    Holds joint/EE state in plain numpy arrays so rollout code can run fully
    offline. If ``dataset_path`` is given, ``get_obs`` replays observations
    from a local RLDB dataset instead of echoing the simulated state.
    """

    def __init__(self, arms, dataset_path=None):
        # Intentionally do NOT call Robot_Interface.__init__: that would load
        # configs / open connections, and this class must stay fully offline.
        self.arms = arms
        self.recorders = {}
        self._joint_positions = {a: np.zeros(7, dtype=np.float64) for a in arms}
        self._ee_pose = {a: np.zeros(7, dtype=np.float64) for a in arms}
        self.dataset_path = dataset_path
        self.dataset = None
        if self.dataset_path is not None:
            # Replay source: first episode of a locally cached RLDB dataset.
            self.dataset = RLDBDataset(
                repo_id="test_dataset",
                root=self.dataset_path,
                local_files_only=True,
                episodes=[0],
                mode="train",
                embodiment="eva_bimanual",
            )
            self.iter = iter(self.dataset)

    def _create_controllers(self, cfg):
        # No hardware controllers in the dummy interface.
        return None

    def set_joints(self, desired_position, arm):
        """Record a single-arm joint command (7 values, last is gripper)."""
        if desired_position.shape != (7,):
            raise ValueError(
                "For Eva, desired position must be of shape (7,) for single arm"
            )
        self._joint_positions[arm] = desired_position.astype(np.float64)

    def set_pose(self, pose, arm):
        """Record an EE pose command; returns the (dummy) joint solution.

        Only the gripper value (index 6) is carried into the joint vector —
        no IK is performed here.
        """
        if pose.shape != (7,):
            raise ValueError(
                f"For Eva, target position must be of shape (7,), current shape: {pose.shape}"
            )
        self._ee_pose[arm] = pose.astype(np.float64)
        solved = np.zeros(7, dtype=np.float64)
        solved[6] = pose[6]
        self._joint_positions[arm] = solved
        return solved

    def get_obs(self):
        """Return one observation dict, from the dataset if one was loaded."""
        if self.dataset is None:
            # Synthetic path: pack per-arm state into 14-dim vectors
            # (left arm occupies indices 0-6, right arm 7-13).
            # NOTE(review): this branch emits key "ee_poses" while the dataset
            # branch emits "ee_pose" — confirm which one consumers expect.
            joint_positions = np.zeros(14, dtype=np.float64)
            ee_poses = np.zeros(14, dtype=np.float64)
            for arm in self.arms:
                off = 7 if arm == "right" else 0
                joint_positions[off : off + 7] = self.get_joints(arm)
                xyz, rot = self.get_pose(arm, se3=False)
                ee_poses[off : off + 7] = np.concatenate(
                    [xyz, rot.as_euler("ZYX", degrees=False), [joint_positions[off + 6]]]
                )
            return {"joint_positions": joint_positions, "ee_poses": ee_poses}

        # Replay path: pull the next sample and convert CHW float tensors in
        # [0, 1] back to HWC uint8 images.
        data = next(self.iter)  # TODO from dataschematic instead of hardcoding
        front_image_key = "observations.images.front_img_1"
        right_wrist_image_key = "observations.images.right_wrist_img"

        def _to_uint8(t):
            return (t.permute(0, 2, 3, 1).cpu().numpy() * 255.0).astype(np.uint8)

        return {
            "front_img_1": _to_uint8(data[front_image_key]),
            "right_wrist_img": _to_uint8(data[right_wrist_image_key]),
            "ee_pose": data["observations.state.ee_pose"].cpu().numpy(),
            "joint_positions": data["observations.state.joint_positions"].cpu().numpy(),
        }

    def solve_ik(self, ee_pose, arm):
        """Dummy IK: validate the input shape, return a zero joint vector."""
        if ee_pose.shape != (6,):
            raise ValueError(
                "For Eva, target position must be of shape (6,) for single arm"
            )
        return np.zeros(6, dtype=np.float64)

    def get_joints(self, arm):
        """Return a copy of the last commanded joint vector for *arm*."""
        return self._joint_positions[arm].copy()

    def get_pose(self, arm, se3=False):
        """Return the stored EE pose as (pos, Rotation) or a 4x4 SE(3) matrix."""
        stored = self._ee_pose[arm]
        pos = stored[:3].copy()
        rot = R.from_euler("ZYX", stored[3:6], degrees=False)
        if not se3:
            return pos, rot
        T = np.eye(4, dtype=np.float64)
        T[:3, :3] = rot.as_matrix()
        T[:3, 3] = pos
        return T

    def set_home(self):
        """Zero out all stored joint and EE state for every arm."""
        for arm in self.arms:
            self._joint_positions[arm] = np.zeros(7, dtype=np.float64)
            self._ee_pose[arm] = np.zeros(7, dtype=np.float64)




if __name__ == "__main__":
# Run Eva example
# Note: Update the URDF path before running
Expand Down
10 changes: 8 additions & 2 deletions egomimic/robot/rollout.py
Original file line number Diff line number Diff line change
Expand Up @@ -336,8 +336,7 @@ def reset(self):
if getattr(self.policy.model, "diffusion", False):
for head in self.policy.model.nets.policy.heads:
if isinstance(self.policy.model.nets.policy.heads[head], DenoisingPolicy):
self.policy.model.nets.policy.heads[head].num_inference_steps = 10

self.policy.model.nets.policy.heads[head].num_inference_steps = 10

def reset_rollout(ri, policy):
print("Resetting rollout: going home + clearing policy state")
Expand Down Expand Up @@ -541,6 +540,13 @@ def main(
help="enable debug visualization of actions on images",
)

parser.add_argument(
"--offline-debug",
type=str,
default=None,
help="path to processed lerobot dataset for offline debugging",
)

args = parser.parse_args()
episodes = args.episodes if args.episodes is not None else [0]

Expand Down
45 changes: 0 additions & 45 deletions egomimic/scripts/evaluation/eval.py

This file was deleted.

Loading