diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..a6cf4d9
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,28 @@
+BSD 3-Clause License
+
+Copyright (c) 2024, Zhengyi Luo
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..8d4ce76
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,5 @@
+include LICENSE
+include README.md
+recursive-include smpl_sim *.py
+recursive-include smpl_sim/utils *.py
+recursive-include smpl_sim/data *.yaml *.urdf *.xml
\ No newline at end of file
diff --git a/MUJOCO_LOG.TXT b/MUJOCO_LOG.TXT
index 051c558..217ed9d 100644
--- a/MUJOCO_LOG.TXT
+++ b/MUJOCO_LOG.TXT
@@ -1,3 +1,9 @@
Sun Jan 28 12:48:00 2024
ERROR: could not initialize GLFW
+Mon Feb 19 20:01:16 2024
+WARNING: mju_openResource: unknown file 'tmp/smpl_humanoid.xml'
+
+Sat Mar 2 18:16:01 2024
+ERROR: could not initialize GLFW
+
diff --git a/README.md b/README.md
index 529ec0f..911d737 100644
--- a/README.md
+++ b/README.md
@@ -1,29 +1,58 @@
[Repo still under construction]
-# Porting of UHC and PHC to MUJOCO>=3
+# SMPLSim: Simulating SMPL/SMPLX Humanoids in MUJOCO and Isaac Gym
`smpl_sim` is a pip-installable library containing a modelization of the SMPL humanoid in different simulators (MUJOCO and Isaac Gym). It is a minimal library to support simple humanoid tasks, and is the basis library for doing more complicated tasks such as motion imitation.
+
+

+
+
+
+
+## Core Features
+This repo supports creating XML files for SMPL/SMPLH/SMPLX-compatible humanoid models in MUJOCO>=3, creating the same humanoid models in Isaac Gym, and training them in MUJOCO with a simple PPO implementation.
+
### Commands:
```
python examples/env_humanoid_test.py headless=False
-python smpl_sim/run.py env=speed exp_name=speed env.self_obs_v=2
-python smpl_sim/run.py env=getup exp_name=speed env.self_obs_v=2
-python smpl_sim/run.py env=reach exp_name=speed env.self_obs_v=2
+python smpl_sim/run.py env=speed exp_name=speed env.self_obs_v=2 robot.create_vel_sensors=True
+python smpl_sim/run.py env=getup exp_name=speed env.self_obs_v=2 robot.create_vel_sensors=True
+python smpl_sim/run.py env=reach exp_name=speed env.self_obs_v=2 robot.create_vel_sensors=True
```
## Citation
If you find this work useful for your research, please cite our paper:
```
+PULSE:
+@inproceedings{
+luo2024universal,
+title={Universal Humanoid Motion Representations for Physics-Based Control},
+author={Zhengyi Luo and Jinkun Cao and Josh Merel and Alexander Winkler and Jing Huang and Kris M. Kitani and Weipeng Xu},
+booktitle={The Twelfth International Conference on Learning Representations},
+year={2024},
+url={https://openreview.net/forum?id=OrOd8PxOO2}
+}
+
+PHC:
@inproceedings{Luo2023PerpetualHC,
author={Zhengyi Luo and Jinkun Cao and Alexander W. Winkler and Kris Kitani and Weipeng Xu},
title={Perpetual Humanoid Control for Real-time Simulated Avatars},
booktitle={International Conference on Computer Vision (ICCV)},
year={2023}
-}
+}
+
+UHC:
+@inproceedings{Luo2021DynamicsRegulatedKP,
+ title={Dynamics-Regulated Kinematic Policy for Egocentric Pose Estimation},
+ author={Zhengyi Luo and Ryo Hachiuma and Ye Yuan and Kris Kitani},
+ booktitle={Advances in Neural Information Processing Systems},
+ year={2021}
+}
```
+
Also consider citing these prior works that are used in this project:
```
@@ -42,9 +71,4 @@ Also consider citing these prior works that are used in this project:
year={2023}
}
-@inproceedings{Luo2021DynamicsRegulatedKP,
- title={Dynamics-Regulated Kinematic Policy for Egocentric Pose Estimation},
- author={Zhengyi Luo and Ryo Hachiuma and Ye Yuan and Kris Kitani},
- booktitle={Advances in Neural Information Processing Systems},
- year={2021}
-}
\ No newline at end of file
+```
diff --git a/assets/smplsim_teaser.gif b/assets/smplsim_teaser.gif
new file mode 100644
index 0000000..468db58
Binary files /dev/null and b/assets/smplsim_teaser.gif differ
diff --git a/examples/motion_lib_test.py b/examples/motion_lib_test.py
index c7d9040..8d7050b 100644
--- a/examples/motion_lib_test.py
+++ b/examples/motion_lib_test.py
@@ -35,9 +35,8 @@
def main(cfg : DictConfig) -> None:
# motions = joblib.load("sample_data/amass_isaac_standing_upright_slim.pkl")
device = torch.device("cpu")
- # motion_file = "/hdd/zen/data/ActBound/AMASS/amass_copycat_take6_test.pkl"
- # motion_file = "data/amass/singles/0-KIT_3_walking_slow08_poses.pkl"
- motion_file = "data/amass/singles/0-SSM_synced_20160930_50032_punch_kick_sync_poses.pkl"
+ motion_file = "data/amass/singles/0-KIT_3_walking_slow08_poses.pkl"
+ # motion_file = "data/amass/singles/0-SSM_synced_20160930_50032_punch_kick_sync_poses.pkl"
motion_lib_cfg = EasyDict({
"motion_file": motion_file,
"device": device,
@@ -50,7 +49,8 @@ def main(cfg : DictConfig) -> None:
"randomrize_heading": True,
})
motion_lib = MotionLibSMPL(motion_lib_cfg)
- motion_lib.load_motions(shape_params = [np.zeros(17)] )
+ shape_params = np.zeros(17)
+ motion_lib.load_motions(m_cfg = motion_lib_cfg, shape_params = [shape_params] )
env = HumanoidEnv(cfg)
env.reset()
diff --git a/pyproject.toml b/pyproject.toml
index a68cccb..7d13455 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -34,6 +34,7 @@ dependencies = [
'gymnasium>=0.29.1',
'hydra-core>=1.3',
'tqdm',
+ 'easydict',
"importlib-resources==3.0.0;python_version<'3.9'"
]
diff --git a/smpl_sim/agents/agent_humanoid.py b/smpl_sim/agents/agent_humanoid.py
index bcb7dd9..3e35021 100644
--- a/smpl_sim/agents/agent_humanoid.py
+++ b/smpl_sim/agents/agent_humanoid.py
@@ -180,22 +180,23 @@ def log_train(self, info):
if not self.cfg.no_log:
- wandb.log(
- data={
+ log_data = {
"avg_episode_reward": loggers.avg_episode_reward,
"eps_len": loggers.avg_episode_len,
"avg_rwd": loggers.avg_reward,
"reward_raw": loggers.info_dict,
- },
- step=self.epoch,
- )
-
+ }
+
if "log_eval" in info:
- wandb.log(data=info["log_eval"], step=self.epoch)
+ log_data.update(info["log_eval"])
+
+ wandb.log(data=log_data, step=self.epoch)
+
def optimize_policy(self,save_model=True):
starting_epoch = self.epoch
for _ in range(starting_epoch, self.cfg.learning.max_epoch):
+ info = {}
t0 = time.time()
self.pre_epoch()
batch, loggers = self.sample(self.cfg.learning.min_batch_size)
@@ -213,13 +214,12 @@ def optimize_policy(self,save_model=True):
elif save_model and (self.epoch) % self.cfg.learning.save_curr_frequency == 0:
self.save_curr()
t2 = time.time()
-
- info = {
+ info.update({
"loggers": loggers,
"T_sample": t1 - t0,
"T_update": t2 - t1,
"T_total": t2 - t0,
- }
+ })
self.log_train(info)
self.after_epoch()
diff --git a/smpl_sim/envs/humanoid_env.py b/smpl_sim/envs/humanoid_env.py
index b3948d6..6b104d4 100644
--- a/smpl_sim/envs/humanoid_env.py
+++ b/smpl_sim/envs/humanoid_env.py
@@ -12,6 +12,7 @@
from scipy.spatial.transform import Rotation as sRot
from enum import Enum
from collections import defaultdict
+import torch
from smpl_sim.envs.base_env import BaseEnv
import smpl_sim.envs.controllers as ctrls
diff --git a/smpl_sim/smpllib/motion_lib_base.py b/smpl_sim/smpllib/motion_lib_base.py
index 4af56e2..a901eff 100644
--- a/smpl_sim/smpllib/motion_lib_base.py
+++ b/smpl_sim/smpllib/motion_lib_base.py
@@ -262,7 +262,7 @@ def update_soft_sampling_weight(self, failed_keys):
def update_sampling_prob(self, termination_history):
print("------------------------------------------------------ Restoring Termination History ------------------------------------------------------")
- if len(termination_history) == len(self._termination_history):
+ if len(self._sampling_prob) == len(self._termination_history):
self._sampling_prob[:] = termination_history/termination_history.sum()
self._termination_history = termination_history
print("Successfully restored termination history")
diff --git a/smpl_sim/smpllib/motion_lib_smpl.py b/smpl_sim/smpllib/motion_lib_smpl.py
index 0430ce0..1663024 100644
--- a/smpl_sim/smpllib/motion_lib_smpl.py
+++ b/smpl_sim/smpllib/motion_lib_smpl.py
@@ -113,7 +113,7 @@ def load_motion_with_skeleton(ids, motion_data_list, shape_params, mesh_parsers,
start = random.randint(0, seq_len - max_len)
end = start + max_len
- trans = to_torch(curr_file['trans']).float().clone()[start:end]
+ trans = to_torch(curr_file['trans'] if "trans" in curr_file else curr_file['trans_orig']).float().clone()[start:end]
pose_aa = to_torch(curr_file['pose_aa'][start:end]).float().clone()
if pose_aa.shape[1] == 156:
diff --git a/smpl_sim/smpllib/np_smpl_humanoid_batch.py b/smpl_sim/smpllib/np_smpl_humanoid_batch.py
index 7ab2621..9c15006 100644
--- a/smpl_sim/smpllib/np_smpl_humanoid_batch.py
+++ b/smpl_sim/smpllib/np_smpl_humanoid_batch.py
@@ -5,20 +5,20 @@
import sys
import pdb
import os.path as osp
-from copycat.utils.torch_ext import dict_to_torch
+from smpl_sim.utils.torch_ext import dict_to_torch
sys.path.append(os.getcwd())
-from copycat.utils.torch_utils import *
-from copycat.utils.transform_utils import *
+from smpl_sim.utils.torch_utils import *
+from smpl_sim.utils.np_transform_utils import *
from scipy.spatial.transform import Rotation as sRot
import joblib
from mujoco_py import load_model_from_path
-from copycat.smpllib.smpl_mujoco import SMPLConverter, smpl_to_qpose, smpl_to_qpose_torch, SMPL_BONE_ORDER_NAMES
-from copycat.smpllib.smpl_parser import SMPL_EE_NAMES
-from copycat.utils.tools import get_expert, get_expert_master
+from smpl_sim.smpllib.smpl_joint_names import SMPLConverter, smpl_to_qpose, smpl_to_qpose_torch, SMPL_BONE_ORDER_NAMES
+from smpl_sim.smpllib.smpl_parser import SMPL_EE_NAMES
+from smpl_sim.utils.tools import get_expert, get_expert_master
import pytorch3d.transforms as tR
-from copycat.smpllib.smpl_parser import (
+from smpl_sim.smpllib.smpl_parser import (
SMPL_Parser,
SMPLH_Parser,
SMPLX_Parser,
@@ -406,12 +406,12 @@ def forward_kinematics_batch(self, rotations, root_rotations,
if __name__ == "__main__":
import mujoco_py
- from copycat.smpllib.smpl_robot import Robot
- from copycat.smpllib.torch_smpl_humanoid import Humanoid
- from copycat.utils.config_utils.copycat_config import Config
- from copycat.data_loaders.dataset_amass_single import DatasetAMASSSingle
- from copycat.utils.torch_ext import dict_to_torch
- from copycat.smpllib.smpl_mujoco import smpl_to_qpose_torch, smplh_to_smpl
+ from smpl_sim.smpllib.smpl_robot import Robot
+ from smpl_sim.smpllib.torch_smpl_humanoid import Humanoid
+ from smpl_sim.utils.config_utils.copycat_config import Config
+ from smpl_sim.data_loaders.dataset_amass_single import DatasetAMASSSingle
+ from smpl_sim.utils.torch_ext import dict_to_torch
+ from smpl_sim.smpllib.smpl_mujoco import smpl_to_qpose_torch, smplh_to_smpl
torch.manual_seed(0)
cfg = Config(
diff --git a/smpl_sim/smpllib/skeleton_local.py b/smpl_sim/smpllib/skeleton_local.py
index 9706d7a..a207b98 100644
--- a/smpl_sim/smpllib/skeleton_local.py
+++ b/smpl_sim/smpllib/skeleton_local.py
@@ -105,31 +105,62 @@
# }
# PHC's gains
-# GAINS = {
-# "L_Hip": [800, 80, 1, 500],
-# "L_Knee": [800, 80, 1, 500],
-# "L_Ankle": [800, 80, 1, 500],
-# "L_Toe": [500, 50, 1, 500],
-# "R_Hip": [800, 80, 1, 500],
-# "R_Knee": [800, 80, 1, 500],
-# "R_Ankle": [800, 80, 1, 500],
-# "R_Toe": [500, 50, 1, 500],
-# "Torso": [1000, 100, 1, 500],
-# "Spine": [1000, 100, 1, 500],
-# "Chest": [1000, 100, 1, 500],
-# "Neck": [500, 50, 1, 250],
-# "Head": [500, 50, 1, 250],
-# "L_Thorax": [500, 50, 1, 500],
-# "L_Shoulder": [500, 50, 1, 500],
-# "L_Elbow": [500, 50, 1, 150],
-# "L_Wrist": [300, 30, 1, 150],
-# "L_Hand": [300, 30, 1, 150],
-# "R_Thorax": [500, 50, 1, 150],
-# "R_Shoulder": [500, 50, 1, 250],
-# "R_Elbow": [500, 50, 1, 150],
-# "R_Wrist": [300, 30, 1, 150],
-# "R_Hand": [300, 30, 1, 150],
-# }
+GAINS_PHC = {
+ "L_Hip": [800, 80, 1, 500],
+ "L_Knee": [800, 80, 1, 500],
+ "L_Ankle": [800, 80, 1, 500],
+ "L_Toe": [500, 50, 1, 500],
+ "R_Hip": [800, 80, 1, 500],
+ "R_Knee": [800, 80, 1, 500],
+ "R_Ankle": [800, 80, 1, 500],
+ "R_Toe": [500, 50, 1, 500],
+ "Torso": [1000, 100, 1, 500],
+ "Spine": [1000, 100, 1, 500],
+ "Chest": [1000, 100, 1, 500],
+ "Neck": [500, 50, 1, 250],
+ "Head": [500, 50, 1, 250],
+ "L_Thorax": [500, 50, 1, 500],
+ "L_Shoulder": [500, 50, 1, 500],
+ "L_Elbow": [500, 50, 1, 150],
+ "L_Wrist": [300, 30, 1, 150],
+ "L_Hand": [300, 30, 1, 150],
+ "R_Thorax": [500, 50, 1, 150],
+ "R_Shoulder": [500, 50, 1, 250],
+ "R_Elbow": [500, 50, 1, 150],
+ "R_Wrist": [300, 30, 1, 150],
+ "R_Hand": [300, 30, 1, 150],
+
+ "L_Index1": [100, 10, 1, 150],
+ "L_Index2": [100, 10, 1, 150],
+ "L_Index3": [100, 10, 1, 150],
+ "L_Middle1": [100, 10, 1, 150],
+ "L_Middle2": [100, 10, 1, 150],
+ "L_Middle3": [100, 10, 1, 150],
+ "L_Pinky1": [100, 10, 1, 150],
+ "L_Pinky2": [100, 10, 1, 150],
+ "L_Pinky3": [100, 10, 1, 150],
+ "L_Ring1": [100, 10, 1, 150],
+ "L_Ring2": [100, 10, 1, 150],
+ "L_Ring3": [100, 10, 1, 150],
+ "L_Thumb1": [100, 10, 1, 150],
+ "L_Thumb2": [100, 10, 1, 150],
+ "L_Thumb3": [100, 10, 1, 150],
+ "R_Index1": [100, 10, 1, 150],
+ "R_Index2": [100, 10, 1, 150],
+ "R_Index3": [100, 10, 1, 150],
+ "R_Middle1": [100, 10, 1, 150],
+ "R_Middle2": [100, 10, 1, 150],
+ "R_Middle3": [100, 10, 1, 150],
+ "R_Pinky1": [100, 10, 1, 150],
+ "R_Pinky2": [100, 10, 1, 150],
+ "R_Pinky3": [100, 10, 1, 150],
+ "R_Ring1": [100, 10, 1, 150],
+ "R_Ring2": [100, 10, 1, 150],
+ "R_Ring3": [100, 10, 1, 150],
+ "R_Thumb1": [100, 10, 1, 150],
+ "R_Thumb2": [100, 10, 1, 150],
+ "R_Thumb3": [100, 10, 1, 150],
+}
### UHC Phd
# GAINS = {
@@ -158,7 +189,7 @@
# "R_Hand": [100, 10, 1, 150, 1, 1],
# }
-GAINS = {
+GAINS_MJ = {
"L_Hip": [250, 2.5, 1, 500, 10, 2],
"L_Knee": [250, 2.5, 1, 500, 10, 2],
"L_Ankle": [150, 2.5, 1, 500, 10, 2],
@@ -276,6 +307,7 @@ def load_from_offsets(
real_weight=False,
big_ankle=False,
box_body = False,
+ sim='mujoco',
ball_joints = False,
create_vel_sensors = False,
exclude_contacts = []
@@ -297,6 +329,7 @@ def load_from_offsets(
self.freeze_hand = freeze_hand
self.box_body = box_body
self.ball_joints = ball_joints
+ self.sim = sim
joint_names = list(filter(lambda x: all([t not in x for t in exclude_bones]), offsets.keys()))
dof_ind = {"x": 0, "y": 1, "z": 2}
self.len_scale = scale
@@ -369,7 +402,10 @@ def construct_tree(self,
attr = dict()
attr["name"] = name
attr["joint"] = name
- attr["gear"] = str(GAINS[name[:-2]][2])
+ if self.sim in ["mujoco"]:
+ attr["gear"] = str(GAINS_MJ[name[:-2]][2])
+ elif self.sim in ["isaacgym"]:
+ attr["gear"] = "500"
SubElement(actuators, "motor", attr)
@@ -440,7 +476,7 @@ def write_xml_bodynode(self, bone, parent_node, offset, ref_angles):
j_attr["name"] = bone.name
j_attr["type"] = "ball"
j_attr["pos"] = "{0:.4f} {1:.4f} {2:.4f}".format(*(bone.pos + offset))
- j_attr["user"] = " ".join([ str(s) for s in GAINS[bone.name]]) # using user to set the max torque
+ j_attr["user"] = " ".join([ str(s) for s in GAINS_MJ[bone.name]]) # using user to set the max torque
if j_attr["name"] in ref_angles.keys():
j_attr["ref"] = f"{ref_angles[j_attr['name']]:.1f}"
@@ -456,16 +492,14 @@ def write_xml_bodynode(self, bone, parent_node, offset, ref_angles):
j_attr["pos"] = "{0:.4f} {1:.4f} {2:.4f}".format(*(bone.pos + offset))
j_attr["axis"] = "{0:.4f} {1:.4f} {2:.4f}".format(*axis)
- ################################################################################
- # j_attr["stiffness"] = str(GAINS[bone.name][-2])
- # j_attr["damping"] = str(GAINS[bone.name][-1])
- ################################################################################
-
- # j_attr["stiffness"] = str(1)
- # j_attr["damping"] = str(1)
-
- j_attr["user"] = " ".join([ str(s) for s in GAINS[bone.name]]) # using user to set the max torque
- j_attr["armature"] = "0.01"
+ if self.sim in ["mujoco"]:
+ j_attr["user"] = " ".join([ str(s) for s in GAINS_MJ[bone.name]]) # using user to set the max torque
+ j_attr["armature"] = "0.01"
+ elif self.sim in ["isaacgym"]:
+ j_attr["stiffness"] = str(GAINS_PHC[bone.name][0])
+ j_attr["damping"] = str(GAINS_PHC[bone.name][1])
+ j_attr["armature"] = "0.02"
+
if i < len(bone.lb):
j_attr["range"] = "{0:.4f} {1:.4f}".format(bone.lb[i], bone.ub[i])
@@ -601,9 +635,6 @@ def write_xml_bodynode(self, bone, parent_node, offset, ref_angles):
rot = np.array([1, 0, 0, 0])
g_attr["type"] = "box"
- g_attr["pos"] = "{0:.4f} {1:.4f} {2:.4f}".format(*pos)
- g_attr["size"] = "{0:.4f} {1:.4f} {2:.4f}".format(*size)
- g_attr["quat"] = "{0:.4f} {1:.4f} {2:.4f} {3:.4f}".format(*rot)
if bone.name == "Pelvis":
size /= 1.75 # ZL Hack: shrinkage
@@ -628,7 +659,7 @@ def write_xml_bodynode(self, bone, parent_node, offset, ref_angles):
if self.real_weight_porpotion_boxes:
g_attr["density"] = str((hull_params['volume'] / (size[0] * size[1] * size[2] * 8).item()) * base_density)
-
+
g_attr["pos"] = "{0:.4f} {1:.4f} {2:.4f}".format(*pos)
g_attr["size"] = "{0:.4f} {1:.4f} {2:.4f}".format(*size)
g_attr["quat"] = "{0:.4f} {1:.4f} {2:.4f} {3:.4f}".format(*rot)
diff --git a/smpl_sim/smpllib/skeleton_mesh_local.py b/smpl_sim/smpllib/skeleton_mesh_local.py
index ce46ea1..188eccd 100644
--- a/smpl_sim/smpllib/skeleton_mesh_local.py
+++ b/smpl_sim/smpllib/skeleton_mesh_local.py
@@ -11,30 +11,118 @@
except ImportError:
from importlib.resources import files
-GAINS = {
- "L_Hip": [500, 50, 1, 500],
- "L_Knee": [500, 50, 1, 500],
- "L_Ankle": [400, 40, 1, 500],
- "L_Toe": [200, 20, 1, 500],
- "R_Hip": [500, 50, 1, 500],
- "R_Knee": [500, 50, 1, 500],
- "R_Ankle": [400, 40, 1, 500],
- "R_Toe": [200, 20, 1, 500],
+GAINS_PHC = {
+ "L_Hip": [800, 80, 1, 500],
+ "L_Knee": [800, 80, 1, 500],
+ "L_Ankle": [800, 80, 1, 500],
+ "L_Toe": [500, 50, 1, 500],
+ "R_Hip": [800, 80, 1, 500],
+ "R_Knee": [800, 80, 1, 500],
+ "R_Ankle": [800, 80, 1, 500],
+ "R_Toe": [500, 50, 1, 500],
"Torso": [1000, 100, 1, 500],
"Spine": [1000, 100, 1, 500],
"Chest": [1000, 100, 1, 500],
- "Neck": [100, 10, 1, 250],
- "Head": [100, 10, 1, 250],
- "L_Thorax": [400, 40, 1, 500],
- "L_Shoulder": [400, 40, 1, 500],
- "L_Elbow": [300, 30, 1, 150],
- "L_Wrist": [100, 10, 1, 150],
- "L_Hand": [100, 10, 1, 150],
- "R_Thorax": [400, 40, 1, 150],
- "R_Shoulder": [400, 40, 1, 250],
- "R_Elbow": [300, 30, 1, 150],
- "R_Wrist": [100, 10, 1, 150],
- "R_Hand": [100, 10, 1, 150],
+ "Neck": [500, 50, 1, 250],
+ "Head": [500, 50, 1, 250],
+ "L_Thorax": [500, 50, 1, 500],
+ "L_Shoulder": [500, 50, 1, 500],
+ "L_Elbow": [500, 50, 1, 150],
+ "L_Wrist": [300, 30, 1, 150],
+ "L_Hand": [300, 30, 1, 150],
+ "R_Thorax": [500, 50, 1, 150],
+ "R_Shoulder": [500, 50, 1, 250],
+ "R_Elbow": [500, 50, 1, 150],
+ "R_Wrist": [300, 30, 1, 150],
+ "R_Hand": [300, 30, 1, 150],
+
+ "L_Index1": [100, 10, 1, 150],
+ "L_Index2": [100, 10, 1, 150],
+ "L_Index3": [100, 10, 1, 150],
+ "L_Middle1": [100, 10, 1, 150],
+ "L_Middle2": [100, 10, 1, 150],
+ "L_Middle3": [100, 10, 1, 150],
+ "L_Pinky1": [100, 10, 1, 150],
+ "L_Pinky2": [100, 10, 1, 150],
+ "L_Pinky3": [100, 10, 1, 150],
+ "L_Ring1": [100, 10, 1, 150],
+ "L_Ring2": [100, 10, 1, 150],
+ "L_Ring3": [100, 10, 1, 150],
+ "L_Thumb1": [100, 10, 1, 150],
+ "L_Thumb2": [100, 10, 1, 150],
+ "L_Thumb3": [100, 10, 1, 150],
+ "R_Index1": [100, 10, 1, 150],
+ "R_Index2": [100, 10, 1, 150],
+ "R_Index3": [100, 10, 1, 150],
+ "R_Middle1": [100, 10, 1, 150],
+ "R_Middle2": [100, 10, 1, 150],
+ "R_Middle3": [100, 10, 1, 150],
+ "R_Pinky1": [100, 10, 1, 150],
+ "R_Pinky2": [100, 10, 1, 150],
+ "R_Pinky3": [100, 10, 1, 150],
+ "R_Ring1": [100, 10, 1, 150],
+ "R_Ring2": [100, 10, 1, 150],
+ "R_Ring3": [100, 10, 1, 150],
+ "R_Thumb1": [100, 10, 1, 150],
+ "R_Thumb2": [100, 10, 1, 150],
+ "R_Thumb3": [100, 10, 1, 150],
+}
+
+GAINS_MJ = {
+ "L_Hip": [800, 80, 1, 500],
+ "L_Knee": [800, 80, 1, 500],
+ "L_Ankle": [800, 80, 1, 500],
+ "L_Toe": [500, 50, 1, 500],
+ "R_Hip": [800, 80, 1, 500],
+ "R_Knee": [800, 80, 1, 500],
+ "R_Ankle": [800, 80, 1, 500],
+ "R_Toe": [500, 50, 1, 500],
+ "Torso": [1000, 100, 1, 500],
+ "Spine": [1000, 100, 1, 500],
+ "Chest": [1000, 100, 1, 500],
+ "Neck": [500, 50, 1, 250],
+ "Head": [500, 50, 1, 250],
+ "L_Thorax": [500, 50, 1, 500],
+ "L_Shoulder": [500, 50, 1, 500],
+ "L_Elbow": [500, 50, 1, 150],
+ "L_Wrist": [300, 30, 1, 150],
+ "L_Hand": [300, 30, 1, 150],
+ "R_Thorax": [500, 50, 1, 150],
+ "R_Shoulder": [500, 50, 1, 250],
+ "R_Elbow": [500, 50, 1, 150],
+ "R_Wrist": [300, 30, 1, 150],
+ "R_Hand": [300, 30, 1, 150],
+
+ "L_Index1": [100, 10, 1, 150],
+ "L_Index2": [100, 10, 1, 150],
+ "L_Index3": [100, 10, 1, 150],
+ "L_Middle1": [100, 10, 1, 150],
+ "L_Middle2": [100, 10, 1, 150],
+ "L_Middle3": [100, 10, 1, 150],
+ "L_Pinky1": [100, 10, 1, 150],
+ "L_Pinky2": [100, 10, 1, 150],
+ "L_Pinky3": [100, 10, 1, 150],
+ "L_Ring1": [100, 10, 1, 150],
+ "L_Ring2": [100, 10, 1, 150],
+ "L_Ring3": [100, 10, 1, 150],
+ "L_Thumb1": [100, 10, 1, 150],
+ "L_Thumb2": [100, 10, 1, 150],
+ "L_Thumb3": [100, 10, 1, 150],
+ "R_Index1": [100, 10, 1, 150],
+ "R_Index2": [100, 10, 1, 150],
+ "R_Index3": [100, 10, 1, 150],
+ "R_Middle1": [100, 10, 1, 150],
+ "R_Middle2": [100, 10, 1, 150],
+ "R_Middle3": [100, 10, 1, 150],
+ "R_Pinky1": [100, 10, 1, 150],
+ "R_Pinky2": [100, 10, 1, 150],
+ "R_Pinky3": [100, 10, 1, 150],
+ "R_Ring1": [100, 10, 1, 150],
+ "R_Ring2": [100, 10, 1, 150],
+ "R_Ring3": [100, 10, 1, 150],
+ "R_Thumb1": [100, 10, 1, 150],
+ "R_Thumb2": [100, 10, 1, 150],
+ "R_Thumb3": [100, 10, 1, 150],
}
@@ -93,6 +181,7 @@ def load_from_offsets(
scale,
equalities,
hull_dict,
+ sim="mujoco",
exclude_contacts=None,
collision_groups=None,
conaffinity=None,
@@ -117,6 +206,7 @@ def load_from_offsets(
self.hull_dict = hull_dict
self.upright_start = upright_start
self.create_vel_sensors = create_vel_sensors
+ self.sim = sim
for group, bones in collision_groups.items():
for bone in bones:
@@ -232,20 +322,11 @@ def construct_tree(
# create meshes
asset = tree.getroot().find("asset")
# ZL: Isaac version
- # for bone in self.bones:
- # if os.path.exists(f"{self.model_dir}/geom/{bone.name}.stl"):
- # attr = {
- # "file":
- # f"{self.model_dir.split('/')[-1]}/geom/{bone.name}.stl",
- # "name": f"{bone.name}_mesh"
- # }
- # # geom_relative_path = f'../mesh/smpl/{self.model_dir.split("/")[-1]}'
- # # attr = {"file": f"{geom_relative_path}/geom/{bone.name}.stl", "name": f"{bone.name}_mesh"}
- # SubElement(asset, "mesh", attr)
for bone in self.bones:
if os.path.exists(f"{self.model_dir}/geom/{bone.name}.stl"):
- attr = {"file": f"{self.model_dir}/geom/{bone.name}.stl"}
+ attr = {"file": f"{self.model_dir.split('/')[-1]}/geom/{bone.name}.stl", "name": f"{bone.name}_mesh"}
SubElement(asset, "mesh", attr)
+
# create actuators
actuators = tree.getroot().find("actuator")
@@ -304,8 +385,8 @@ def write_xml_bodynode(self, bone, parent_node, offset, ref_angles):
attr = dict()
attr["name"] = bone.name
attr["pos"] = "{0:.4f} {1:.4f} {2:.4f}".format(*(bone.pos + offset))
- quat = quaternion_from_matrix(bone.orient)
- attr["quat"] = "{0:.4f} {1:.4f} {2:.4f} {3:.4f}".format(*quat)
+ # quat = quaternion_from_matrix(bone.orient)
+ # attr["quat"] = "{0:.4f} {1:.4f} {2:.4f} {3:.4f}".format(*quat)
node = SubElement(parent_node, "body", attr)
# write joints
@@ -336,9 +417,17 @@ def write_xml_bodynode(self, bone, parent_node, offset, ref_angles):
# j_attr["stiffness"] = str(GAINS[bone.name][0])
# j_attr["damping"] = str(GAINS[bone.name][1])
# j_attr["frictionloss"] = "0"
-
- j_attr["user"] = " ".join([ str(s) for s in GAINS[bone.name]]) # using user to set the max torque
- j_attr["armature"] = "0.01"
+
+ if self.sim in ["mujoco"]:
+ j_attr["user"] = " ".join([ str(s) for s in GAINS_MJ[bone.name]]) # using user to set the max torque
+ j_attr["armature"] = "0.01"
+ elif self.sim in ["isaacgym"]:
+ j_attr["stiffness"] = str(GAINS_PHC[bone.name][0])
+ j_attr["damping"] = str(GAINS_PHC[bone.name][1])
+ if bone.name in ["L_Ankle", "R_Ankle"]:
+ j_attr["armature"] = "0.01"
+ else:
+ j_attr["armature"] = "0.02"
if i < len(bone.lb):
j_attr["range"] = "{0:.4f} {1:.4f}".format(
@@ -347,6 +436,7 @@ def write_xml_bodynode(self, bone, parent_node, offset, ref_angles):
j_attr["range"] = "-180.0 180.0"
if j_attr["name"] in ref_angles.keys():
j_attr["ref"] = f"{ref_angles[j_attr['name']]:.1f}"
+
SubElement(node, "joint", j_attr)
# write sites
@@ -360,18 +450,13 @@ def write_xml_bodynode(self, bone, parent_node, offset, ref_angles):
geom_path = f"{self.model_dir}/geom/{bone.name}.stl"
if os.path.exists(geom_path):
- # g_attr = {"type": "mesh", "mesh": f"{bone.name}_mesh"} # Isaac version
- g_attr = {"type": "mesh", "mesh": f"{bone.name}"}
+ g_attr = {"type": "mesh", "mesh": f"{bone.name}_mesh"}
if bone.name in self.collision_groups.keys():
g_attr["density"] = str(base_density)
g_attr["contype"] = str(self.collision_groups[bone.name])
g_attr["conaffinity"] = str(self.conaffinity[bone.name])
- # g_attr["solimp"] = "0.9 0.95 0.001 0.5 2"
- # g_attr["solref"] = "0.02 1"
- # g_attr["size"] = str(10)
- # g_attr["friction"] = "0.000000000005 0.000000000005 0.1"
if not self.color_dict is None:
g_attr["rgba"] = self.color_dict[bone.name]
@@ -406,9 +491,8 @@ def write_xml_bodynode(self, bone, parent_node, offset, ref_angles):
g_attr["type"] = "box"
g_attr["pos"] = "{0:.4f} {1:.4f} {2:.4f}".format(*pos)
g_attr["size"] = "{0:.4f} {1:.4f} {2:.4f}".format(*size)
- g_attr["quat"] = "{0:.4f} {1:.4f} {2:.4f} {3:.4f}".format(
- *rot)
-
+ g_attr["quat"] = "{0:.4f} {1:.4f} {2:.4f} {3:.4f}".format(*rot)
+
SubElement(node, "geom", g_attr)
else:
for end in bone.ends:
diff --git a/smpl_sim/smpllib/smpl_joint_names.py b/smpl_sim/smpllib/smpl_joint_names.py
new file mode 100644
index 0000000..b78f5ec
--- /dev/null
+++ b/smpl_sim/smpllib/smpl_joint_names.py
@@ -0,0 +1,235 @@
+import os
+import sys
+import time
+import argparse
+import torch
+import pdb
+import os.path as osp
+
+sys.path.append(os.getcwd())
+
+MANO_LEFT_BONE_ORDER_NAMES = [
+ 'L_Wrist', 'L_Index1', 'L_Index2', 'L_Index3', 'L_Middle1', 'L_Middle2', 'L_Middle3', 'L_Pinky1', 'L_Pinky2', 'L_Pinky3', 'L_Ring1', 'L_Ring2', 'L_Ring3', 'L_Thumb1', 'L_Thumb2', 'L_Thumb3',
+]
+
+MANO_RIGHT_BONE_ORDER_NAMES =[
+ 'R_Wrist', 'R_Index1', 'R_Index2', 'R_Index3', 'R_Middle1', 'R_Middle2', 'R_Middle3', 'R_Pinky1', 'R_Pinky2', 'R_Pinky3', 'R_Ring1', 'R_Ring2', 'R_Ring3', 'R_Thumb1', 'R_Thumb2', 'R_Thumb3'
+]
+
+SMPL_BONE_ORDER_NAMES = [
+ "Pelvis",
+ "L_Hip",
+ "R_Hip",
+ "Torso",
+ "L_Knee",
+ "R_Knee",
+ "Spine",
+ "L_Ankle",
+ "R_Ankle",
+ "Chest",
+ "L_Toe",
+ "R_Toe",
+ "Neck",
+ "L_Thorax",
+ "R_Thorax",
+ "Head",
+ "L_Shoulder",
+ "R_Shoulder",
+ "L_Elbow",
+ "R_Elbow",
+ "L_Wrist",
+ "R_Wrist",
+ "L_Hand",
+ "R_Hand",
+]
+
+SMPLH_BONE_ORDER_NAMES = [
+ "Pelvis",
+ "L_Hip",
+ "R_Hip",
+ "Torso",
+ "L_Knee",
+ "R_Knee",
+ "Spine",
+ "L_Ankle",
+ "R_Ankle",
+ "Chest",
+ "L_Toe",
+ "R_Toe",
+ "Neck",
+ "L_Thorax",
+ "R_Thorax",
+ "Head",
+ "L_Shoulder",
+ "R_Shoulder",
+ "L_Elbow",
+ "R_Elbow",
+ "L_Wrist",
+ "R_Wrist",
+ "L_Index1",
+ "L_Index2",
+ "L_Index3",
+ "L_Middle1",
+ "L_Middle2",
+ "L_Middle3",
+ "L_Pinky1",
+ "L_Pinky2",
+ "L_Pinky3",
+ "L_Ring1",
+ "L_Ring2",
+ "L_Ring3",
+ "L_Thumb1",
+ "L_Thumb2",
+ "L_Thumb3",
+ "R_Index1",
+ "R_Index2",
+ "R_Index3",
+ "R_Middle1",
+ "R_Middle2",
+ "R_Middle3",
+ "R_Pinky1",
+ "R_Pinky2",
+ "R_Pinky3",
+ "R_Ring1",
+ "R_Ring2",
+ "R_Ring3",
+ "R_Thumb1",
+ "R_Thumb2",
+ "R_Thumb3",
+]
+
+SMPLX_BONE_ORDER_NAMES = [
+ "Pelvis",
+ "L_Hip",
+ "R_Hip",
+ "Torso",
+ "L_Knee",
+ "R_Knee",
+ "Spine",
+ "L_Ankle",
+ "R_Ankle",
+ "Chest",
+ "L_Toe",
+ "R_Toe",
+ "Neck",
+ "L_Thorax",
+ "R_Thorax",
+ "Head",
+ "L_Shoulder",
+ "R_Shoulder",
+ "L_Elbow",
+ "R_Elbow",
+ "L_Wrist",
+ "R_Wrist",
+ "Jaw",
+ "L_Eye_Smplhf",
+ "R_Eye_Smplhf",
+ "L_Index1",
+ "L_Index2",
+ "L_Index3",
+ "L_Middle1",
+ "L_Middle2",
+ "L_Middle3",
+ "L_Pinky1",
+ "L_Pinky2",
+ "L_Pinky3",
+ "L_Ring1",
+ "L_Ring2",
+ "L_Ring3",
+ "L_Thumb1",
+ "L_Thumb2",
+ "L_Thumb3",
+ "R_Index1",
+ "R_Index2",
+ "R_Index3",
+ "R_Middle1",
+ "R_Middle2",
+ "R_Middle3",
+ "R_Pinky1",
+ "R_Pinky2",
+ "R_Pinky3",
+ "R_Ring1",
+ "R_Ring2",
+ "R_Ring3",
+ "R_Thumb1",
+ "R_Thumb2",
+ "R_Thumb3",
+ "Nose",
+ "R_Eye",
+ "L_Eye",
+ "R_Ear",
+ "R_Ear",
+ "L_Big_Toe",
+ "L_Small_Toe",
+ "L_Heel",
+ "R_Big_Toe",
+ "R_Small_Toe",
+ "R_heel",
+ "L_thumb",
+ "L_index",
+ "L_middle",
+ "L_ring",
+ "L_Pinky",
+ "R_thumb",
+ "R_index",
+ "R_middle",
+ "R_ring",
+ "R_Pinky",
+ "R_Eye_Bow1",
+ "R_Eye_Bow2",
+ "R_Eye_Bow3",
+ "R_Eye_Bow4",
+ "R_Eye_Bow5",
+ "L_Eye_Bow5",
+ "L_Eye_Bow4",
+ "L_Eye_Bow3",
+ "L_Eye_Bow2",
+ "L_Eye_Bow1",
+ "Nose1",
+ "Nose2",
+ "Nose3",
+ "Nose4",
+ "R_Nose_2",
+ "R_Nose_1",
+ "Nose_middle",
+ "L_Nose_1",
+ "L_Nose_2",
+ "R_eye1",
+ "R_eye2",
+ "R_eye3",
+ "R_eye4",
+ "R_eye5",
+ "R_eye6",
+ "L_eye4",
+ "L_eye3",
+ "L_eye2",
+ "L_eye1",
+ "L_eye6",
+ "L_eye5",
+ "R_Mouth_1",
+ "R_Mouth_2",
+ "R_Mouth_3",
+ "mouth_top",
+ "L_Mouth_3",
+ "L_Mouth_2",
+ "L_Mouth_1",
+ "L_Mouth_5", # 59 in OpenPose output
+ "L_Mouth_4", # 58 in OpenPose output
+ "Mouth_Bottom",
+ "R_Mouth_4",
+ "R_Mouth_5",
+ "R_lip_1",
+ "R_lip_2",
+ "Lip_Top",
+ "L_lip_2",
+ "L_lip_1",
+ "L_lip_3",
+ "Lip_Bottom",
+ "R_lip_3",
+]
+
+SMPL_MUJOCO_NAMES = ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand']
+SMPLH_MUJOCO_NAMES = ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Index1', 'L_Index2', 'L_Index3', 'L_Middle1', 'L_Middle2', 'L_Middle3', 'L_Pinky1', 'L_Pinky2', 'L_Pinky3', 'L_Ring1', 'L_Ring2', 'L_Ring3', 'L_Thumb1', 'L_Thumb2', 'L_Thumb3', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Index1', 'R_Index2', 'R_Index3', 'R_Middle1', 'R_Middle2', 'R_Middle3', 'R_Pinky1', 'R_Pinky2', 'R_Pinky3', 'R_Ring1', 'R_Ring2', 'R_Ring3', 'R_Thumb1', 'R_Thumb2', 'R_Thumb3']
+
+if __name__ == "__main__":
+ print(SMPL_BONE_ORDER_NAMES)
\ No newline at end of file
diff --git a/smpl_sim/smpllib/smpl_local_robot.py b/smpl_sim/smpllib/smpl_local_robot.py
index 8b8eb27..107f54f 100644
--- a/smpl_sim/smpllib/smpl_local_robot.py
+++ b/smpl_sim/smpllib/smpl_local_robot.py
@@ -631,29 +631,13 @@ def update_start(self):
np.linalg.norm(vec))
def sync_node(self):
- self.node.attrib['name'] = self.name
+ self.node.attrib.pop("name", None)
if not self.size is None:
self.node.attrib["size"] = " ".join(
[f"{x:.6f}".rstrip("0").rstrip(".") for x in self.size])
self.node.attrib["density"] = " ".join(
[f"{x * 1000:.6f}".rstrip("0").rstrip(".") for x in self.density])
-
- # if self.type == "capsule":
- # start = self.start - self.body.pos if self.local_coord else self.start
- # end = self.end - self.body.pos if self.local_coord else self.end
- # self.node.attrib["fromto"] = " ".join(
- # [
- # f"{x:.6f}".rstrip("0").rstrip(".")
- # for x in np.concatenate([start, end])
- # ]
- # )
- # elif self.type == "box" or self.type == "sphere":
- # # self.node.attrib["pos"] = " ".join(
- # # [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.pos + self.pos_delta]
- # # )
- # import ipdb; ipdb.set_trace()
- # pass
-
+
def get_params(self, param_list, get_name=False, pad_zeros=False):
if "size" in self.param_specs:
if get_name:
@@ -1204,9 +1188,9 @@ def __init__(self, cfg, data_dir="data/smpl"):
self.rel_joint_lm = cfg.get("rel_joint_lm", True) # Rolling this out worldwide!!
self.ball_joints = cfg.get("ball_joint", False)
self.create_vel_sensors = cfg.get("create_vel_sensors", False)
+ self.sim = cfg.get("sim", "mujoco")
os.makedirs("/tmp/smpl/", exist_ok=True)
- self.masterfoot = cfg.get("masterfoot", False)
self.param_specs = self.cfg.get("body_params", {})
self.hull_dict = {}
self.beta = (torch.zeros(
@@ -1329,10 +1313,10 @@ def load_from_skeleton(
## Clear up beta for smpl and smplh
if self.smpl_model == "smpl" and self.beta.shape[1] == 16:
self.beta = self.beta[:, :10]
- # print(f"Incorrect shape size for {self.model}!!!")
elif self.smpl_model == "smplh" and self.beta.shape[1] == 10:
self.beta = torch.hstack([self.beta, torch.zeros((1, 6)).float()])
- # print(f"Incorrect shape size for {self.model}!!!")
+ elif self.smpl_model == "smplx" and (self.beta.shape[1] == 10 or self.beta.shape[1] == 16):
+ self.beta = torch.hstack([self.beta, torch.zeros((1, 20 - self.beta.shape[1])).float()])
# self.remove_geoms()
size_dict = {}
@@ -1340,25 +1324,24 @@ def load_from_skeleton(
self.model_dirs.append(f"/tmp/smpl/{uuid.uuid4()}")
self.skeleton = SkeletonMesh(self.model_dirs[-1])
- zero_pose = torch.zeros((1, 72))
+ if self.smpl_model in ["smpl"]:
+ zero_pose = torch.zeros((1, 72))
+ elif self.smpl_model in ["smpl", "smplh", "smplx"]:
+ zero_pose = torch.zeros((1, 156))
+ elif self.smpl_model in ["mano"]:
+ zero_pose = torch.zeros((1, 48))
+
if self.upright_start:
zero_pose[0, :3] = torch.tensor([1.2091996, 1.2091996, 1.2091996])
- (
- verts,
- joints,
- skin_weights,
- joint_names,
- joint_offsets,
- joint_parents,
- joint_axes,
- joint_dofs,
- joint_range,
- contype,
- conaffinity,
- ) = (smpl_parser.get_mesh_offsets(
- zero_pose=zero_pose, betas=self.beta, flatfoot=self.flatfoot)
- if self.smpl_model != "smplx" else
- smpl_parser.get_mesh_offsets(v_template=v_template))
+
+ if self.smpl_model in ['smpl', 'smplh'] :
+ (verts, joints, skin_weights, joint_names, joint_offsets, joint_parents, joint_axes, joint_dofs,
+ joint_range, contype, conaffinity) = smpl_parser.get_mesh_offsets(
+ zero_pose=zero_pose, betas=self.beta, flatfoot=self.flatfoot)
+ else:
+ (verts, joints, skin_weights, joint_names, joint_offsets, joint_parents, joint_axes, joint_dofs,
+ joint_range, contype, conaffinity) = smpl_parser.get_mesh_offsets(
+ zero_pose=zero_pose, v_template=v_template, betas=self.beta, flatfoot=self.flatfoot)
if self.rel_joint_lm:
if self.upright_start:
@@ -1394,6 +1377,7 @@ def load_from_skeleton(
joint_axes,
joint_dofs,
joint_range,
+ sim=self.sim,
upright_start=self.upright_start,
hull_dict = self.hull_dict,
create_vel_sensors = self.create_vel_sensors,
@@ -1427,8 +1411,12 @@ def load_from_skeleton(
zero_pose[0, :3] = torch.tensor(
[1.2091996, 1.2091996, 1.2091996])
- verts, joints, skin_weights, joint_names, joint_offsets, parents_dict, channels, joint_range = smpl_parser.get_offsets(
- betas=self.beta, zero_pose=zero_pose)
+ if self.smpl_model in ['smpl', 'smplh']:
+ verts, joints, skin_weights, joint_names, joint_offsets, parents_dict, channels, joint_range = (
+ smpl_parser.get_offsets(betas=self.beta, zero_pose=zero_pose))
+ else:
+ verts, joints, skin_weights, joint_names, joint_offsets, parents_dict, channels, joint_range = (
+ smpl_parser.get_offsets(v_template=v_template, betas=self.beta, zero_pose=zero_pose))
self.height = torch.max(verts[:, 1]) - torch.min(verts[:, 1])
self.hull_dict = get_geom_dict(verts,
@@ -1458,6 +1446,7 @@ def load_from_skeleton(
joint_range,
self.hull_dict, {},
channels, {},
+ sim = self.sim,
upright_start=self.upright_start,
remove_toe=self.remove_toe,
freeze_hand = self.freeze_hand,
@@ -1500,9 +1489,6 @@ def load_from_skeleton(
self.param_names = self.get_params(get_name=True)
self.init_params = self.get_params()
self.init_tree = deepcopy(self.tree)
- if self.masterfoot:
- # self.add_masterfoot_capsule()
- self.add_masterfoot_box()
all_root = self.tree.getroot()
@@ -1584,454 +1570,6 @@ def sync_node(self):
# body.reindex()
body.sync_node()
- def add_masterfoot_box(self):
- self.joint_range_master = {
- "L_Toe_x": f"-0.1 0.1",
- "L_Toe_y": f"-45.0 45",
- "L_Toe_z": f"-10 10",
- "R_Toe_x": f"-0.1 0.1",
- "R_Toe_y": f"-45 45",
- "R_Toe_z": f"-10 10",
- "L_Toe_1_joint_0": f"-0.1 0.1",
- "L_Toe_1_joint_1": f"-45 45",
- "L_Toe_1_joint_2": f"-10 10",
- "R_Toe_1_joint_0": f"-0.1 0.1",
- "R_Toe_1_joint_1": f"-45 45",
- "R_Toe_1_joint_2": f"-10 10",
- "L_Toe_1_1_joint_0": f"-0.1 0.1",
- "L_Toe_1_1_joint_1": f"-45 45",
- "L_Toe_1_1_joint_2": f"-10 10",
- "R_Toe_1_1_joint_0": f"-0.1 0.1",
- "R_Toe_1_1_joint_1": f"-45 45",
- "R_Toe_1_1_joint_2": f"-10 10",
- "L_Toe_2_joint_0": f"-0.1 0.1",
- "L_Toe_2_joint_1": f"-45 45",
- "L_Toe_2_joint_2": f"-10 10",
- "R_Toe_2_joint_0": f"-0.1 0.1",
- "R_Toe_2_joint_1": f"-45 45",
- "R_Toe_2_joint_2": f"-10 10",
- }
- body_index = [3, 7]
-
- yellow_bone_length = 0.02
- yellow_toe_back = -0.02
- for idx in body_index:
- ankle2clone = body_ankle = self.bodies[idx]
- ankle_node = body_ankle.node
- flip_y = -1 if idx == 3 else 1
-
- for element in ankle_node.getiterator(): # remvoe the geom of the ankle
- if element.tag == "geom":
- ankle_node.remove(element)
- break
-
- child_pos = body_ankle.child[0].pos - body_ankle.pos
- body_ankle.geoms = [] # remove Ankel's original geom
- toe_x, toe_y, toe_z = child_pos
- ankles_zs, ankle_xs = self.hull_dict[body_ankle.name]['norm_verts'][:, 2], self.hull_dict[body_ankle.name]['norm_verts'][:, 0]
- ankle_y_max, ankle_y_min = self.hull_dict[body_ankle.name]['norm_verts'][:, 1].max(), self.hull_dict[body_ankle.name]['norm_verts'][:, 1].min()
- ankle_z_max, ankle_z_min = ankles_zs.max(), ankles_zs.min()
- toe_y_max, toe_y_min = self.hull_dict[body_ankle.name]['norm_verts'][:, 1].max(), self.hull_dict[body_ankle.name]['norm_verts'][:, 1].min()
- ankle_z_max_x = self.hull_dict[body_ankle.name]['norm_verts'][ankles_zs.argmax(), 0]
- if idx == 7:
- ankle_y_tgt, ankle_y_green = ankle_y_max, ankle_y_min
- else:
- ankle_y_tgt, ankle_y_green = ankle_y_min, ankle_y_max
-
- ankle_size = np.abs(toe_z - ankles_zs.min())
-
- ankle_z_min_neg = ankles_zs[ankle_xs < 0].min()
- ankle_z_span = ankle_z_max - ankle_z_min_neg
- ankle_y_span = ankle_y_max - ankle_y_min
- ankle_frac = 10
- green_y = (toe_y + ankle_y_tgt)/2 - ankle_y_span * 2/5 * flip_y
-
- attributes = {
- "size": f"{ankle_size}",
- "type": "box",
- "pos":
- f"{(toe_x + yellow_toe_back)/2 - (toe_x - yellow_toe_back)/4} {(green_y + ankle_y_tgt)/2 } {ankle_z_min_neg + ankle_z_span * 9/ankle_frac /2}",
- "size":
- f"{(toe_x - yellow_toe_back)/4} {np.abs((ankle_y_tgt - green_y)/2)} {ankle_z_span * 9 / ankle_frac /2}",
- "quat": f"0 0 0 1",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(ankle_node, "geom", attributes)
- body_ankle.geoms.append(Geom(geom_node, body_ankle))
-
- attributes = {
- "type": "box",
- "pos":
- f"{(toe_x + yellow_toe_back)/2 + (toe_x - yellow_toe_back)/4} {(green_y + ankle_y_tgt)/2 } {ankle_z_min_neg + ankle_z_span * 6/ankle_frac /2}",
- "size":
- f"{(toe_x - yellow_toe_back)/4} {np.abs((ankle_y_tgt - green_y)/2)} {ankle_z_span * 6 / ankle_frac /2}",
- "quat": f"0 0 0 1",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(ankle_node, "geom", attributes)
- body_ankle.geoms.append(Geom(geom_node, body_ankle))
-
-
- ############################################################ Nodes for red toes
- body_toe = body_ankle.child[0]
- body_toe_node = body_toe.node
- for element in body_toe_node.getiterator(): # remvoe the geom of the ankle
- if element.tag == "geom":
- body_toe_node.remove(element)
- elif element.tag == "joint":
- master_range = self.cfg.get("master_range", 30)
- element.attrib["range"] = self.joint_range_master.get(element.attrib["name"], f"-{master_range} {master_range}")
-
-
- toe_x_max, toe_x_min = self.hull_dict[body_toe.name]['norm_verts'][:, 0].max(), self.hull_dict[body_toe.name]['norm_verts'][:, 0].min()
- # toe_y_max, toe_y_min = self.hull_dict[body_toe.name]['norm_verts'][:, 1].max(), self.hull_dict[body_toe.name]['norm_verts'][:, 1].min()
- # toe_z_max, toe_z_min = self.hull_dict[body_toe.name]['norm_verts'][:, 2].max(), self.hull_dict[body_toe.name]['norm_verts'][:, 2].min()
-
- attributes = {
- "type": "box",
- "pos":
- f"{(toe_x_max)/4} {(green_y + ankle_y_tgt)/2 - toe_y} {ankle_z_min_neg + ankle_z_span * 4/ankle_frac /2- toe_z}",
- "size":
- f"{(toe_x_max)/4} {np.abs((ankle_y_tgt - green_y)/2)} {ankle_z_span * 4 / ankle_frac /2}",
- "quat": f"0 0 0 1",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(body_toe_node, "geom", attributes)
- body_toe.geoms.append(Geom(geom_node, body_toe))
-
- attributes = {
- "type": "box",
- "pos":
- f"{(toe_x_max) * 3/4 } {(green_y + ankle_y_tgt)/2 - toe_y} {ankle_z_min_neg + ankle_z_span * 2/ankle_frac /2- toe_z}",
- "size":
- f"{(toe_x_max)/4} {np.abs((ankle_y_tgt - green_y)/2)} {ankle_z_span * 2 / ankle_frac /2}",
- "quat": f"0 0 0 1",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(body_toe_node, "geom", attributes)
- body_toe.geoms.append(Geom(geom_node, body_toe))
-
- # #################################### First additional toe (green one)
- green_z = ankle_z_max - ankle_size - ankle_z_span * 4 / ankle_frac
- toe_y_max, toe_y_min = self.hull_dict[body_toe.name]['norm_verts'][:, 1].max(), self.hull_dict[body_toe.name]['norm_verts'][:, 1].min()
- pos = np.array([yellow_bone_length, green_y, green_z])
-
-
- green_toe_body = self.add_body_joint_and_actuator(body_ankle, body_toe, pos, index_name = "_1")
- attributes = {
- "type": "box",
- "pos":
- f"{(toe_x - yellow_bone_length)/2 } { (ankle_y_green - green_y)/2 } {ankle_z_min_neg + ankle_z_span * 5/ankle_frac /2 - green_z}",
- "size":
- f"{np.abs(toe_x - yellow_bone_length)/2} {np.abs(green_y - ankle_y_green)/2} {ankle_z_span * 5 / ankle_frac /2}",
- "quat": f"0 0 0 1",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(green_toe_body.node, "geom", attributes)
- green_toe_body.geoms.append(Geom(geom_node, green_toe_body))
-
- # #################################### Second additional toe (white one)
- pos = np.array([toe_x - yellow_bone_length, 0, toe_z - green_z])
- white_toe_body = self.add_body_joint_and_actuator(green_toe_body,
- green_toe_body,
- pos,
- index_name="_1")
-
- attributes = {
- "type": "box",
- "pos":
- f"{(toe_x_max ) * 2/5} { (ankle_y_green - green_y)/2 } {ankle_z_min_neg + ankle_z_span * 2 /ankle_frac /2- toe_z}",
- "size":
- f"{(toe_x_max ) * 2/5} {np.abs(green_y - ankle_y_green)/2} {ankle_z_span * 2 / ankle_frac /2}",
- "quat": f"0 0 0 1",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(white_toe_body.node, "geom", attributes)
- white_toe_body.geoms.append(Geom(geom_node, white_toe_body))
-
-
- #################################### Third additional toe (white one)
- ankle_x_min = self.hull_dict[body_ankle.name]['norm_verts'][:, 0].min()
- pos = np.array(
- [yellow_toe_back, (green_y + ankle_y_tgt) / 2 , toe_z])
-
- while_toe_body = self.add_body_joint_and_actuator(body_ankle, body_toe, pos, index_name = "_2")
- attributes = {
- "type": "box",
- "pos":
- f"{(ankle_x_min - yellow_toe_back)/2} {0} {ankle_z_min_neg + ankle_z_span * 7 /ankle_frac /2- toe_z}",
- "size":
- f"{np.abs(ankle_x_min - yellow_toe_back)/2} {np.abs((ankle_y_tgt - green_y)/2)} {ankle_z_span * 7 / ankle_frac /2}",
- "quat": f"0 0 0 1",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(while_toe_body.node, "geom", attributes)
- while_toe_body.geoms.append(Geom(geom_node, while_toe_body))
-
-
-
-
- self.init_tree = deepcopy(self.tree)
-
-
- def add_masterfoot_capsule(self):
- masterfoot_v = self.cfg.get("masterfoot_v", 0)
- body_index = [3, 7]
- self.joint_range_master = {"L_Toe_x": f"-0.1 0.1", "L_Toe_y": f"-45 45", "L_Toe_z": f"-10 10"}
- yellow_bone_length = 0.04
- yellow_toe_back = -0.02
- for idx in body_index:
- ankle2clone = body_ankle = self.bodies[idx]
- ankle_node = body_ankle.node
- flip_y = -1 if idx == 3 else 1
-
- for element in ankle_node.getiterator(): # remvoe the geom of the ankle
- if element.tag == "geom":
- ankle_node.remove(element)
- break
- child_pos = body_ankle.child[0].pos - body_ankle.pos
- body_ankle.geoms = [] # remove Ankel's original geom
- toe_x, toe_y, toe_z = child_pos
-
- ankle_y_max, ankle_y_min = self.hull_dict[body_ankle.name]['norm_verts'][:, 1].max(), self.hull_dict[body_ankle.name]['norm_verts'][:, 1].min()
- ankle_z_max, ankle_z_min = self.hull_dict[body_ankle.name]['norm_verts'][:, 2].max(), self.hull_dict[body_ankle.name]['norm_verts'][:, 2].min()
- toe_y_max, toe_y_min = self.hull_dict[body_ankle.name]['norm_verts'][:, 1].max(), self.hull_dict[body_ankle.name]['norm_verts'][:, 1].min()
- ankle_z_max_x = self.hull_dict[body_ankle.name]['norm_verts'][self.hull_dict[body_ankle.name]['norm_verts'][:, 2].argmax(), 0]
- if idx == 7:
- ankle_y_tgt = ankle_y_max
- else:
- ankle_y_tgt = ankle_y_min
-
- ankle_size = np.abs(toe_z - self.hull_dict[body_ankle.name]['norm_verts'][:, 2].min())
- ankle_z_span = ankle_z_max - ankle_z_min
- ankle_frac = 10
-
- attributes = {
- "size": f"{ankle_size}",
- "type": "capsule",
- "fromto":
- f"{ankle_z_max_x} {toe_y } {ankle_z_max - ankle_size - ankle_z_span * 2/ankle_frac} {toe_x} {toe_y } {toe_z}",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(ankle_node, "geom", attributes)
- body_ankle.geoms.append(Geom(geom_node, body_ankle))
-
- attributes = {
- "size": f"{ankle_size}",
- "type": "capsule",
- "fromto":
- f"{ankle_z_max_x} {(toe_y + ankle_y_tgt)/2} {ankle_z_max - ankle_size- ankle_z_span * 1/ankle_frac} {toe_x} {(toe_y + ankle_y_tgt)/2} {toe_z}",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(ankle_node, "geom", attributes)
- body_ankle.geoms.append(Geom(geom_node, body_ankle))
-
- attributes = {
- "size": f"{ankle_size}",
- "type": "capsule",
- "fromto":
- f"{ankle_z_max_x} {ankle_y_tgt} {ankle_z_max - ankle_size- ankle_z_span * 0/ankle_frac} {toe_x} {ankle_y_tgt} {toe_z}",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(ankle_node, "geom", attributes)
- body_ankle.geoms.append(Geom(geom_node, body_ankle))
-
- ########################################################################
- attributes = {
- "size": f"{ankle_size}",
- "type": "capsule",
- "fromto":
- f"{ankle_z_max_x} {toe_y} {ankle_z_max - ankle_size- ankle_z_span * 2/ankle_frac} {yellow_toe_back} {(toe_y + ankle_y_tgt)/2} {toe_z}",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(ankle_node, "geom", attributes)
- body_ankle.geoms.append(Geom(geom_node, body_ankle))
-
- attributes = {
- "size": f"{ankle_size}",
- "type": "capsule",
- "fromto":
- f"{ankle_z_max_x} {(toe_y + ankle_y_tgt)/2} {ankle_z_max - ankle_size- ankle_z_span * 1/ankle_frac} {yellow_toe_back} {(toe_y + ankle_y_tgt)/2} {toe_z}",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(ankle_node, "geom", attributes)
- body_ankle.geoms.append(Geom(geom_node, body_ankle))
-
- attributes = {
- "size": f"{ankle_size}",
- "type": "capsule",
- "fromto":
- f"{ankle_z_max_x} {ankle_y_tgt} {ankle_z_max - ankle_size- ankle_z_span * 0/ankle_frac} {yellow_toe_back} {(toe_y + ankle_y_tgt)/2} {toe_z}",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(ankle_node, "geom", attributes)
- body_ankle.geoms.append(Geom(geom_node, body_ankle))
-
- ############################################################
- green_z = ankle_z_max - ankle_size - ankle_z_span * 4/ankle_frac
-
- attributes = {
- "size": f"{ankle_size}",
- "type": "capsule",
- "fromto":
- f"{yellow_bone_length} {(toe_y + ankle_y_tgt)/2 - 0.05 * flip_y} {green_z} {yellow_toe_back} {(toe_y + ankle_y_tgt)/2} {toe_z}",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(ankle_node, "geom", attributes)
- body_ankle.geoms.append(Geom(geom_node, body_ankle))
-
-
- ############################################################ Nodes for red toes
- body_toe = body_ankle.child[0]
- body_toe_node = body_toe.node
- for element in body_toe_node.getiterator(): # remvoe the geom of the toe
- if element.tag == "geom":
- body_toe_node.remove(element)
- elif element.tag == "joint":
- master_range = self.cfg.get("master_range", 30)
- element.attrib["range"] = self.joint_range_master.get(element.attrib["name"], f"-{master_range} {master_range}")
- print(element.attrib["range"])
-
- toe_x_max = self.hull_dict[body_toe.name]['norm_verts'][:, 0].max()
- toe_z_min_abs = np.abs(self.hull_dict[body_toe.name]['norm_verts'][:, 2].min())
-
- attributes = {
- "size": f"{toe_z_min_abs}",
- "type": "capsule",
- "fromto": f"{0.0} {0} {0} {toe_x_max} {0} {0}",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(body_toe_node, "geom", attributes)
- body_toe.geoms.append(Geom(geom_node, body_toe))
-
- attributes = {
- "size": f"{toe_z_min_abs}",
- "type": "capsule",
- "fromto":
- f"{0.0} {(toe_y + ankle_y_tgt)/2 - toe_y} {0} {toe_x_max} {(toe_y + ankle_y_tgt)/2- toe_y} {0}",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(body_toe_node, "geom", attributes)
- body_toe.geoms.append(Geom(geom_node, body_toe))
-
- attributes = {
- "size": f"{toe_z_min_abs}",
- "type": "capsule",
- "fromto":
- f"{0.0} {ankle_y_tgt- toe_y} {0} {toe_x_max} {ankle_y_tgt- toe_y} {0}",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(body_toe_node, "geom", attributes)
- body_toe.geoms.append(Geom(geom_node, body_toe))
-
-
- #################################### First additional toe (green one)
- toe_y_max, toe_y_min = self.hull_dict[body_toe.name]['norm_verts'][:, 1].max(), self.hull_dict[body_toe.name]['norm_verts'][:, 1].min()
- if idx == 7:
- toe_y_tgt = toe_y_min + toe_z_min_abs
- else:
- toe_y_tgt = toe_y_max - toe_z_min_abs
-
- pos = np.array([yellow_bone_length, (toe_y + ankle_y_tgt)/2 - 0.05 * flip_y, green_z])
- green_toe_body = self.add_body_joint_and_actuator(body_ankle, body_toe, pos, index_name = "_1")
- attributes = {
- "size": f"{toe_z_min_abs}",
- "type": "capsule",
- "fromto":
- f"{0} {0} {0} {toe_x - yellow_bone_length} {0} {toe_z - green_z }",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(green_toe_body.node, "geom", attributes)
- green_toe_body.geoms.append(Geom(geom_node, green_toe_body))
- attributes = {
- "size": f"{toe_z_min_abs}",
- "type": "capsule",
- "fromto":
- f"{0} {toe_y_tgt + toe_z_min_abs * flip_y} {toe_z - green_z} {toe_x - yellow_bone_length} {toe_y_tgt + toe_z_min_abs* flip_y} {toe_z - green_z}",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(green_toe_body.node, "geom", attributes)
- green_toe_body.geoms.append(Geom(geom_node, green_toe_body))
-
- #################################### Second additional toe (white one)
- pos = np.array([toe_x - yellow_bone_length, 0, toe_z - green_z])
- white_toe_body = self.add_body_joint_and_actuator(green_toe_body,
- green_toe_body,
- pos,
- index_name="_1")
-
- attributes = {
- "size": f"{toe_z_min_abs}",
- "type": "capsule",
- "fromto":
- f"{0} {0} {0} {np.abs((toe_x_max - toe_x) * 3/4) } {0} {0}",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(white_toe_body.node, "geom", attributes)
- white_toe_body.geoms.append(Geom(geom_node, white_toe_body))
-
- attributes = {
- "size": f"{toe_z_min_abs }",
- "type": "capsule",
- "fromto":
- f"{0} {toe_y_tgt + toe_z_min_abs * flip_y} {0} {np.abs((toe_x_max - toe_x) * 3/4) } {toe_y_tgt + toe_z_min_abs * flip_y} {0}",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(white_toe_body.node, "geom", attributes)
- white_toe_body.geoms.append(Geom(geom_node, white_toe_body))
-
- #################################### Third additional toe (white one)
- ankle_x_min = self.hull_dict[body_ankle.name]['norm_verts'][:, 0].min()
- pos = np.array([yellow_toe_back, (toe_y + ankle_y_tgt)/2, toe_z])
-
- while_toe_body = self.add_body_joint_and_actuator(body_ankle, body_toe, pos, index_name = "_2")
- attributes = {
- "size": f"{toe_z_min_abs}",
- "type": "capsule",
- "fromto":
- f"{0} {toe_z_min_abs} {0} {ankle_x_min - yellow_toe_back + toe_z_min_abs} {toe_z_min_abs} {0}",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(while_toe_body.node, "geom", attributes)
- while_toe_body.geoms.append(Geom(geom_node, while_toe_body))
-
-
- attributes = {
- "size": f"{toe_z_min_abs}",
- "type": "capsule",
- "fromto":
- f"{0} {-toe_z_min_abs} {0} {ankle_x_min - yellow_toe_back + toe_z_min_abs} {-toe_z_min_abs} {0}",
- "contype": "0",
- "conaffinity": "1",
- }
- geom_node = SubElement(while_toe_body.node, "geom", attributes)
- while_toe_body.geoms.append(Geom(geom_node, while_toe_body))
-
-
- self.init_tree = deepcopy(self.tree)
-
def add_body_joint_and_actuator(self, parent_body, body, pos, index_name = "_1"):
body_node =body.node
new_node = deepcopy(body_node)
@@ -2405,7 +1943,7 @@ def get_gnn_edges(self):
parser = argparse.ArgumentParser()
args = parser.parse_args()
robot_cfg = {
- "mesh": False,
+ "mesh": True,
"rel_joint_lm": False,
"upright_start": False,
"remove_toe": False,
@@ -2413,37 +1951,37 @@ def get_gnn_edges(self):
"real_weight_porpotion_capsules": True,
"real_weight_porpotion_boxes": True,
"replace_feet": True,
- "masterfoot": False,
"big_ankle": True,
"freeze_hand": False,
"box_body": True,
- "master_range": 50,
"model": "smpl",
"body_params": {},
"joint_params": {},
"geom_params": {},
"actuator_params": {},
- "model": "smplx",
- "ball_joint": False,
- "create_vel_sensors": True, # Create global and local velocities sensors.
+ "ball_joint": False,
+        "create_vel_sensors": False,  # Create global and local velocity sensors.
+ "sim": "isaacgym"
}
- smpl_robot = SMPL_Robot(robot_cfg)
+ smpl_robot = SMPL_Robot(robot_cfg, data_dir='../../smpl')
params_names = smpl_robot.get_params(get_name=True)
betas = torch.zeros(1, 16)
- # betas[]
+ betas[0] = 1
gender = [0]
t0 = time.time()
params = smpl_robot.get_params()
smpl_robot.load_from_skeleton(betas=betas, objs_info=None, gender=gender)
print(smpl_robot.height)
-
- smpl_robot.write_xml(f"smpl_humanoid.xml")
- m = mujoco.MjModel.from_xml_path(f"smpl_humanoid.xml")
- d = mujoco.MjData(m)
-
- with mujoco.viewer.launch_passive(m, d) as viewer:
+ smpl_robot.write_xml(f"test.xml")
+ smpl_robot.write_xml(f"/tmp/smpl/{robot_cfg['model']}_{gender[0]}_humanoid.xml")
+ mj_model = mujoco.MjModel.from_xml_path(f"/tmp/smpl/{robot_cfg['model']}_{gender[0]}_humanoid.xml")
+ mj_data = mujoco.MjData(mj_model)
+
+ mj_data.qpos[2] = 0.95
+
+ with mujoco.viewer.launch_passive(mj_model, mj_data) as viewer:
# Close the viewer automatically after 30 wall-seconds.
start = time.time()
while viewer.is_running() :
@@ -2451,17 +1989,17 @@ def get_gnn_edges(self):
# mj_step can be replaced with code that also evaluates
# a policy and applies a control signal before stepping the physics.
- mujoco.mj_step(m, d)
+ mujoco.mj_forward(mj_model, mj_data)
# Example modification of a viewer option: toggle contact points every two seconds.
with viewer.lock():
- viewer.opt.flags[mujoco.mjtVisFlag.mjVIS_CONTACTPOINT] = int(d.time % 2)
+ viewer.opt.flags[mujoco.mjtVisFlag.mjVIS_CONTACTPOINT] = int(mj_data.time % 2)
# Pick up changes to the physics state, apply perturbations, update options from GUI.
viewer.sync()
# Rudimentary time keeping, will drift relative to wall clock.
- time_until_next_step = m.opt.timestep - (time.time() - step_start)
+ time_until_next_step = mj_model.opt.timestep - (time.time() - step_start)
if time_until_next_step > 0:
time.sleep(time_until_next_step)
diff --git a/smpl_sim/smpllib/smpl_mujoco_new.py b/smpl_sim/smpllib/smpl_mujoco_new.py
index 4b37b27..5ed4267 100644
--- a/smpl_sim/smpllib/smpl_mujoco_new.py
+++ b/smpl_sim/smpllib/smpl_mujoco_new.py
@@ -7,7 +7,7 @@
)
from .smpl_parser import SMPL_BONE_ORDER_NAMES, SMPLH_BONE_ORDER_NAMES
from scipy.spatial.transform import Rotation as sRot
-from ..utils.transform_utils import rotation_matrix_to_angle_axis
+from smpl_sim.utils.torch_geometry_transforms import rotation_matrix_to_angle_axis
import smpl_sim.utils.mujoco_utils as mj_utils
diff --git a/smpl_sim/smpllib/smpl_parser.py b/smpl_sim/smpllib/smpl_parser.py
index 0708209..41cc19e 100644
--- a/smpl_sim/smpllib/smpl_parser.py
+++ b/smpl_sim/smpllib/smpl_parser.py
@@ -8,224 +8,11 @@
from smplx import SMPLH as _SMPLH
from smplx import SMPLX as _SMPLX
-SMPL_BONE_ORDER_NAMES = [
- "Pelvis",
- "L_Hip",
- "R_Hip",
- "Torso",
- "L_Knee",
- "R_Knee",
- "Spine",
- "L_Ankle",
- "R_Ankle",
- "Chest",
- "L_Toe",
- "R_Toe",
- "Neck",
- "L_Thorax",
- "R_Thorax",
- "Head",
- "L_Shoulder",
- "R_Shoulder",
- "L_Elbow",
- "R_Elbow",
- "L_Wrist",
- "R_Wrist",
- "L_Hand",
- "R_Hand",
-]
-
-SMPLH_BONE_ORDER_NAMES = [
- "Pelvis",
- "L_Hip",
- "R_Hip",
- "Torso",
- "L_Knee",
- "R_Knee",
- "Spine",
- "L_Ankle",
- "R_Ankle",
- "Chest",
- "L_Toe",
- "R_Toe",
- "Neck",
- "L_Thorax",
- "R_Thorax",
- "Head",
- "L_Shoulder",
- "R_Shoulder",
- "L_Elbow",
- "R_Elbow",
- "L_Wrist",
- "R_Wrist",
- "L_Index1",
- "L_Index2",
- "L_Index3",
- "L_Middle1",
- "L_Middle2",
- "L_Middle3",
- "L_Pinky1",
- "L_Pinky2",
- "L_Pinky3",
- "L_Ring1",
- "L_Ring2",
- "L_Ring3",
- "L_Thumb1",
- "L_Thumb2",
- "L_Thumb3",
- "R_Index1",
- "R_Index2",
- "R_Index3",
- "R_Middle1",
- "R_Middle2",
- "R_Middle3",
- "R_Pinky1",
- "R_Pinky2",
- "R_Pinky3",
- "R_Ring1",
- "R_Ring2",
- "R_Ring3",
- "R_Thumb1",
- "R_Thumb2",
- "R_Thumb3",
-]
-
-SMPLX_BONE_ORDER_NAMES = [
- "Pelvis",
- "L_Hip",
- "R_Hip",
- "Torso",
- "L_Knee",
- "R_Knee",
- "Spine",
- "L_Ankle",
- "R_Ankle",
- "Chest",
- "L_Toe",
- "R_Toe",
- "Neck",
- "L_Thorax",
- "R_Thorax",
- "Head",
- "L_Shoulder",
- "R_Shoulder",
- "L_Elbow",
- "R_Elbow",
- "L_Wrist",
- "R_Wrist",
- "Jaw",
- "L_Eye_Smplhf",
- "R_Eye_Smplhf",
- "L_Index1",
- "L_Index2",
- "L_Index3",
- "L_Middle1",
- "L_Middle2",
- "L_Middle3",
- "L_Pinky1",
- "L_Pinky2",
- "L_Pinky3",
- "L_Ring1",
- "L_Ring2",
- "L_Ring3",
- "L_Thumb1",
- "L_Thumb2",
- "L_Thumb3",
- "R_Index1",
- "R_Index2",
- "R_Index3",
- "R_Middle1",
- "R_Middle2",
- "R_Middle3",
- "R_Pinky1",
- "R_Pinky2",
- "R_Pinky3",
- "R_Ring1",
- "R_Ring2",
- "R_Ring3",
- "R_Thumb1",
- "R_Thumb2",
- "R_Thumb3",
- "Nose",
- "R_Eye",
- "L_Eye",
- "R_Ear",
- "R_Ear",
- "L_Big_Toe",
- "L_Small_Toe",
- "L_Heel",
- "R_Big_Toe",
- "R_Small_Toe",
- "R_heel",
- "L_thumb",
- "L_index",
- "L_middle",
- "L_ring",
- "L_Pinky",
- "R_thumb",
- "R_index",
- "R_middle",
- "R_ring",
- "R_Pinky",
- "R_Eye_Bow1",
- "R_Eye_Bow2",
- "R_Eye_Bow3",
- "R_Eye_Bow4",
- "R_Eye_Bow5",
- "L_Eye_Bow5",
- "L_Eye_Bow4",
- "L_Eye_Bow3",
- "L_Eye_Bow2",
- "L_Eye_Bow1",
- "Nose1",
- "Nose2",
- "Nose3",
- "Nose4",
- "R_Nose_2",
- "R_Nose_1",
- "Nose_middle",
- "L_Nose_1",
- "L_Nose_2",
- "R_eye1",
- "R_eye2",
- "R_eye3",
- "R_eye4",
- "R_eye5",
- "R_eye6",
- "L_eye4",
- "L_eye3",
- "L_eye2",
- "L_eye1",
- "L_eye6",
- "L_eye5",
- "R_Mouth_1",
- "R_Mouth_2",
- "R_Mouth_3",
- "mouth_top",
- "L_Mouth_3",
- "L_Mouth_2",
- "L_Mouth_1",
- "L_Mouth_5", # 59 in OpenPose output
- "L_Mouth_4", # 58 in OpenPose output
- "Mouth_Bottom",
- "R_Mouth_4",
- "R_Mouth_5",
- "R_lip_1",
- "R_lip_2",
- "Lip_Top",
- "L_lip_2",
- "L_lip_1",
- "L_lip_3",
- "Lip_Bottom",
- "R_lip_3",
-]
+from smplx import MANO as _MANO
+from smpl_sim.smpllib.smpl_joint_names import *
SMPL_EE_NAMES = ["L_Ankle", "R_Ankle", "L_Wrist", "R_Wrist", "Head"]
-
-# SMPL_BONE_ORDER_NAMES = ['Hips', 'LeftUpLeg', 'RightUpLeg', 'Spine', "LeftLeg", "RightLeg", "Spine1", "LeftFoot", \
-# "RightFoot", "Spine2", "LeftToe", "RightToe", "Neck", "LeftChest", "RightChest", "Mouth", "LeftShoulder", \
-# "RightShoulder", "LeftArm", "RightArm", "LeftWrist", "RightWrist", "LeftHand", "RightHand"
-# ]
+
JOINST_TO_USE = np.array([
0,
@@ -507,33 +294,35 @@ def get_joints_verts(self, pose, th_betas=None, th_trans=None):
# joints = smpl_output.joints[:,JOINST_TO_USE]
return vertices, joints
- def get_offsets(self, betas=torch.zeros(1, 16).float()):
- with torch.no_grad():
- verts, jts = self.get_joints_verts(self.zero_pose, th_betas=betas)
- verts_np = verts.detach().cpu().numpy()
- jts_np = jts.detach().cpu().numpy()
+ def get_offsets(self, zero_pose=None, betas=torch.zeros(1, 10), flatfoot=False):
+ with torch.no_grad():
+ joint_names = self.joint_names
+ if zero_pose is None:
+ verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)
+ else:
+ verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)
+
+ jts_np = Jtr.detach().cpu().numpy()
- parents = self.parents.cpu().numpy()
- offsets_smpl = [np.array([0, 0, 0])]
- for i in range(1, len(parents)):
- p_id = parents[i]
- p3d = jts_np[0, p_id]
- curr_3d = jts_np[0, i]
- offset_curr = curr_3d - p3d
- offsets_smpl.append(offset_curr)
- offsets_smpl = np.array(offsets_smpl)
- names_smpl = self.joint_names
- offset_smpl_dict = {names_smpl[i]: offsets_smpl[i] for i in range(len(names_smpl))}
- parents_dict = {names_smpl[i]: names_smpl[parents[i]] for i in range(len(names_smpl))}
- parents_dict["Hips"] = "None"
- channels = ["z", "y", "x"]
+ smpl_joint_parents = self.parents.cpu().numpy()
+ joint_pos = Jtr[0].numpy()
+ joint_offsets = {joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c] for c, p in enumerate(smpl_joint_parents) if joint_names[c] in self.joint_names}
+ parents_dict = {x: joint_names[i] if i >= 0 else None for x, i in zip(joint_names, smpl_joint_parents) if joint_names[i] in self.joint_names and x in self.joint_names}
- return offset_smpl_dict, parents_dict, channels
+ # (SMPLX_BONE_ORDER_NAMES[:22] + SMPLX_BONE_ORDER_NAMES[25:55]) == SMPLH_BONE_ORDER_NAMES # ZL Hack: only use the weights we need.
+ skin_weights = self.lbs_weights.numpy()
+ skin_weights.argmax(axis=1)
+
+ channels = ["z", "y", "x"]
+ return (verts[0], jts_np[0], skin_weights, self.joint_names, joint_offsets, parents_dict, channels, self.joint_range)
- def get_mesh_offsets(self, betas=torch.zeros(1, 16), flatfoot=False):
+ def get_mesh_offsets(self, zero_pose=None, betas=torch.zeros(1, 10), flatfoot=False):
with torch.no_grad():
joint_names = self.joint_names
- verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)
+ if zero_pose is None:
+ verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)
+ else:
+ verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)
verts_np = verts.detach().cpu().numpy()
verts = verts_np[0]
@@ -543,11 +332,11 @@ def get_mesh_offsets(self, betas=torch.zeros(1, 16), flatfoot=False):
verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])
smpl_joint_parents = self.parents.cpu().numpy()
+
joint_pos = Jtr[0].numpy()
joint_offsets = {joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c] for c, p in enumerate(smpl_joint_parents)}
joint_parents = {x: joint_names[i] if i >= 0 else None for x, i in zip(joint_names, smpl_joint_parents)}
- # skin_weights = smpl_layer.th_weights.numpy()
skin_weights = self.lbs_weights.numpy()
return (
verts,
@@ -562,6 +351,38 @@ def get_mesh_offsets(self, betas=torch.zeros(1, 16), flatfoot=False):
self.contype,
self.conaffinity,
)
+
+ def get_mesh_offsets_batch(self, betas=torch.zeros(1, 10), flatfoot=False):
+ with torch.no_grad():
+ joint_names = self.joint_names
+ verts, Jtr = self.get_joints_verts(self.zero_pose.repeat(betas.shape[0], 1), th_betas=betas)
+ verts_np = verts.detach().cpu().numpy()
+ verts = verts_np[0]
+
+ if flatfoot:
+ feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01
+ verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])
+
+ smpl_joint_parents = self.parents.cpu().numpy()
+
+ joint_pos = Jtr
+ joint_offsets = {joint_names[c]: (joint_pos[:, c] - joint_pos[:, p]) if c > 0 else joint_pos[:, c] for c, p in enumerate(smpl_joint_parents)}
+ joint_parents = {x: joint_names[i] if i >= 0 else None for x, i in zip(joint_names, smpl_joint_parents)}
+
+ skin_weights = self.lbs_weights
+ return (
+ verts,
+ joint_pos,
+ skin_weights,
+ joint_names,
+ joint_offsets,
+ joint_parents,
+ self.joint_axes,
+ self.joint_dofs,
+ self.joint_range,
+ self.contype,
+ self.conaffinity,
+ )
class SMPLX_Parser(_SMPLX):
@@ -571,7 +392,7 @@ def __init__(self, *args, **kwargs):
self.device = next(self.parameters()).device
self.joint_names = SMPLH_BONE_ORDER_NAMES
self.joint_axes = {x: np.identity(3) for x in self.joint_names}
- self.joint_dofs = {x: ["z", "y", "x"] for x in self.joint_names}
+ self.joint_dofs = {x: ["x", "y", "z"] for x in self.joint_names}
self.joint_range = {x: np.hstack([np.ones([3, 1]) * -np.pi, np.ones([3, 1]) * np.pi]) for x in self.joint_names}
self.joint_range["L_Elbow"] *= 4
self.joint_range["R_Elbow"] *= 4
@@ -600,7 +421,7 @@ def get_joints_verts(self, pose, th_betas=None, th_trans=None):
th_betas = th_betas.float()
B, beta_shape = th_betas.shape
if beta_shape != 20:
- th_betas = torch.cat([th_betas, torch.zeros((B, 20 - beta_shape))], dim = -1)
+ th_betas = torch.cat([th_betas, torch.zeros((B, 20 - beta_shape)).to(th_betas)], dim = -1)
batch_size = pose.shape[0]
smpl_output = self.forward(
@@ -717,5 +538,205 @@ def get_mesh_offsets_batch(self, betas=torch.zeros(1, 10), flatfoot=False):
)
+
+
+class MANO_Parser(_MANO):
+
+ def __init__(self, create_transl=False, *args, **kwargs):
+        """MANO model constructor
+ Parameters
+ ----------
+ model_path: str
+ The path to the folder or to the file where the model
+ parameters are stored
+        data_struct: Struct
+ A struct object. If given, then the parameters of the model are
+ read from the object. Otherwise, the model tries to read the
+ parameters from the given `model_path`. (default = None)
+ create_global_orient: bool, optional
+ Flag for creating a member variable for the global orientation
+ of the body. (default = True)
+ global_orient: torch.tensor, optional, Bx3
+ The default value for the global orientation variable.
+ (default = None)
+ create_body_pose: bool, optional
+ Flag for creating a member variable for the pose of the body.
+ (default = True)
+ body_pose: torch.tensor, optional, Bx(Body Joints * 3)
+ The default value for the body pose variable.
+ (default = None)
+ create_betas: bool, optional
+ Flag for creating a member variable for the shape space
+ (default = True).
+ betas: torch.tensor, optional, Bx10
+ The default value for the shape member variable.
+ (default = None)
+ create_transl: bool, optional
+ Flag for creating a member variable for the translation
+ of the body. (default = True)
+ transl: torch.tensor, optional, Bx3
+ The default value for the transl variable.
+ (default = None)
+ dtype: torch.dtype, optional
+ The data type for the created variables
+ batch_size: int, optional
+ The batch size used for creating the member variables
+ joint_mapper: object, optional
+ An object that re-maps the joints. Useful if one wants to
+ re-order the SMPL joints to some other convention (e.g. MSCOCO)
+ (default = None)
+ gender: str, optional
+ Which gender to load
+ vertex_ids: dict, optional
+ A dictionary containing the indices of the extra vertices that
+ will be selected
+ """
+ super(MANO_Parser, self).__init__(*args, **kwargs)
+ self.device = next(self.parameters()).device
+
+ if kwargs['is_rhand']:
+ self.joint_names = MANO_RIGHT_BONE_ORDER_NAMES
+ else:
+ self.joint_names = MANO_LEFT_BONE_ORDER_NAMES
+
+ self.joint_axes = {x: np.identity(3) for x in self.joint_names}
+ self.joint_dofs = {x: ["x", "y", "z"] for x in self.joint_names}
+ self.joint_range = {x: np.hstack([np.ones([3, 1]) * -np.pi, np.ones([3, 1]) * np.pi]) for x in self.joint_names}
+
+ self.contype = {1: self.joint_names}
+ self.conaffinity = {1: self.joint_names}
+
+ self.zero_pose = torch.zeros(1, 48).float()
+
+ def forward(self, *args, **kwargs):
+ smpl_output = super(MANO_Parser, self).forward(*args, **kwargs)
+ return smpl_output
+
+ def get_joints_verts(self, pose, th_betas=None, th_trans=None):
+ """
+        Pose should be batch_size x 48 (3 global orient + 45 hand pose)
+ """
+ if pose.shape[1] != 48:
+ pose = pose.reshape(-1, 48)
+
+ pose = pose.float()
+ if th_betas is not None:
+ th_betas = th_betas.float()
+
+ if th_betas.shape[-1] == 16:
+ th_betas = th_betas[:, :10]
+
+ batch_size = pose.shape[0]
+ smpl_output = self.forward(
+ betas=th_betas,
+ transl=th_trans,
+ hand_pose=pose[:, 3:],
+ global_orient=pose[:, :3],
+ )
+ vertices = smpl_output.vertices
+ joints = smpl_output.joints
+ # joints = smpl_output.joints[:,JOINST_TO_USE]
+ return vertices, joints
+
+ def get_offsets(self, zero_pose=None, betas=torch.zeros(1, 10).float()):
+ with torch.no_grad():
+ if zero_pose is None:
+ verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)
+ else:
+ verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)
+ verts_np = verts.detach().cpu().numpy()
+ jts_np = Jtr.detach().cpu().numpy()
+ parents = self.parents.cpu().numpy()
+ offsets_smpl = [np.array([0, 0, 0])]
+ for i in range(1, len(parents)):
+ p_id = parents[i]
+ p3d = jts_np[0, p_id]
+ curr_3d = jts_np[0, i]
+ offset_curr = curr_3d - p3d
+ offsets_smpl.append(offset_curr)
+
+ offsets_smpl = np.array(offsets_smpl)
+ joint_names = self.joint_names
+ joint_pos = Jtr[0].numpy()
+ smpl_joint_parents = self.parents.cpu().numpy()
+ joint_offsets = {joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c] for c, p in enumerate(smpl_joint_parents)}
+ parents_dict = {joint_names[i]: joint_names[parents[i]] for i in range(len(joint_names))}
+ channels = ["x", "y", "z"]
+
+ skin_weights = self.lbs_weights.numpy()
+ return (verts[0], jts_np[0], skin_weights, self.joint_names, joint_offsets, parents_dict, channels, self.joint_range)
+
+ def get_mesh_offsets(self, zero_pose=None, betas=torch.zeros(1, 10), flatfoot=False):
+ with torch.no_grad():
+ joint_names = self.joint_names
+ if zero_pose is None:
+ verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)
+ else:
+ verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)
+
+ verts_np = verts.detach().cpu().numpy()
+ verts = verts_np[0]
+
+ if flatfoot:
+ feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01
+ verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])
+
+ smpl_joint_parents = self.parents.cpu().numpy()
+
+ joint_pos = Jtr[0].numpy()
+ joint_offsets = {joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c] for c, p in enumerate(smpl_joint_parents)}
+ joint_parents = {x: joint_names[i] if i >= 0 else None for x, i in zip(joint_names, smpl_joint_parents)}
+
+ # skin_weights = smpl_layer.th_weights.numpy()
+ skin_weights = self.lbs_weights.numpy()
+ return (
+ verts,
+ joint_pos,
+ skin_weights,
+ joint_names,
+ joint_offsets,
+ joint_parents,
+ self.joint_axes,
+ self.joint_dofs,
+ self.joint_range,
+ self.contype,
+ self.conaffinity,
+ )
+
+ def get_mesh_offsets_batch(self, betas=torch.zeros(1, 10), flatfoot=False):
+ with torch.no_grad():
+ joint_names = self.joint_names
+ verts, Jtr = self.get_joints_verts(self.zero_pose.repeat(betas.shape[0], 1), th_betas=betas)
+ verts_np = verts.detach().cpu().numpy()
+ verts = verts_np[0]
+
+ if flatfoot:
+ feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01
+ verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])
+
+ smpl_joint_parents = self.parents.cpu().numpy()
+
+ joint_pos = Jtr
+ joint_offsets = {joint_names[c]: (joint_pos[:, c] - joint_pos[:, p]) if c > 0 else joint_pos[:, c] for c, p in enumerate(smpl_joint_parents)}
+ joint_parents = {x: joint_names[i] if i >= 0 else None for x, i in zip(joint_names, smpl_joint_parents)}
+
+ skin_weights = self.lbs_weights
+ return (
+ verts,
+ joint_pos,
+ skin_weights,
+ joint_names,
+ joint_offsets,
+ joint_parents,
+ self.joint_axes,
+ self.joint_dofs,
+ self.joint_range,
+ self.contype,
+ self.conaffinity,
+ )
+
+
+
+
if __name__ == "__main__":
- smpl_p = SMPLH_Parser("data/smpl", gender="neutral")
+ smpl_p = SMPLH_Parser("/hdd/zen/dev/copycat/Copycat/data/smpl", gender="neutral")
diff --git a/smpl_sim/smpllib/torch_smpl_humanoid_batch.py b/smpl_sim/smpllib/torch_smpl_humanoid_batch.py
index 01508a1..3d50c2e 100644
--- a/smpl_sim/smpllib/torch_smpl_humanoid_batch.py
+++ b/smpl_sim/smpllib/torch_smpl_humanoid_batch.py
@@ -30,16 +30,20 @@
import smpl_sim.utils.pytorch3d_transforms as tRot
from collections import defaultdict
import smpl_sim.utils.np_transform_utils as npt_utils
+from smpl_sim.smpllib.smpl_joint_names import SMPL_BONE_ORDER_NAMES, SMPLH_BONE_ORDER_NAMES, SMPLX_BONE_ORDER_NAMES, SMPL_MUJOCO_NAMES, SMPLH_MUJOCO_NAMES
class Humanoid_Batch:
def __init__(self, smpl_model="smpl", data_dir="data/smpl", filter_vel = True):
-
self.smpl_model = smpl_model
if self.smpl_model == "smpl":
- self.smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral")
- self.smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male")
- self.smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female")
+ self.smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral")
+ self.smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male")
+ self.smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female")
+ self.bone_mujoco_names = SMPL_MUJOCO_NAMES
+ self._parents = [-1, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 12, 11, 14, 15, 16, 17, 11, 19, 20, 21, 22] # Mujoco's order
+ self.bone_rodered_names = SMPL_BONE_ORDER_NAMES
+
elif self.smpl_model == "smplh":
self.smpl_parser_n = SMPLH_Parser(
model_path=data_dir,
@@ -49,20 +53,26 @@ def __init__(self, smpl_model="smpl", data_dir="data/smpl", filter_vel = True):
)
self.smpl_parser_m = SMPLH_Parser(model_path=data_dir, gender="male", use_pca=False, create_transl=False)
self.smpl_parser_f = SMPLH_Parser(model_path=data_dir, gender="female", use_pca=False, create_transl=False)
+ self.bone_mujoco_names = SMPLH_MUJOCO_NAMES
+ self._parents = [-1, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 12, 11, 14, 15, 16, 17, 18, 19, 17, 21, 22, 17, 24, 25, 17, 27, 28, 17, 30, 31, 11, 33, 34, 35, 36, 37, 38, 36, 40, 41, 36, 43, 44, 36, 46, 47, 36, 49, 50]
+ self.bone_rodered_names = SMPLH_BONE_ORDER_NAMES
elif self.smpl_model == "smplx":
self.smpl_parser_n = SMPLX_Parser(
model_path=data_dir,
gender="neutral",
use_pca=False,
create_transl=False,
+ flat_hand_mean = True,
)
- self.smpl_parser_m = SMPLX_Parser(model_path=data_dir, gender="male", use_pca=False, create_transl=False)
- self.smpl_parser_f = SMPLX_Parser(model_path=data_dir, gender="female", use_pca=False, create_transl=False)
-
- self.model_names = ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand']
- self._parents = [-1, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 12, 11, 14, 15, 16, 17, 11, 19, 20, 21, 22] # mujoco order SMPL parents.
- self.smpl_2_mujoco = [SMPL_BONE_ORDER_NAMES.index(i) for i in self.model_names] # Apply Mujoco order
- self.mujoco_2_smpl = [self.model_names.index(i) for i in SMPL_BONE_ORDER_NAMES] # Apply Mujoco order
+ self.smpl_parser_m = SMPLX_Parser(model_path=data_dir, gender="male", use_pca=False, create_transl=False, flat_hand_mean = True,)
+ self.smpl_parser_f = SMPLX_Parser(model_path=data_dir, gender="female", use_pca=False, create_transl=False, flat_hand_mean = True,)
+ self.bone_mujoco_names = SMPLH_MUJOCO_NAMES
+ self._parents = [-1, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 12, 11, 14, 15, 16, 17, 18, 19, 17, 21, 22, 17, 24, 25, 17, 27, 28, 17, 30, 31, 11, 33, 34, 35, 36, 37, 38, 36, 40, 41, 36, 43, 44, 36, 46, 47, 36, 49, 50]
+ self.bone_rodered_names = SMPLH_BONE_ORDER_NAMES
+
+
+ self.smpl_2_mujoco = [self.bone_rodered_names.index(i) for i in self.bone_mujoco_names] # Apply Mujoco order
+ self.mujoco_2_smpl = [self.bone_mujoco_names.index(i) for i in self.bone_rodered_names] # Apply Mujoco order
self.num_joints = len(self._parents)
self.dt = 1/30
self.update_model(torch.zeros((1, 10)), torch.zeros((1))) # default gender 0 and pose 0.
@@ -97,7 +107,7 @@ def update_model(self, betas, gender, dt = 1/30):
joint_offsets_all[n][gender == 1] = joint_offsets_m[n]
off_sets = []
- for n in self.model_names:
+ for n in self.bone_mujoco_names:
off_sets.append(joint_offsets_all[n])
# self._offsets = torch.from_numpy(np.stack(off_sets, axis=1))
@@ -369,4 +379,3 @@ def main(cfg : DictConfig) -> None:
main()
-
diff --git a/smpl_sim/utils/math_utils.py b/smpl_sim/utils/math_utils.py
index 6eb376b..1f34a1a 100644
--- a/smpl_sim/utils/math_utils.py
+++ b/smpl_sim/utils/math_utils.py
@@ -10,6 +10,22 @@
rotation_from_matrix,
)
+class LinearAnneal:
+ def __init__(self, start_value, end_value, total_steps):
+ self.start_value = start_value
+ self.end_value = end_value
+ self.total_steps = total_steps
+ self.current_step = 0
+
+ def step(self):
+ """Return the next annealed value."""
+ if self.current_step >= self.total_steps:
+ return self.end_value
+ alpha = self.current_step / self.total_steps
+ value = (1 - alpha) * self.start_value + alpha * self.end_value
+ self.current_step += 1
+ return value
+
def gmof(res, sigma):
"""
@@ -44,21 +60,21 @@ def normal_log_density(x, mean, log_std, std):
def get_qvel_fd_new(cur_qpos, next_qpos, dt, transform=None):
v = (next_qpos[:3] - cur_qpos[:3]) / dt
- qrel = quaternion_multiply(next_qpos[3:7], quaternion_inverse(cur_qpos[3:7]))
+ qrel = quaternion_multiply(next_qpos[3:7],
+ quaternion_inverse(cur_qpos[3:7]))
axis, angle = rotation_from_quaternion(qrel, True)
while angle > np.pi:
angle -= 2 * np.pi
while angle < -np.pi:
angle += 2 * np.pi
rv = (axis * angle) / dt
- rv = transform_vec(rv, cur_qpos[3:7], "root") # angular velocity is in root coord
+ rv = transform_vec(rv, cur_qpos[3:7],
+ "root") # angular velocity is in root coord
diff = next_qpos[7:] - cur_qpos[7:]
-
while np.any(diff > np.pi):
diff[diff > np.pi] -= 2 * np.pi
while np.any(diff < -np.pi):
diff[diff < -np.pi] += 2 * np.pi
-
qvel = diff / dt
qvel = np.concatenate((v, rv, qvel))
if transform is not None:
@@ -303,4 +319,4 @@ def smpl_op_to_op(pred_joints2d):
pred_joints2d[..., 9:11, :], \
pred_joints2d[..., 12:, :]], \
axis = -2)
- return new_2d
\ No newline at end of file
+ return new_2d
diff --git a/smpl_sim/utils/np_transform_utils.py b/smpl_sim/utils/np_transform_utils.py
index c62e311..18e399d 100644
--- a/smpl_sim/utils/np_transform_utils.py
+++ b/smpl_sim/utils/np_transform_utils.py
@@ -1,5 +1,6 @@
# Mujoco: wxyz. Isaac and sRot: xyzw.
import numpy as np
+
def wxyz_to_xyzw(quat):
return quat[..., [1, 2, 3, 0]]
diff --git a/smpl_sim/utils/one_euor_filter.py b/smpl_sim/utils/one_euor_filter.py
new file mode 100644
index 0000000..6fa0b75
--- /dev/null
+++ b/smpl_sim/utils/one_euor_filter.py
@@ -0,0 +1,45 @@
+import math
+
+
+def smoothing_factor(t_e, cutoff):
+ r = 2 * math.pi * cutoff * t_e
+ return r / (r + 1)
+
+
+def exponential_smoothing(a, x, x_prev):
+ return a * x + (1 - a) * x_prev
+
+
+class OneEuroFilter:
+ def __init__(self, t0, x0, dx0=0.0, min_cutoff=1.0, beta=0.0,
+ d_cutoff=1.0):
+ """Initialize the one euro filter."""
+ # The parameters.
+ self.min_cutoff = float(min_cutoff)
+ self.beta = float(beta)
+ self.d_cutoff = float(d_cutoff)
+ # Previous values.
+ self.x_prev = float(x0)
+ self.dx_prev = float(dx0)
+ self.t_prev = float(t0)
+
+ def __call__(self, t, x):
+ """Compute the filtered signal."""
+ t_e = t - self.t_prev
+
+ # The filtered derivative of the signal.
+ a_d = smoothing_factor(t_e, self.d_cutoff)
+ dx = (x - self.x_prev) / t_e
+ dx_hat = exponential_smoothing(a_d, dx, self.dx_prev)
+
+ # The filtered signal.
+ cutoff = self.min_cutoff + self.beta * abs(dx_hat)
+ a = smoothing_factor(t_e, cutoff)
+ x_hat = exponential_smoothing(a, x, self.x_prev)
+
+ # Memorize the previous values.
+ self.x_prev = x_hat
+ self.dx_prev = dx_hat
+ self.t_prev = t
+
+ return x_hat
\ No newline at end of file
diff --git a/smpl_sim/utils/torch_ext.py b/smpl_sim/utils/torch_ext.py
new file mode 100644
index 0000000..9116d3e
--- /dev/null
+++ b/smpl_sim/utils/torch_ext.py
@@ -0,0 +1,239 @@
+import torch
+from torch.optim import lr_scheduler
+import numpy as np
+
+tensor = torch.tensor
+DoubleTensor = torch.DoubleTensor
+FloatTensor = torch.FloatTensor
+LongTensor = torch.LongTensor
+ByteTensor = torch.ByteTensor
+ones = torch.ones
+zeros = torch.zeros
+
+
+class to_cpu:
+
+ def __init__(self, *models):
+ self.models = list(filter(lambda x: x is not None, models))
+ self.prev_devices = [x.device if hasattr(x, 'device') else next(x.parameters()).device for x in self.models]
+ for x in self.models:
+ x.to(torch.device('cpu'))
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, *args):
+ for x, device in zip(self.models, self.prev_devices):
+ x.to(device)
+ return False
+
+
+class to_device:
+
+ def __init__(self, device, *models):
+ self.models = list(filter(lambda x: x is not None, models))
+ self.prev_devices = [x.device if hasattr(x, 'device') else next(x.parameters()).device for x in self.models]
+ for x in self.models:
+ x.to(device)
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, *args):
+ for x, device in zip(self.models, self.prev_devices):
+ x.to(device)
+ return False
+
+
+class to_test:
+
+ def __init__(self, *models):
+ self.models = list(filter(lambda x: x is not None, models))
+ self.prev_modes = [x.training for x in self.models]
+ for x in self.models:
+ x.train(False)
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, *args):
+ for x, mode in zip(self.models, self.prev_modes):
+ x.train(mode)
+ return False
+
+
+class to_train:
+
+ def __init__(self, *models):
+ self.models = list(filter(lambda x: x is not None, models))
+ self.prev_modes = [x.training for x in self.models]
+ for x in self.models:
+ x.train(True)
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, *args):
+ for x, mode in zip(self.models, self.prev_modes):
+ x.train(mode)
+ return False
+
+
+def batch_to(dst, *args):
+ return [x.to(dst) if x is not None else None for x in args]
+
+
+def get_flat_params_from(models):
+ if not hasattr(models, '__iter__'):
+ models = (models, )
+ params = []
+ for model in models:
+ for param in model.parameters():
+ params.append(param.data.view(-1))
+
+ flat_params = torch.cat(params)
+ return flat_params
+
+
+def set_flat_params_to(model, flat_params):
+ prev_ind = 0
+ for param in model.parameters():
+ flat_size = int(np.prod(list(param.size())))
+ param.data.copy_(
+ flat_params[prev_ind:prev_ind + flat_size].view(param.size()))
+ prev_ind += flat_size
+
+
+def get_flat_grad_from(inputs, grad_grad=False):
+ grads = []
+ for param in inputs:
+ if grad_grad:
+ grads.append(param.grad.grad.view(-1))
+ else:
+ if param.grad is None:
+ grads.append(zeros(param.view(-1).shape))
+ else:
+ grads.append(param.grad.view(-1))
+
+ flat_grad = torch.cat(grads)
+ return flat_grad
+
+
+def compute_flat_grad(output, inputs, filter_input_ids=set(), retain_graph=False, create_graph=False):
+ if create_graph:
+ retain_graph = True
+
+ inputs = list(inputs)
+ params = []
+ for i, param in enumerate(inputs):
+ if i not in filter_input_ids:
+ params.append(param)
+
+ grads = torch.autograd.grad(output, params, retain_graph=retain_graph, create_graph=create_graph)
+
+ j = 0
+ out_grads = []
+ for i, param in enumerate(inputs):
+ if i in filter_input_ids:
+ out_grads.append(zeros(param.view(-1).shape))
+ else:
+ out_grads.append(grads[j].view(-1))
+ j += 1
+ grads = torch.cat(out_grads)
+
+ for param in params:
+ param.grad = None
+ return grads
+
+
+def set_optimizer_lr(optimizer, lr):
+ for param_group in optimizer.param_groups:
+ param_group['lr'] = lr
+
+
+def filter_state_dict(state_dict, filter_keys):
+ for key in list(state_dict.keys()):
+ for f_key in filter_keys:
+ if f_key in key:
+ del state_dict[key]
+ break
+
+
+def get_scheduler(optimizer, policy, nepoch_fix=None, nepoch=None, decay_step=None):
+ if policy == 'lambda':
+ def lambda_rule(epoch):
+ lr_l = 1.0 - max(0, epoch - nepoch_fix) / float(nepoch - nepoch_fix + 1)
+ return lr_l
+ scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
+
+ elif policy == 'step':
+ scheduler = lr_scheduler.StepLR(
+ optimizer, step_size=decay_step, gamma=0.1)
+ elif policy == 'plateau':
+ scheduler = lr_scheduler.ReduceLROnPlateau(
+ optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
+ else:
+ return NotImplementedError('learning rate policy [%s] is not implemented', policy)
+ return scheduler
+
+def to_numpy(tensor):
+ if torch.is_tensor(tensor):
+ return tensor.cpu().numpy()
+ else:
+ return tensor
+
+def to_torch(tensor):
+ if torch.is_tensor(tensor):
+ return tensor
+ else:
+ return torch.from_numpy(tensor)
+
+def dict_to_torch(input_dict, dtype = None, device = None, add_dim = False):
+ if not isinstance(input_dict, dict):
+ return None
+ out_dict = {}
+ for key, value in input_dict.items():
+ if isinstance(value, np.ndarray):
+ value = torch.from_numpy(value)
+ else:
+ pass
+
+ if torch.is_tensor(value):
+ if dtype is not None:
+ value = value.type(dtype)
+ if device is not None:
+ value = value.to(device)
+ if add_dim:
+ value = value[None, ]
+
+ out_dict[key] = value
+
+ return out_dict
+
+def dict_to_numpy(input_dict):
+ if not isinstance(input_dict, dict):
+ return None
+ out_dict = {}
+ for key, value in input_dict.items():
+ if isinstance(value, torch.Tensor):
+ out_dict[key] = value.detach().cpu().numpy()
+ elif isinstance(value, dict):
+ out_dict[key] = dict_to_numpy(value)
+ else:
+ out_dict[key] = value
+
+ return out_dict
+
+def isNpArray(array):
+ return isinstance(array, np.ndarray) and (not array.dtype.char in ['U', 'S'])
+
+
+def gather_vecs(vec_list, indices):
+ res = []
+ for vec in vec_list:
+ gather_indices = torch.LongTensor(
+ np.tile(indices[:, None], (1, vec.shape[-1]))).to(vec.device)
+ new_vec = torch.gather(vec.reshape(-1, vec.shape[-1]), 0,
+ gather_indices)
+ res.append(new_vec)
+ return res
diff --git a/smpl_sim/utils/transform_utils.py b/smpl_sim/utils/transform_utils.py
index 68c2743..e6a3197 100644
--- a/smpl_sim/utils/transform_utils.py
+++ b/smpl_sim/utils/transform_utils.py
@@ -2,92 +2,283 @@
import numpy as np
import torch
-from .torch_geometry_transforms import *
+from smpl_sim.utils.torch_geometry_transforms import *
from torch.nn import functional as F
from scipy.spatial.transform import Rotation as sRot
from scipy.ndimage import gaussian_filter1d
-def wxyz_to_xyzw(quat):
- return quat[..., [1, 2, 3, 0]]
-
-def xyzw_to_wxyz(quat):
- return quat[..., [3, 0, 1, 2]]
-
-def normalize(v):
- norm=np.linalg.norm(v)
- if norm==0:
- norm=np.finfo(v.dtype).eps
- return v/norm
-
-def quat_unit(a):
- return normalize(a)
-
-def quat_from_angle_axis(angle, axis):
- theta = (angle / 2)[..., None]
- xyz = normalize(axis) * np.sin(theta)
- w = np.cos(theta)
- return quat_unit(np.concatenate([w, xyz], axis=-1))
-
-
-def quat_rotate(q, v):
- shape = q.shape
- q_w = q[:, 0]
- q_vec = q[:, 1:]
- a = v * (2.0 * q_w**2 - 1.0)[..., None]
- b = np.cross(q_vec, v, axis=-1) * q_w[..., None] * 2.0
- c = q_vec * \
- (q_vec.reshape(shape[0], 1, 3) @ v.reshape(
- shape[0], 3, 1)).squeeze(-1) * 2.0
- return a + b + c
-
-def calc_heading(q):
- ref_dir = np.zeros((q.shape[0], 3))
- ref_dir[..., 0] = 1
- rot_dir = quat_rotate(q, ref_dir)
-
- heading = np.arctan2(rot_dir[..., 1], rot_dir[..., 0])
- return heading
-
-
-def calc_heading_quat(q):
- heading = calc_heading(q)
- axis = np.zeros((q.shape[0], 3))
- axis[..., 2] = 1
-
- heading_q = quat_from_angle_axis(heading, axis)
- return heading_q
-
-def calc_heading_quat_inv(q):
- heading = calc_heading(q)
- axis = np.zeros((q.shape[0], 3))
- axis[..., 2] = 1
-
- heading_q = quat_from_angle_axis(-heading, axis)
- return heading_q
-
-def quat_conjugate(a):
- shape = a.shape
- a = a.reshape(-1, 4)
- return np.concatenate((a[:, :1], -a[:, 1:]), axis=-1).reshape(shape)
-
-def quat_mul(a, b):
- assert a.shape == b.shape
- shape = a.shape
- a = a.reshape(-1, 4)
- b = b.reshape(-1, 4)
-
- w1, x1, y1, z1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
- w2, x2, y2, z2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
- ww = (z1 + x1) * (x2 + y2)
- yy = (w1 - y1) * (w2 + z2)
- zz = (w1 + y1) * (w2 - z2)
- xx = ww + yy + zz
- qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
- w = qq - ww + (z1 - y1) * (y2 - z2)
- x = qq - xx + (x1 + w1) * (x2 + w2)
- y = qq - yy + (w1 - x1) * (y2 + z2)
- z = qq - zz + (z1 + y1) * (w2 - x2)
-
- quat = np.stack([w, x, y, z], axis=-1).reshape(shape)
-
- return quat
\ No newline at end of file
+
+def smpl_mat_to_aa(poses):
+ poses_aa = []
+ for pose_frame in poses:
+ pose_frames = []
+ for joint in pose_frame:
+ pose_frames.append(cv2.Rodrigues(joint)[0].flatten())
+ pose_frames = np.array(pose_frames)
+ poses_aa.append(pose_frames)
+ poses_aa = np.array(poses_aa)
+ return poses_aa
+
+
+def compute_rotation_matrix_from_ortho6d(ortho6d):
+ x_raw = ortho6d[:, 0:3] #batch*3
+ y_raw = ortho6d[:, 3:6] #batch*3
+
+ x = normalize_vector(x_raw) #batch*3
+ z = cross_product(x, y_raw) #batch*3
+ z = normalize_vector(z) #batch*3
+ y = cross_product(z, x) #batch*3
+
+ x = x.view(-1, 3, 1)
+ y = y.view(-1, 3, 1)
+ z = z.view(-1, 3, 1)
+ zeros = torch.zeros(z.shape, dtype=z.dtype).to(ortho6d.device)
+ matrix = torch.cat((x, y, z, zeros), 2) #batch*3*3
+ return matrix
+
+
+def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor:
+ """
+ Converts 6D rotation representation by Zhou et al. [1] to rotation matrix
+ using Gram--Schmidt orthogonalisation per Section B of [1].
+ Args:
+ d6: 6D rotation representation, of size (*, 6)
+
+ Returns:
+ batch of rotation matrices of size (*, 3, 3)
+
+ [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
+ On the Continuity of Rotation Representations in Neural Networks.
+ IEEE Conference on Computer Vision and Pattern Recognition, 2019.
+ Retrieved from http://arxiv.org/abs/1812.07035
+ """
+
+ a1, a2 = d6[..., :3], d6[..., 3:]
+ b1 = F.normalize(a1, dim=-1)
+ b2 = a2 - (b1 * a2).sum(-1, keepdim=True) * b1
+ b2 = F.normalize(b2, dim=-1)
+ b3 = torch.cross(b1, b2, dim=-1)
+ return torch.stack((b1, b2, b3), dim=-2)
+
+
+def cross_product(u, v):
+ batch = u.shape[0]
+ #print (u.shape)
+ #print (v.shape)
+ i = u[:, 1] * v[:, 2] - u[:, 2] * v[:, 1]
+ j = u[:, 2] * v[:, 0] - u[:, 0] * v[:, 2]
+ k = u[:, 0] * v[:, 1] - u[:, 1] * v[:, 0]
+
+ out = torch.cat((i.view(batch, 1), j.view(batch, 1), k.view(batch, 1)), 1) #batch*3
+
+ return out
+
+
+def compute_orth6d_from_rotation_matrix(rot_mats):
+ rot_mats = rot_mats[:, :, :2].transpose(1, 2).reshape(-1, 6)
+ return rot_mats
+
+
+def convert_mat_to_6d(poses):
+ if torch.is_tensor(poses):
+ curr_pose = poses.to(poses.device).float().reshape(-1, 3, 3)
+ else:
+ curr_pose = torch.tensor(poses).to(poses.device).float().reshape(-1, 3, 3)
+
+ orth6d = curr_pose[:, :, :2].transpose(1, 2).reshape(-1, 6)
+ orth6d = orth6d.view(poses.shape[0], -1, 6)
+ return orth6d
+
+
+def convert_aa_to_orth6d(poses):
+ if torch.is_tensor(poses):
+ curr_pose = poses.to(poses.device).float().reshape(-1, 3)
+ else:
+ curr_pose = torch.from_numpy(poses).to(poses.device).float().reshape(-1, 3)
+ rot_mats = angle_axis_to_rotation_matrix(curr_pose)
+ rot_mats = rot_mats[:, :3, :]
+ orth6d = compute_orth6d_from_rotation_matrix(rot_mats)
+ orth6d = orth6d.view(poses.shape[0], -1, 6)
+ return orth6d
+
+
+def convert_orth_6d_to_aa(orth6d):
+ orth6d_flat = orth6d.reshape(-1, 6)
+ rot_mat6d = compute_rotation_matrix_from_ortho6d(orth6d_flat)
+ pose_aa = rotation_matrix_to_angle_axis(rot_mat6d)
+
+ shape_curr = list(orth6d.shape)
+ shape_curr[-1] /= 2
+ shape_curr = tuple([int(i) for i in shape_curr])
+ pose_aa = pose_aa.reshape(shape_curr)
+ return pose_aa
+
+
+def convert_orth_6d_to_mat(orth6d):
+ num_joints = int(orth6d.shape[-1] / 6)
+ orth6d_flat = orth6d.reshape(-1, 6)
+
+ rot_mat6d = compute_rotation_matrix_from_ortho6d(orth6d_flat)[:, :, :3]
+
+ shape_curr = list(orth6d.shape)
+ shape_curr[-1] = num_joints
+ shape_curr += [3, 3]
+ shape_curr = tuple([int(i) for i in shape_curr])
+ rot_mat = rot_mat6d.reshape(shape_curr)
+ return rot_mat
+
+
+def normalize_vector(v, return_mag=False):
+ batch = v.shape[0]
+ v_mag = torch.sqrt(v.pow(2).sum(1)) # batch
+ v_mag = torch.max(v_mag, torch.autograd.Variable(torch.tensor([1e-8], dtype=v_mag.dtype).to(v.device)))
+ v_mag = v_mag.view(batch, 1).expand(batch, v.shape[1])
+ v = v / v_mag
+ if (return_mag == True):
+ return v, v_mag[:, 0]
+ else:
+ return v
+
+
+def vertizalize_smpl_root(poses, root_vec=[np.pi / 2, 0, 0]):
+ device = poses.device
+ target_mat = angle_axis_to_rotation_matrix(torch.tensor([root_vec], dtype=poses.dtype).to(device))[:, :3, :3].to(device)
+ org_mats = angle_axis_to_rotation_matrix(poses[:, :3])[:, :3, :3].to(device)
+ org_mat_inv = torch.inverse(org_mats[0]).to(device)
+ apply_mat = torch.matmul(target_mat, org_mat_inv)
+ res_root_mat = torch.matmul(apply_mat, org_mats)
+ zeros = torch.zeros((res_root_mat.shape[0], res_root_mat.shape[1], 1), dtype=res_root_mat.dtype).to(device)
+ res_root_mats_4 = torch.cat((res_root_mat, zeros), 2) #batch*3*4
+ res_root_aa = rotation_matrix_to_angle_axis(res_root_mats_4)
+
+ poses[:, :3] = res_root_aa
+ # print(res_root_aa)
+ return poses
+
+
+def vertizalize_smpl_root_and_trans(poses, trans, root_vec=[np.pi / 2, 0, 0]):
+ device = poses.device
+ target_mat = angle_axis_to_rotation_matrix(torch.tensor([root_vec], dtype=poses.dtype).to(device))[:, :3, :3].to(device)
+ org_mats = angle_axis_to_rotation_matrix(poses[:, :3])[:, :3, :3].to(device)
+ org_mat_inv = torch.inverse(org_mats[0]).to(device)
+ apply_mat = torch.matmul(target_mat, org_mat_inv)
+ res_root_mat = torch.matmul(apply_mat, org_mats)
+ zeros = torch.zeros((res_root_mat.shape[0], res_root_mat.shape[1], 1), dtype=res_root_mat.dtype).to(device)
+ res_root_mats_4 = torch.cat((res_root_mat, zeros), 2) #batch*3*4
+ res_root_aa = rotation_matrix_to_angle_axis(res_root_mats_4)
+
+ trans = torch.matmul(apply_mat, trans[:, :, None])
+
+ poses[:, :3] = res_root_aa
+ # print(res_root_aa)
+ return poses, trans.squeeze()
+
+
+def rotate_smpl_root_and_trans(poses, trans, root_vec=[np.pi / 2, 0, 0]):
+ device = poses.device
+ org_mats = angle_axis_to_rotation_matrix(poses[:, :3])[:, :3, :3].to(device)
+ apply_mat = angle_axis_to_rotation_matrix(torch.tensor([root_vec], dtype=poses.dtype).to(device))[:, :3, :3].to(device)
+ res_root_mat = torch.matmul(apply_mat, org_mats)
+ zeros = torch.zeros((res_root_mat.shape[0], res_root_mat.shape[1], 1), dtype=res_root_mat.dtype).to(device)
+ res_root_mats_4 = torch.cat((res_root_mat, zeros), 2) #batch*3*4
+ res_root_aa = rotation_matrix_to_angle_axis(res_root_mats_4)
+
+ trans = torch.matmul(apply_mat, trans[:, :, None])
+
+ poses[:, :3] = res_root_aa
+ # print(res_root_aa)
+ return poses, trans.squeeze()
+
+
+def rot6d_to_rotmat(x):
+ x = x.view(-1, 3, 2)
+
+ # Normalize the first vector
+ b1 = F.normalize(x[:, :, 0], dim=1, eps=1e-6)
+
+ dot_prod = torch.sum(b1 * x[:, :, 1], dim=1, keepdim=True)
+ # Compute the second vector by finding the orthogonal complement to it
+ b2 = F.normalize(x[:, :, 1] - dot_prod * b1, dim=-1, eps=1e-6)
+
+ # Finish building the basis by taking the cross product
+ b3 = torch.cross(b1, b2, dim=1)
+ rot_mats = torch.stack([b1, b2, b3], dim=-1)
+
+ return rot_mats
+
+
+def perspective_projection_cam(pred_joints, pred_camera):
+ pred_cam_t = torch.stack([pred_camera[:, 1], pred_camera[:, 2], 2 * 5000. / (224. * pred_camera[:, 0] + 1e-9)], dim=-1)
+ batch_size = pred_joints.shape[0]
+ camera_center = torch.zeros(batch_size, 2)
+ pred_keypoints_2d = perspective_projection(pred_joints, rotation=torch.eye(3).unsqueeze(0).expand(batch_size, -1, -1).to(pred_joints.device), translation=pred_cam_t, focal_length=5000., camera_center=camera_center)
+ # Normalize keypoints to [-1,1]
+ pred_keypoints_2d = pred_keypoints_2d / (224. / 2.)
+ return pred_keypoints_2d
+
+
+def perspective_projection(points, rotation, translation, focal_length, camera_center):
+ """
+ This function computes the perspective projection of a set of points.
+ Input:
+ points (bs, N, 3): 3D points
+ rotation (bs, 3, 3): Camera rotation
+ translation (bs, 3): Camera translation
+ focal_length (bs,) or scalar: Focal length
+ camera_center (bs, 2): Camera center
+ """
+ batch_size = points.shape[0]
+ K = torch.zeros([batch_size, 3, 3], device=points.device)
+ K[:, 0, 0] = focal_length
+ K[:, 1, 1] = focal_length
+ K[:, 2, 2] = 1.
+ K[:, :-1, -1] = camera_center
+
+ # Transform points
+ points = torch.einsum('bij,bkj->bki', rotation, points)
+ points = points + translation.unsqueeze(1)
+
+ # Apply perspective distortion
+ projected_points = points / points[:, :, -1].unsqueeze(-1)
+
+ # Apply camera intrinsics
+ projected_points = torch.einsum('bij,bkj->bki', K, projected_points)
+
+ return projected_points[:, :, :-1]
+
+
+def quat_correct(quat):
+ """ Converts quaternion to minimize Euclidean distance from previous quaternion (wxyz order) """
+ for q in range(1, quat.shape[0]):
+ if np.linalg.norm(quat[q - 1] - quat[q], axis=0) > np.linalg.norm(quat[q - 1] + quat[q], axis=0):
+ quat[q] = -quat[q]
+ return quat
+
+
+def quat_correct_two_batch(quats_prev, quats_next):
+ selects = np.linalg.norm(quats_prev - quats_next, axis=1) > np.linalg.norm(quats_prev + quats_next, axis=1)
+ quats_next[selects] = -quats_next[selects]
+ return quats_next
+
+
+def quat_smooth_window(quats, sigma=5):
+ quats = quat_correct(quats)
+ quats = gaussian_filter1d(quats, sigma, axis=0)
+ quats /= np.linalg.norm(quats, axis=1)[:, None]
+ return quats
+
+
+def smooth_smpl_quat_window(pose_aa, sigma=5):
+ batch = pose_aa.shape[0]
+ pose_quat = sRot.from_rotvec(pose_aa.reshape(-1, 3)).as_quat().reshape(batch, -1, 4)
+ pose_quat = pose_quat[:, :, [1, 2, 3, 0]].copy()
+
+ quats_all = []
+ for i in range(pose_quat.shape[1]):
+ quats = pose_quat[:, i, :].copy()
+ quats_all.append(quat_smooth_window(quats, sigma))
+
+ pose_quat_smooth = np.stack(quats_all, axis=1)[:, :, [3, 0, 1, 2]]
+
+ pose_rot_vec = (sRot.from_quat(pose_quat_smooth.reshape(-1, 4)).as_rotvec().reshape(batch, -1, 3))
+ return pose_rot_vec
\ No newline at end of file
diff --git a/test.xml b/test.xml
new file mode 100644
index 0000000..b12f7c4
--- /dev/null
+++ b/test.xml
@@ -0,0 +1,549 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+