From e42c2bf6740bf27dbdc7145928faae42ac69f189 Mon Sep 17 00:00:00 2001 From: zhengyiluo Date: Sun, 10 Nov 2024 17:39:20 -0500 Subject: [PATCH 1/4] updating poselib filters.gaussian_filter1d --- MUJOCO_LOG.TXT | 12 + .../envs/nv/poselib => isaaclab}/__init__.py | 0 isaaclab/atlas_21jts_config.py | 100 ++ isaaclab/joint_monkey.py | 192 +++ isaaclab/launch_app.py | 337 +++++ isaaclab/launch_smpl_env.py | 421 ++++++ isaaclab/run_atlas.py | 324 +++++ isaaclab/smpl_config.py | 298 ++++ smpl_sim/envs/nv/poselib/README.md | 20 - smpl_sim/envs/nv/poselib/poselib/__init__.py | 3 - .../envs/nv/poselib/poselib/core/__init__.py | 3 - .../poselib/poselib/core/backend/__init__.py | 3 - .../poselib/poselib/core/backend/abstract.py | 128 -- .../nv/poselib/poselib/core/backend/logger.py | 20 - .../nv/poselib/poselib/core/rotation3d.py | 474 ------ .../nv/poselib/poselib/core/tensor_utils.py | 45 - .../nv/poselib/poselib/core/tests/__init__.py | 0 .../poselib/core/tests/test_rotation.py | 56 - .../nv/poselib/poselib/skeleton/__init__.py | 0 .../poselib/skeleton/backend/__init__.py | 0 .../poselib/skeleton/backend/fbx/__init__.py | 0 .../skeleton/backend/fbx/fbx_py27_backend.py | 308 ---- .../skeleton/backend/fbx/fbx_read_wrapper.py | 75 - .../nv/poselib/poselib/skeleton/skeleton3d.py | 1269 ----------------- .../poselib/skeleton/tests/__init__.py | 0 .../nv/poselib/poselib/skeleton/tests/ant.xml | 71 - .../poselib/skeleton/tests/test_skeleton.py | 132 -- .../poselib/skeleton/tests/transfer_npy.py | 31 - .../poselib/poselib/visualization/__init__.py | 0 .../poselib/poselib/visualization/common.py | 189 --- .../nv/poselib/poselib/visualization/core.py | 78 - .../poselib/visualization/plt_plotter.py | 408 ------ .../visualization/simple_plotter_tasks.py | 192 --- .../visualization/skeleton_plotter_tasks.py | 194 --- .../poselib/visualization/tests/__init__.py | 0 .../visualization/tests/test_plotter.py | 16 - smpl_sim/envs/nv/poselib/setup.py | 19 - smpl_sim/poselib/skeleton/skeleton3d.py | 2 +- smpl_sim/smpllib/smpl_local_robot.py | 1 - test.xml | 208 +-- 40 files changed, 1789 insertions(+), 3840 deletions(-) rename {smpl_sim/envs/nv/poselib => isaaclab}/__init__.py (100%) mode change 100644 => 100755 create mode 100755 isaaclab/atlas_21jts_config.py create mode 100755 isaaclab/joint_monkey.py create mode 100755 isaaclab/launch_app.py create mode 100644 isaaclab/launch_smpl_env.py create mode 100755 isaaclab/run_atlas.py create mode 100755 isaaclab/smpl_config.py delete mode 100644 smpl_sim/envs/nv/poselib/README.md delete mode 100644 smpl_sim/envs/nv/poselib/poselib/__init__.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/core/__init__.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/core/backend/__init__.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/core/backend/abstract.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/core/backend/logger.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/core/rotation3d.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/core/tensor_utils.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/core/tests/__init__.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/core/tests/test_rotation.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/skeleton/__init__.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/skeleton/backend/__init__.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/skeleton/backend/fbx/__init__.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/skeleton/backend/fbx/fbx_py27_backend.py delete mode 100644 
smpl_sim/envs/nv/poselib/poselib/skeleton/backend/fbx/fbx_read_wrapper.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/skeleton/skeleton3d.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/skeleton/tests/__init__.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/skeleton/tests/ant.xml delete mode 100644 smpl_sim/envs/nv/poselib/poselib/skeleton/tests/test_skeleton.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/skeleton/tests/transfer_npy.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/visualization/__init__.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/visualization/common.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/visualization/core.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/visualization/plt_plotter.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/visualization/simple_plotter_tasks.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/visualization/skeleton_plotter_tasks.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/visualization/tests/__init__.py delete mode 100644 smpl_sim/envs/nv/poselib/poselib/visualization/tests/test_plotter.py delete mode 100644 smpl_sim/envs/nv/poselib/setup.py diff --git a/MUJOCO_LOG.TXT b/MUJOCO_LOG.TXT index e10dc51..cfe1998 100644 --- a/MUJOCO_LOG.TXT +++ b/MUJOCO_LOG.TXT @@ -22,3 +22,15 @@ ERROR: could not initialize GLFW Mon May 27 18:49:02 2024 ERROR: could not initialize GLFW +Wed Oct 23 17:07:26 2024 +ERROR: could not initialize GLFW + +Wed Oct 23 17:07:32 2024 +ERROR: could not create window + +Wed Oct 23 17:08:00 2024 +ERROR: could not initialize GLFW + +Wed Oct 23 17:08:06 2024 +ERROR: could not create window + diff --git a/smpl_sim/envs/nv/poselib/__init__.py b/isaaclab/__init__.py old mode 100644 new mode 100755 similarity index 100% rename from smpl_sim/envs/nv/poselib/__init__.py rename to isaaclab/__init__.py diff --git a/isaaclab/atlas_21jts_config.py b/isaaclab/atlas_21jts_config.py new file mode 100755 index 0000000..79a3dff --- /dev/null +++ b/isaaclab/atlas_21jts_config.py @@ -0,0 +1,100 @@ +# Copyright (c) 2022-2024, The Isaac Lab Project Developers. +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for a simple Cartpole robot.""" + + +import omni.isaac.lab.sim as sim_utils +from omni.isaac.lab.actuators import ImplicitActuatorCfg +from omni.isaac.lab.assets import ArticulationCfg +from omni.isaac.lab.utils.assets import ISAACLAB_NUCLEUS_DIR + +## +# Configuration +## + + +Atlas_21jts_CFG = ArticulationCfg( + spawn=sim_utils.UsdFileCfg( + usd_path=f"data/assets/usd/Atlas/atlas_21jts.usda", + + activate_contact_sensors=True, + rigid_props=sim_utils.RigidBodyPropertiesCfg( + disable_gravity=False, + retain_accelerations=False, + linear_damping=0.0, + angular_damping=0.0, + max_linear_velocity=1000.0, + max_angular_velocity=1000.0, + max_depenetration_velocity=1.0, + ), + articulation_props=sim_utils.ArticulationRootPropertiesCfg( + enabled_self_collisions=False, solver_position_iteration_count=4, solver_velocity_iteration_count=4 + ), + ), + init_state=ArticulationCfg.InitialStateCfg( + pos=(0.0, 0.0, 1.05), + joint_pos={".*": 0.0}, + joint_vel={".*": 0.0}, + ), + soft_joint_pos_limit_factor=0.9, + actuators={ + "legs": ImplicitActuatorCfg( + joint_names_expr=["lleg_hpy", "rleg_hpy", "lleg_hpx", "rleg_hpx", "lleg_hpz", "rleg_hpz", "lleg_kn", "rleg_kn", "lleg_aky", "rleg_aky", "lleg_akx", "rleg_akx"], + stiffness={ + "lleg_hpy": 500, + "rleg_hpy": 500, + "lleg_hpx": 500, + "rleg_hpx": 500, + "lleg_hpz": 500, + "rleg_hpz": 500, + "lleg_kn": 500, + "rleg_kn": 500, + "lleg_aky": 500, + "rleg_aky": 500, + "lleg_akx": 500, + "rleg_akx": 500, + }, + damping={ + "lleg_hpy": 50, + "rleg_hpy": 50, + "lleg_hpx": 50, + "rleg_hpx": 50, + "lleg_hpz": 50, + "rleg_hpz": 50, + "lleg_kn": 50, + "rleg_kn": 50, + "lleg_aky": 50, + "rleg_aky": 50, + "lleg_akx": 50, + "rleg_akx": 50, + }, + ), + + "arms": ImplicitActuatorCfg( + joint_names_expr=["larm_shy", "rarm_shy", "larm_shx", "rarm_shx", "larm_shz", "rarm_shz", "larm_el", "rarm_el"], + stiffness={ + "larm_shy": 500, + "rarm_shy": 500, + "larm_shx": 500, + "rarm_shx": 500, + "larm_shz": 500, + "rarm_shz": 500, + "larm_el": 500, + "rarm_el": 500, + }, + damping={ + "larm_shy": 50, + "rarm_shy": 50, + "larm_shx": 50, + "rarm_shx": 50, + "larm_shz": 50, + "rarm_shz": 50, + "larm_el": 50, + "rarm_el": 50, + }, + ) + }, +) \ No newline at end of file diff --git a/isaaclab/joint_monkey.py b/isaaclab/joint_monkey.py new file mode 100755 index 0000000..61b95cb --- /dev/null +++ b/isaaclab/joint_monkey.py @@ -0,0 +1,192 @@ +# Copyright (c) 2022-2024, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""This script demonstrates how to use the interactive scene interface to setup a scene with multiple prims. + +.. 
code-block:: bash + + # Usage + ./isaaclab.sh -p source/standalone/tutorials/02_scene/create_scene.py --num_envs 32 + +""" + +"""Launch Isaac Sim Simulator first.""" + + +import argparse +from omni.isaac.lab.app import AppLauncher +from phc.utils.rotation_conversions import xyzw_to_wxyz + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on using the interactive scene interface.") +parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to spawn.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import torch + +import omni.isaac.lab.sim as sim_utils +from omni.isaac.lab.assets import ArticulationCfg, AssetBaseCfg +from omni.isaac.lab.scene import InteractiveScene, InteractiveSceneCfg +from omni.isaac.lab.sim import SimulationContext +from omni.isaac.lab.utils import configclass + +ANIM_SEEK_LOWER = 1 +ANIM_SEEK_UPPER = 2 +ANIM_FINISHED = 3 + +## +# Pre-defined configs +## +# from omni.isaac.lab_assets import CARTPOLE_CFG # isort:skip +from isaaclab.smpl_config import SMPL_CFG, SMPLX_CFG +from isaaclab.atlas_21jts_config import Atlas_21jts_CFG + + +from phc.utils.flags import flags +from phc.utils.motion_lib_smpl import MotionLibSMPL as MotionLibSMPL +from phc.utils.motion_lib_base import FixHeightMode +from poselib.poselib.skeleton.skeleton3d import SkeletonTree, SkeletonMotion, SkeletonState +from smpl_sim.smpllib.smpl_local_robot import SMPL_Robot +from smpl_sim.smpllib.smpl_joint_names import SMPL_BONE_ORDER_NAMES, SMPLX_BONE_ORDER_NAMES, SMPLH_BONE_ORDER_NAMES, SMPL_MUJOCO_NAMES, SMPLH_MUJOCO_NAMES +from phc.learning.network_loader import load_z_encoder, load_z_decoder, load_pnn, load_mcp_mlp +from phc.env.tasks.humanoid_funcs import compute_humanoid_observations_smpl_max, compute_imitation_observations_v6 +from rl_games.algos_torch import torch_ext +from rl_games.algos_torch.players import rescale_actions +import torch +import joblib +from easydict import EasyDict +import numpy as np +import open3d as o3d +import copy +from scipy.spatial.transform import Rotation as sRot + + + +@configclass +class SMPLSceneCfg(InteractiveSceneCfg): + """Configuration for a cart-pole scene.""" + + # ground plane + ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg()) + + # lights + dome_light = AssetBaseCfg( + prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)) + ) + + # articulation + # robot: ArticulationCfg = Atlas_21jts_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + robot: ArticulationCfg = SMPLX_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + +def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene): + + """Runs the simulation loop.""" + # Extract scene entities + # note: we only do this here for readability. 
+ robot = scene["robot"] + # Define simulation stepping + sim_dt = sim.get_physics_dt() + count = 0 + env_ids = torch.zeros(scene.num_envs).to(sim.device).long() + + current_dof = 0 + anim_state = ANIM_SEEK_LOWER + anim_timer = 0.0 + anim_duration = 4.0 # Duration to animate each joint + incremental = 0.1 + + + # Simulation loop + + sim_dof_names = robot.data.joint_names + sim_body_names = robot.data.body_names + ref_root_state = torch.zeros((scene.num_envs, 13)).to(sim.device) + joint_pos = torch.zeros((scene.num_envs, len(sim_dof_names))).to(sim.device) + joint_vel = torch.zeros((scene.num_envs, len(sim_dof_names))).to(sim.device) + ref_root_state[:, 2] = 0.9 + ref_root_state[:, 3] = 1.3 + + while simulation_app.is_running(): + # Apply random action + # -- generate random joint efforts + # efforts = torch.randn_like(robot.data.joint_pos) * 5.0 + # -- apply action to the robot + # robot.set_joint_effort_target(efforts) + + ############################## PHC ######################################## + current_dof_val = joint_pos[:, current_dof].clone() # Preserve current_dof position + joint_pos.zero_() # Efficiently set all positions to zero + joint_pos[:, current_dof] = current_dof_val # Restore current_dof position + if anim_state == ANIM_SEEK_LOWER: + joint_pos[:, current_dof] -= incremental # Adjust the increment as needed + if joint_pos[:, current_dof] <= -np.pi: + joint_pos[:, current_dof] = -np.pi + anim_state = ANIM_SEEK_UPPER + elif anim_state == ANIM_SEEK_UPPER: + joint_pos[:, current_dof] += incremental + if joint_pos[:, current_dof] >= np.pi: + joint_pos[:, current_dof] = np.pi + anim_state = ANIM_FINISHED + elif anim_state == ANIM_FINISHED: + current_dof = (current_dof + 1) % len(sim_dof_names) + anim_state = ANIM_SEEK_LOWER + print("Animating DOF %d ('%s')" % (current_dof, sim_dof_names[current_dof]), anim_timer) + # Update the timer + anim_timer += sim_dt + if anim_timer >= anim_duration: + anim_timer = 0.0 + current_dof = (current_dof + 1) % len(sim_dof_names) + anim_state = ANIM_SEEK_LOWER + + robot.write_root_state_to_sim(ref_root_state, env_ids) + robot.write_joint_state_to_sim(joint_pos, joint_vel, None, env_ids) + + + ############################## PHC ######################################## + + # -- write data to sim + scene.write_data_to_sim() + # Perform step + # sim.step() + sim.render() + # Increment counter + count += 1 + # Update buffers + scene.update(sim_dt) + + +def main(): + """Main function.""" + # Load kit helper + sim_cfg = sim_utils.SimulationCfg(device=args_cli.device) + sim = SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view([2.5, 0.0, 4.0], [0.0, 0.0, 2.0]) + # Design scene + scene_cfg = SMPLSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0) + scene = InteractiveScene(scene_cfg) + # Play the simulator + sim.reset() + # Now we are ready! + print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim, scene) + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() \ No newline at end of file diff --git a/isaaclab/launch_app.py b/isaaclab/launch_app.py new file mode 100755 index 0000000..b6f505d --- /dev/null +++ b/isaaclab/launch_app.py @@ -0,0 +1,337 @@ +# Copyright (c) 2022-2024, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""This script demonstrates how to use the interactive scene interface to setup a scene with multiple prims. + +.. 
code-block:: bash + + # Usage + ./isaaclab.sh -p source/standalone/tutorials/02_scene/create_scene.py --num_envs 32 + +""" + +"""Launch Isaac Sim Simulator first.""" + + +import argparse +from omni.isaac.lab.app import AppLauncher +from phc.utils.rotation_conversions import xyzw_to_wxyz, wxyz_to_xyzw +import sys + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on using the interactive scene interface.") +parser.add_argument("--num_envs", type=int, default=2, help="Number of environments to spawn.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli, hydra_args = parser.parse_known_args() + +sys.argv = [sys.argv[0]] + hydra_args + + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import torch + +import omni.isaac.lab.sim as sim_utils +from omni.isaac.lab.assets import ArticulationCfg, AssetBaseCfg +from omni.isaac.lab.envs import DirectRLEnvCfg +from omni.isaac.lab.sim import SimulationCfg +from omni.isaac.lab.scene import InteractiveScene, InteractiveSceneCfg +from omni.isaac.lab.sim import SimulationContext, PhysxCfg +from omni.isaac.lab.utils import configclass +from omni.isaac.lab.devices.keyboard.se2_keyboard import Se2Keyboard +import carb +from carb.input import KeyboardEventType + + + +## +# Pre-defined configs +## +# from omni.isaac.lab_assets import CARTPOLE_CFG # isort:skip +from isaaclab.smpl_config import SMPL_CFG + + +from phc.utils.flags import flags +from phc.utils.motion_lib_smpl import MotionLibSMPL as MotionLibSMPL +from phc.utils.motion_lib_base import FixHeightMode +from poselib.poselib.skeleton.skeleton3d import SkeletonTree, SkeletonMotion, SkeletonState +from smpl_sim.smpllib.smpl_local_robot import SMPL_Robot +from smpl_sim.smpllib.smpl_joint_names import SMPL_BONE_ORDER_NAMES, SMPLX_BONE_ORDER_NAMES, SMPLH_BONE_ORDER_NAMES, SMPL_MUJOCO_NAMES, SMPLH_MUJOCO_NAMES +from phc.learning.network_loader import load_z_encoder, load_z_decoder, load_pnn, load_mcp_mlp +from phc.env.tasks.humanoid_funcs import compute_humanoid_observations_smpl_max, compute_imitation_observations_v6 +from rl_games.algos_torch import torch_ext +from rl_games.algos_torch.players import rescale_actions +import torch +import joblib +from easydict import EasyDict +import numpy as np +import open3d as o3d +import copy +from scipy.spatial.transform import Rotation as sRot +import time + + + +@configclass +class SMPLSceneCfg(InteractiveSceneCfg): + """Configuration for a cart-pole scene.""" + + # ground plane + ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg()) + + # lights + dome_light = AssetBaseCfg( + prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)) + ) + # articulation + smpl_robot: ArticulationCfg = SMPL_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + +def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene): + def reset_robot(robot): + motion_res = motion_lib.get_motion_state(motion_id, motion_time) + ref_root_pos, ref_root_rot, ref_dof_pos, ref_root_vel, ref_root_ang_vel, ref_dof_vel, ref_smpl_params, ref_limb_weights, ref_pose_aa, ref_rb_pos, ref_rb_rot, ref_body_vel, ref_body_ang_vel = \ + motion_res["root_pos"], motion_res["root_rot"], motion_res["dof_pos"], motion_res["root_vel"], motion_res["root_ang_vel"], motion_res["dof_vel"], \ + motion_res["motion_bodies"], motion_res["motion_limb_weights"], 
motion_res["motion_aa"], motion_res["rg_pos"], motion_res["rb_rot"], motion_res["body_vel"], motion_res["body_ang_vel"] + init_joint_pos = ref_dof_pos[:, gym_to_sim_dof] + init_joint_vel = ref_dof_vel[:, gym_to_sim_dof] + + # ref_joint_pos = torch.from_numpy(sRot.from_rotvec(ref_joint_pos.reshape(-1, 3)).as_euler("xyz")).to(ref_joint_pos).reshape(scene.num_envs, -1) + + init_root_state = torch.cat([ref_root_pos, xyzw_to_wxyz(ref_root_rot), ref_root_vel, ref_root_ang_vel], dim = -1) + robot.write_root_state_to_sim(init_root_state, env_ids) + robot.write_joint_state_to_sim(init_joint_pos, init_joint_vel, None, env_ids) + + def test_keyboard(): + import ipdb; ipdb.set_trace() + print('....') + + keyboard = Se2Keyboard() + keyboard.add_callback(carb.input.KeyboardInput.W, test_keyboard) + + + + device = ( + torch.device("cuda", index=0) + if torch.cuda.is_available() + else torch.device("cpu") + ) + # motion_file = "sample_data/amass_isaac_simple_run_upright_slim.pkl" + motion_file = "data/amass/pkls/singles/hard_z_fut_1_1_2_upright_slim.pkl" + dt = 1/30 + time_steps = 1 + has_upright_start = True + motion_lib_cfg = EasyDict({ + "has_upright_start": has_upright_start, + "motion_file": motion_file, + "fix_height": FixHeightMode.full_fix, + "min_length": -1, + "max_length": 3000, + "im_eval": flags.im_eval, + "multi_thread": False , + "smpl_type": 'smpl', + "randomrize_heading": True, + "device": device, + }) + robot_cfg = { + "mesh": False, + "rel_joint_lm": False, + "upright_start": has_upright_start, + "remove_toe": False, + "real_weight_porpotion_capsules": True, + "real_weight_porpotion_boxes": True, + "model": "smpl", + "big_ankle": True, + "box_body": True, + "body_params": {}, + "joint_params": {}, + "geom_params": {}, + "actuator_params": {}, + } + smpl_robot = SMPL_Robot( + robot_cfg, + data_dir="data/smpl", + ) + + gender_beta = np.zeros((17)) + smpl_robot.load_from_skeleton(betas=torch.from_numpy(gender_beta[None, 1:]), gender=gender_beta[0:1], objs_info=None) + test_good = f"/tmp/smpl/test_good.xml" + smpl_robot.write_xml(test_good) + sk_tree = SkeletonTree.from_mjcf(test_good) + num_motions = 10 + skeleton_trees = [sk_tree] * num_motions + start_idx = 0 + env_ids = torch.zeros(scene.num_envs).to(device).long() + motion_lib = MotionLibSMPL(motion_lib_cfg) + motion_lib.load_motions(skeleton_trees=skeleton_trees, + gender_betas=[torch.from_numpy(gender_beta)] * num_motions, + limb_weights=[np.zeros(10)] * num_motions, + random_sample=False, + start_idx = start_idx, + max_len=-1) + motion_id, time_steps = torch.zeros(scene.num_envs).to(device).long(), torch.zeros(scene.num_envs).to(device).float() + motion_id[:] = 0 + motion_len = motion_lib.get_motion_length(motion_id).item() + motion_time = time_steps % motion_len + + policy_path = "output/HumanoidIm/phc_3/Humanoid.pth" + check_points = [torch_ext.load_checkpoint(policy_path)] + pnn = load_pnn(check_points[0], num_prim = 3, has_lateral = False, activation = "silu", device = device) + running_mean, running_var = check_points[-1]['running_mean_std']['running_mean'], check_points[-1]['running_mean_std']['running_var'] + action_offset = joblib.load("action_offset.pkl") + pd_action_offset = action_offset[0] + pd_action_scale = action_offset[1] + ####### + + + """Runs the simulation loop.""" + # Extract scene entities + # note: we only do this here for readability. 
+ robot = scene["smpl_robot"] + # Define simulation stepping + sim_dt = sim.get_physics_dt() + + ########################################################### + sim_joint_names = robot.data.joint_names + sim_body_names = robot.data.body_names + gym_joint_names = [f"{j}_{axis}" for j in SMPL_MUJOCO_NAMES[1:] for axis in ["x", "y", "z"]] + sim_to_gym_body = [sim_body_names.index(n) for n in SMPL_MUJOCO_NAMES] + sim_to_gym_dof = [sim_joint_names.index(n) for n in gym_joint_names] + + gym_to_sim_dof = [gym_joint_names.index(n) for n in sim_joint_names] + gym_to_sim_body = [SMPL_MUJOCO_NAMES.index(n) for n in sim_body_names] + + + reset_robot(robot) + + # Simulation loop + while simulation_app.is_running(): + start_time = time.time() + # Apply random action + # -- generate random joint efforts + # efforts = torch.randn_like(robot.data.joint_pos) * 5.0 + # -- apply action to the robot + # robot.set_joint_effort_target(efforts) + + ############################## PHC ######################################## + + motion_time = time_steps % motion_len + motion_res = motion_lib.get_motion_state(motion_id, motion_time) + ref_root_pos, ref_root_rot, ref_dof_pos, ref_root_vel, ref_root_ang_vel, ref_dof_vel, ref_smpl_params, ref_limb_weights, ref_pose_aa, ref_rb_pos, ref_rb_rot, ref_body_vel, ref_body_ang_vel = \ + motion_res["root_pos"], motion_res["root_rot"], motion_res["dof_pos"], motion_res["root_vel"], motion_res["root_ang_vel"], motion_res["dof_vel"], \ + motion_res["motion_bodies"], motion_res["motion_limb_weights"], motion_res["motion_aa"], motion_res["rg_pos"], motion_res["rb_rot"], motion_res["body_vel"], motion_res["body_ang_vel"] + + body_pos = robot.data.body_pos_w[:, sim_to_gym_body ] + body_rot = wxyz_to_xyzw(robot.data.body_quat_w[:, sim_to_gym_body]) + body_vel = robot.data.body_lin_vel_w[:, sim_to_gym_body] + body_ang_vel = robot.data.body_ang_vel_w[:, sim_to_gym_body] + + root_pos = body_pos[:, 0, :] + root_rot = body_rot[:, 0, :] + body_pos_subset = body_pos + body_rot_subset = body_rot + body_vel_subset = body_vel + body_ang_vel_subset = body_ang_vel + ref_rb_pos_subset = ref_rb_pos + ref_rb_rot_subset = ref_rb_rot + ref_body_vel_subset = ref_body_vel + ref_body_ang_vel_subset = ref_body_ang_vel + ref_joint_pos = ref_dof_pos[:, gym_to_sim_dof] + ref_joint_vel = ref_dof_vel[:, gym_to_sim_dof] + + # Data replay + + # ref_root_state = torch.cat([ref_root_pos, xyzw_to_wxyz(ref_root_rot), ref_root_vel, ref_root_ang_vel], dim = -1) + # robot.write_root_state_to_sim(ref_root_state, env_ids) + # robot.write_joint_state_to_sim(ref_joint_pos, ref_joint_vel, None, env_ids) + + self_obs = compute_humanoid_observations_smpl_max(body_pos, body_rot, body_vel, body_ang_vel, ref_smpl_params, ref_limb_weights, True, True, has_upright_start, False, False) + task_obs = compute_imitation_observations_v6(root_pos, root_rot, body_pos_subset, body_rot_subset, body_vel_subset, body_ang_vel_subset, ref_rb_pos_subset, ref_rb_rot_subset, ref_body_vel_subset, ref_body_ang_vel_subset, 1, has_upright_start) + + full_obs = torch.cat([self_obs, task_obs], dim = -1) + full_obs = ((full_obs - running_mean.float()) / torch.sqrt(running_var.float() + 1e-05)) + full_obs = torch.clamp(full_obs, min=-5.0, max=5.0) + + with torch.no_grad(): + actions, _ = pnn(full_obs, idx=0) + actions = rescale_actions(-1, 1, torch.clamp(actions, -1, 1)) + actions = actions * pd_action_scale + pd_action_offset + actions = actions[:, gym_to_sim_dof] + + robot.set_joint_position_target(actions, None, env_ids) + + ############################## 
PHC ######################################## + + if time_steps > motion_len: + print("resetting", time_steps.item()) + reset_robot(robot) + time_steps[:] = 0 + + # -- write data to sim + scene.write_data_to_sim() + # Perform step + sim.step() + # sim.render() + # Increment counter + time_steps += sim_dt + # Update buffers + scene.update(sim_dt) + + # Measure wall time and ensure 30 fps + elapsed_time = time.time() - start_time + sleep_time = max(0, (1.0 / 30.0) - elapsed_time) + # time.sleep(sleep_time) + + +def main(): + """Main function.""" + # Load kit helper + sim_cfg = sim_utils.SimulationCfg( + device=args_cli.device, + dt=1 / 60, + # decimation will be set in the task config + # up axis will always be Z in isaac sim + # use_gpu_pipeline is deduced from the device + gravity=(0.0, 0.0, -9.81), + physx = PhysxCfg( + # num_threads is no longer needed + solver_type=1, + # use_gpu is deduced from the device + max_position_iteration_count=4, + max_velocity_iteration_count=0, + # moved to actor config + # moved to actor config + bounce_threshold_velocity=0.2, + # moved to actor config + # default_buffer_size_multiplier is no longer needed + gpu_max_rigid_contact_count=2**23, + # num_subscenes is no longer needed + # contact_collection is no longer needed + ) + ) + sim = SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view([2.5, 0.0, 4.0], [0.0, 0.0, 2.0]) + # Design scene + scene_cfg = SMPLSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0) + scene = InteractiveScene(scene_cfg) + # Play the simulator + sim.reset() + # Now we are ready! + print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim, scene) + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() \ No newline at end of file diff --git a/isaaclab/launch_smpl_env.py b/isaaclab/launch_smpl_env.py new file mode 100644 index 0000000..ca1b4b0 --- /dev/null +++ b/isaaclab/launch_smpl_env.py @@ -0,0 +1,421 @@ +# Copyright (c) 2022-2024, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""This script demonstrates how to use the interactive scene interface to setup a scene with multiple prims. + +.. 
code-block:: bash + + # Usage + ./isaaclab.sh -p source/standalone/tutorials/02_scene/create_scene.py --num_envs 32 + +""" + +"""Launch Isaac Sim Simulator first.""" + + +import argparse +from omni.isaac.lab.app import AppLauncher +from smpl_sim.utils.rotation_conversions import xyzw_to_wxyz, wxyz_to_xyzw +import sys + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on using the interactive scene interface.") +parser.add_argument("--num_envs", type=int, default=2, help="Number of environments to spawn.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli, hydra_args = parser.parse_known_args() + +sys.argv = [sys.argv[0]] + hydra_args + + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import torch + +import omni.isaac.lab.sim as sim_utils +from omni.isaac.lab.assets import Articulation, ArticulationCfg, AssetBaseCfg +from omni.isaac.lab.envs import DirectRLEnvCfg +from omni.isaac.lab.sim import SimulationCfg +from omni.isaac.lab.scene import InteractiveScene, InteractiveSceneCfg +from omni.isaac.lab.sim import SimulationContext, PhysxCfg +from omni.isaac.lab.utils import configclass +from omni.isaac.lab.envs import DirectRLEnv, DirectRLEnvCfg, ViewerCfg +from omni.isaac.lab.devices.keyboard.se2_keyboard import Se2Keyboard +import omni.isaac.lab.utils.math as lab_math_utils +import carb +import imageio +from carb.input import KeyboardEventType + + + +## +# Pre-defined configs +## +# from omni.isaac.lab_assets import CARTPOLE_CFG # isort:skip +from isaaclab.smpl_config import SMPL_CFG, SMPLX_CFG + +from collections.abc import Sequence +from phc.utils.flags import flags +from phc.utils.motion_lib_smpl import MotionLibSMPL as MotionLibSMPL +from phc.utils.motion_lib_base import FixHeightMode +from poselib.poselib.skeleton.skeleton3d import SkeletonTree, SkeletonMotion, SkeletonState +from smpl_sim.smpllib.smpl_local_robot import SMPL_Robot +from smpl_sim.smpllib.smpl_joint_names import SMPL_BONE_ORDER_NAMES, SMPLX_BONE_ORDER_NAMES, SMPLH_BONE_ORDER_NAMES, SMPL_MUJOCO_NAMES, SMPLH_MUJOCO_NAMES +from phc.learning.network_loader import load_z_encoder, load_z_decoder, load_pnn, load_mcp_mlp +from phc.env.tasks.humanoid_funcs import compute_humanoid_observations_smpl_max, compute_imitation_observations_v6 +from rl_games.algos_torch import torch_ext +from omni.isaac.lab.sensors import TiledCamera, TiledCameraCfg, save_images_to_file +from rl_games.algos_torch.players import rescale_actions +import torch +import joblib +from easydict import EasyDict +import numpy as np +import copy +from scipy.spatial.transform import Rotation as sRot +import time + + + +flags.test=False + +@configclass +class SMPLSceneCfg(InteractiveSceneCfg): + """Configuration for a cart-pole scene.""" + + # ground plane + ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg()) + + + + +@configclass +class SMPLEnvCfg(DirectRLEnvCfg): + num_actions = 69 + num_observations = 1 + num_states = 1 + + decimation = 2 + + sim = sim_utils.SimulationCfg( + device=args_cli.device, + dt=1 / 60, + # decimation will be set in the task config + # up axis will always be Z in isaac sim + # use_gpu_pipeline is deduced from the device + gravity=(0.0, 0.0, -9.81), + physx = PhysxCfg( + # num_threads is no longer needed + solver_type=1, + # use_gpu is deduced from the device + max_position_iteration_count=4, + 
max_velocity_iteration_count=0, + # moved to actor config + # moved to actor config + bounce_threshold_velocity=0.2, + # moved to actor config + # default_buffer_size_multiplier is no longer needed + gpu_max_rigid_contact_count=2**23, + # num_subscenes is no longer needed + # contact_collection is no longer needed + ) + ) + + + viewer = ViewerCfg(eye=(20.0, 20.0, 20.0)) + + # smpl_robot: ArticulationCfg = SMPL_CFG.replace(prim_path="/World/envs/env_.*/Robot") + smpl_robot: ArticulationCfg = SMPLX_CFG.replace(prim_path="/World/envs/env_.*/Robot") + + tiled_camera: TiledCameraCfg = TiledCameraCfg( + prim_path="/World/envs/env_.*/Camera", + offset=TiledCameraCfg.OffsetCfg(pos=(-4.0, 0.0, 2.0), rot=(0.9961947, 0, 0.0871557, 0), convention="world"), + data_types=["rgb"], + spawn=sim_utils.PinholeCameraCfg( + focal_length=10.0, focus_distance=50.0, horizontal_aperture=5, clipping_range=(0.1, 20.0) + ), + width=512, + height=512, + ) + + # scene + scene: InteractiveSceneCfg = SMPLSceneCfg(num_envs=args_cli.num_envs, env_spacing=20.0, replicate_physics=True) + + +class SMPLEnv(DirectRLEnv): + cfg: SMPLEnvCfg + + def __init__(self, cfg: SMPLEnvCfg, render_mode: str | None = None, **kwargs): + self.cfg = cfg + self.humanoid_type = "smplx" + super().__init__(cfg, render_mode, **kwargs) + + SMPL_NAMES = SMPLH_MUJOCO_NAMES if self.humanoid_type == "smplx" else SMPL_MUJOCO_NAMES + sim_joint_names = self._robot.data.joint_names + sim_body_names = self._robot.data.body_names + gym_joint_names = [f"{j}_{axis}" for j in SMPL_NAMES[1:] for axis in ["x", "y", "z"]] + + self.sim_to_gym_body = [sim_body_names.index(n) for n in SMPL_NAMES] + self.sim_to_gym_dof = [sim_joint_names.index(n) for n in gym_joint_names] + + self.gym_to_sim_dof = [gym_joint_names.index(n) for n in sim_joint_names] + self.gym_to_sim_body = [SMPL_NAMES.index(n) for n in sim_body_names] + + keyboard_interface = Se2Keyboard() + keyboard_interface.add_callback("R", self.reset) + + + def close(self): + super().close() + + def _configure_gym_env_spaces(self): + self.num_actions = self.cfg.num_actions + self.num_observations = self.cfg.num_observations + self.num_states = self.cfg.num_states + + + def _setup_scene(self): + self._robot = robot = Articulation(self.cfg.smpl_robot) + self._tiled_camera = TiledCamera(self.cfg.tiled_camera) + + # clone, filter, and replicate + self.scene.clone_environments(copy_from_source=False) + # self.scene.filter_collisions(global_prim_paths=[]) + + # add articultion and sensors to scene + self.scene.articulations["robot"] = self._robot + self.scene.sensors["tiled_camera"] = self._tiled_camera + # add lights + light_cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75)) + light_cfg.func("/World/Light", light_cfg) + + + # motion_file = "data/amass/pkls/singles/hard_z_fut_1_1_2_upright_slim.pkl" + # motion_file = "data/amass/pkls/singles/0-CMU_88_88_05_poses_upright_slim.pkl" + motion_file = "data/amass_x/mouse.pkl" + self._load_motion(motion_file) + + + def _load_motion(self, motion_file): + + time_steps = 1 + self._has_upright_start = False + motion_lib_cfg = EasyDict({ + "has_upright_start": self._has_upright_start, + "motion_file": motion_file, + "fix_height": FixHeightMode.full_fix, + "min_length": -1, + "max_length": 3000, + "im_eval": flags.im_eval, + "multi_thread": False , + "smpl_type": self.humanoid_type, + "randomrize_heading": True, + "device": self.device, + }) + robot_cfg = { + "mesh": False, + "rel_joint_lm": False, + "upright_start": self._has_upright_start, + "remove_toe": 
False, + "real_weight_porpotion_capsules": True, + "real_weight_porpotion_boxes": True, + "model": self.humanoid_type, + "big_ankle": True, + "box_body": True, + "body_params": {}, + "joint_params": {}, + "geom_params": {}, + "actuator_params": {}, + } + smpl_robot = SMPL_Robot( + robot_cfg, + data_dir="data/smpl", + ) + + # gender_beta = np.zeros((17)) + gender_beta = np.zeros((16)) + smpl_robot.load_from_skeleton(betas=torch.from_numpy(gender_beta[None, 1:]), gender=gender_beta[0:1], objs_info=None) + test_good = f"/tmp/smpl/test_good.xml" + smpl_robot.write_xml(test_good) + sk_tree = SkeletonTree.from_mjcf(test_good) + num_motions = self.num_envs + skeleton_trees = [sk_tree] * num_motions + start_idx = 0 + + motion_lib = MotionLibSMPL(motion_lib_cfg) + motion_lib.load_motions(skeleton_trees=skeleton_trees, + gender_betas=[torch.from_numpy(gender_beta)] * num_motions, + limb_weights=[np.zeros(10)] * num_motions, + random_sample=False, + start_idx = start_idx, + max_len=-1) + self._motion_lib = motion_lib + self._motion_id, self._motion_time = torch.arange(self.num_envs).to(self.device).long(), torch.zeros(self.num_envs).to(self.device).float() + + + def _pre_physics_step(self, actions: torch.Tensor) -> None: + self.actions = actions + + # robot_root = self._robot.data.root_pos_w + # eyes = robot_root + torch.tensor([0, -4, 0]).to(robot_root) + # targets = robot_root + # self._tiled_camera.set_world_poses_from_view(eyes, targets) + + # head_pos = self._robot.data.body_pos_w[:, self._robot.data.body_names.index("Head")] + # head_rot = self._robot.data.body_quat_w[:, self._robot.data.body_names.index("Head")] + # eye_offset = torch.tensor([[0.0, 0.075, 0.1]] * self.num_envs, device=head_rot.device) + + # eye_pos = head_pos + lab_math_utils.quat_rotate(head_rot, eye_offset) + # self._tiled_camera.set_world_poses(eye_pos, head_rot) + pass + + def _post_physics_step(self) -> None: + + + pass + + def _apply_action(self) -> None: + self._robot.set_joint_position_target(self.actions, joint_ids=None) + + def _get_observations(self) -> dict: + + self._motion_time = (self.episode_length_buf + 1) * self.step_dt + + motion_res = self._motion_lib.get_motion_state(self._motion_id, self._motion_time) + ref_root_pos, ref_root_rot, ref_dof_pos, ref_root_vel, ref_root_ang_vel, ref_dof_vel, ref_smpl_params, ref_limb_weights, ref_pose_aa, ref_rb_pos, ref_rb_rot, ref_body_vel, ref_body_ang_vel = \ + motion_res["root_pos"], motion_res["root_rot"], motion_res["dof_pos"], motion_res["root_vel"], motion_res["root_ang_vel"], motion_res["dof_vel"], \ + motion_res["motion_bodies"], motion_res["motion_limb_weights"], motion_res["motion_aa"], motion_res["rg_pos"], motion_res["rb_rot"], motion_res["body_vel"], motion_res["body_ang_vel"] + + body_pos = self._robot.data.body_pos_w[:, self.sim_to_gym_body ] + body_rot = wxyz_to_xyzw(self._robot.data.body_quat_w[:, self.sim_to_gym_body]) + body_vel = self._robot.data.body_lin_vel_w[:, self.sim_to_gym_body] + body_ang_vel = self._robot.data.body_ang_vel_w[:, self.sim_to_gym_body] + + root_pos = body_pos[:, 0, :] + root_rot = body_rot[:, 0, :] + body_pos_subset = body_pos + body_rot_subset = body_rot + body_vel_subset = body_vel + body_ang_vel_subset = body_ang_vel + ref_rb_pos_subset = ref_rb_pos + ref_rb_rot_subset = ref_rb_rot + ref_body_vel_subset = ref_body_vel + ref_body_ang_vel_subset = ref_body_ang_vel + ref_joint_pos = ref_dof_pos[:, self.gym_to_sim_dof] + ref_joint_vel = ref_dof_vel[:, self.gym_to_sim_dof] + + # Data replay + + # ref_root_state = 
torch.cat([ref_root_pos, xyzw_to_wxyz(ref_root_rot), ref_root_vel, ref_root_ang_vel], dim = -1) + # robot.write_root_state_to_sim(ref_root_state, env_ids) + # robot.write_joint_state_to_sim(ref_joint_pos, ref_joint_vel, None, env_ids) + + self_obs = compute_humanoid_observations_smpl_max(body_pos, body_rot, body_vel, body_ang_vel, ref_smpl_params, ref_limb_weights, True, True, self._has_upright_start, False, False) + task_obs = compute_imitation_observations_v6(root_pos, root_rot, body_pos_subset, body_rot_subset, body_vel_subset, body_ang_vel_subset, ref_rb_pos_subset, ref_rb_rot_subset, ref_body_vel_subset, ref_body_ang_vel_subset, 1, self._has_upright_start) + + return { + "self_obs": self_obs, + "task_obs": task_obs, + } + + def _get_rewards(self) -> torch.Tensor: + return torch.zeros(self.num_envs) + + def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]: + return torch.zeros(self.num_envs), torch.zeros(self.num_envs) + + def _reset_idx(self, env_ids: Sequence[int]): + super()._reset_idx(env_ids) + self._motion_time[env_ids] = 0 + + motion_res = self._motion_lib.get_motion_state(self._motion_id, self._motion_time) + ref_root_pos, ref_root_rot, ref_dof_pos, ref_root_vel, ref_root_ang_vel, ref_dof_vel, ref_smpl_params, ref_limb_weights, ref_pose_aa, ref_rb_pos, ref_rb_rot, ref_body_vel, ref_body_ang_vel = \ + motion_res["root_pos"], motion_res["root_rot"], motion_res["dof_pos"], motion_res["root_vel"], motion_res["root_ang_vel"], motion_res["dof_vel"], \ + motion_res["motion_bodies"], motion_res["motion_limb_weights"], motion_res["motion_aa"], motion_res["rg_pos"], motion_res["rb_rot"], motion_res["body_vel"], motion_res["body_ang_vel"] + init_joint_pos = ref_dof_pos[:, self.gym_to_sim_dof] + init_joint_vel = ref_dof_vel[:, self.gym_to_sim_dof] + + # ref_joint_pos = torch.from_numpy(sRot.from_rotvec(ref_joint_pos.reshape(-1, 3)).as_euler("xyz")).to(ref_joint_pos).reshape(scene.num_envs, -1) + + init_root_state = torch.cat([ref_root_pos, xyzw_to_wxyz(ref_root_rot), ref_root_vel, ref_root_ang_vel], dim = -1) + self._robot.write_root_state_to_sim(init_root_state, env_ids) + self._robot.write_joint_state_to_sim(init_joint_pos, init_joint_vel, None, env_ids) + + +def main(): + """Main function.""" + env_cfg = SMPLEnvCfg() + env = SMPLEnv(env_cfg) + + + device = env.device + if env.humanoid_type == "smplx": + policy_path = "output/HumanoidIm/phc_x_pnn/Humanoid.pth" + else: + policy_path = "output/HumanoidIm/phc_3/Humanoid.pth" + + policy_path = "output/HumanoidIm/phc_x_pnn/Humanoid.pth" + check_points = [torch_ext.load_checkpoint(policy_path)] + pnn = load_pnn(check_points[0], num_prim = 3, has_lateral = False, activation = "silu", device = device) + running_mean, running_var = check_points[-1]['running_mean_std']['running_mean'], check_points[-1]['running_mean_std']['running_var'] + if env.humanoid_type == "smplx": + action_offset = joblib.load("data/action_offset_smplx.pkl") + else: + action_offset = joblib.load("data/action_offset_smpl.pkl") + + pd_action_offset = action_offset[0] + pd_action_scale = action_offset[1] + + writer = imageio.get_writer('tiled_camera_output.mp4', fps=30) + time = 0 + obs_dict, extras = env.reset() + while True: + + #### Test Rendering ##### + # camera_data = env.scene['tiled_camera'].data.output['rgb'] + # camera_data = (camera_data).byte().cpu().numpy() + # batch_size = camera_data.shape[0] + # grid_size = int(np.ceil(np.sqrt(batch_size))) + # frame_height, frame_width = camera_data.shape[1], camera_data.shape[2] + # collage = np.zeros((grid_size * 
frame_height, grid_size * frame_width, 3), dtype=np.uint8) + # for i in range(batch_size): + # row = i // grid_size + # col = i % grid_size + # collage[row * frame_height:(row + 1) * frame_height, col * frame_width:(col + 1) * frame_width, :] = camera_data[i] + # frame = collage + + # writer.append_data(frame) + + # time += 1 + # if time > 200: + # import ipdb; ipdb.set_trace() + # writer.close() + # break + #### Test Rendering ##### + + self_obs, task_obs = obs_dict["self_obs"], obs_dict["task_obs"] + full_obs = torch.cat([self_obs, task_obs], dim = -1) + full_obs = ((full_obs - running_mean.float()) / torch.sqrt(running_var.float() + 1e-05)) + full_obs = torch.clamp(full_obs, min=-5.0, max=5.0) + + with torch.no_grad(): + actions, _ = pnn(full_obs, idx=0) + actions = rescale_actions(-1, 1, torch.clamp(actions, -1, 1)) + actions = actions * pd_action_scale + pd_action_offset + actions = actions[:, env.gym_to_sim_dof] + + obs_dict, _, _, _, _ = env.step(actions) + + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() \ No newline at end of file diff --git a/isaaclab/run_atlas.py b/isaaclab/run_atlas.py new file mode 100755 index 0000000..0529417 --- /dev/null +++ b/isaaclab/run_atlas.py @@ -0,0 +1,324 @@ +# Copyright (c) 2022-2024, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""This script demonstrates how to use the interactive scene interface to setup a scene with multiple prims. + +.. code-block:: bash + + # Usage + ./isaaclab.sh -p source/standalone/tutorials/02_scene/create_scene.py --num_envs 32 + +""" + +"""Launch Isaac Sim Simulator first.""" + + +import argparse +from omni.isaac.lab.app import AppLauncher +from phc.utils.rotation_conversions import xyzw_to_wxyz, wxyz_to_xyzw +import sys + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on using the interactive scene interface.") +parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +# args_cli = parser.parse_args() +args_cli, hydra_args = parser.parse_known_args() + +sys.argv = [sys.argv[0]] + hydra_args + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import torch + +import omni.isaac.lab.sim as sim_utils +from omni.isaac.lab.assets import ArticulationCfg, AssetBaseCfg +from omni.isaac.lab.scene import InteractiveScene, InteractiveSceneCfg +from omni.isaac.lab.sim import SimulationContext, PhysxCfg +from omni.isaac.lab.utils import configclass +from omni.isaac.lab.devices.keyboard.se2_keyboard import Se2Keyboard +import carb +## +# Pre-defined configs +## +# from omni.isaac.lab_assets import CARTPOLE_CFG # isort:skip +from isaaclab.smpl_config import SMPL_CFG +from isaaclab.atlas_21jts_config import Atlas_21jts_CFG + + +from phc.utils.flags import flags +from phc.utils.motion_lib_real import MotionLibReal +from phc.utils.motion_lib_base import FixHeightMode +from poselib.poselib.skeleton.skeleton3d import SkeletonTree, SkeletonMotion, SkeletonState +from phc.learning.network_loader import load_z_encoder, load_z_decoder, load_pnn, load_mcp_mlp +from phc.env.tasks.humanoid_funcs import compute_humanoid_observations_smpl_max, compute_imitation_observations_v6 +from rl_games.algos_torch import torch_ext +from rl_games.algos_torch.players import 
rescale_actions +import torch +import joblib +from easydict import EasyDict +import numpy as np +import open3d as o3d +import copy +from scipy.spatial.transform import Rotation as sRot +import time +from omegaconf import DictConfig, OmegaConf +from multiprocessing import Pool +import hydra +from phc.utils.torch_humanoid_batch import Humanoid_Batch + +@configclass +class SMPLSceneCfg(InteractiveSceneCfg): + """Configuration for a cart-pole scene.""" + + # ground plane + ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg()) + + # lights + dome_light = AssetBaseCfg( + prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)) + ) + + # articulation + robot: ArticulationCfg = Atlas_21jts_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + +def run_simulator(cfg, sim: sim_utils.SimulationContext, scene: InteractiveScene): + + def test_keyboard(): + import ipdb; ipdb.set_trace() + print('....') + + keyboard = Se2Keyboard() + keyboard.add_callback(carb.input.KeyboardInput.W, test_keyboard) + + def reset_robot(robot): + robot.write_root_state_to_sim(init_root_state, env_ids) + robot.write_joint_state_to_sim(init_joint_pos, init_joint_vel, None, env_ids) + + humanoid_fk = Humanoid_Batch(cfg.robot) + + device = ( + torch.device("cuda", index=0) + if torch.cuda.is_available() + else torch.device("cpu") + ) + motion_file = "data/e_atlas_nohand/v1/amass_phc.pkl" + dt = 1/30 + time_steps = 1 + has_upright_start = True + motion_lib_cfg = EasyDict({ + "has_upright_start": has_upright_start, + "motion_file": motion_file, + "fix_height": FixHeightMode.full_fix, + "min_length": -1, + "max_length": 3000, + "im_eval": flags.im_eval, + "multi_thread": False , + "randomrize_heading": True, + "device": device, + "robot": cfg.robot, + }) + + + gender_beta = np.zeros((17)) + sk_tree = SkeletonTree.from_mjcf(cfg.robot.asset.assetFileName) + num_motions = 10 + skeleton_trees = [sk_tree] * num_motions + start_idx = 0 + motion_lib = MotionLibReal(motion_lib_cfg) + motion_lib.load_motions(skeleton_trees=skeleton_trees, + gender_betas=[torch.from_numpy(gender_beta)] * num_motions, + limb_weights=[np.zeros(10)] * num_motions, + random_sample=False, + start_idx = start_idx, + max_len=-1) + motion_id, time_steps = torch.zeros(1).to(device).long(), torch.zeros(1).to(device).float() + motion_id[:] = 2 + motion_len = motion_lib.get_motion_length(motion_id).item() + motion_time = time_steps % motion_len + + policy_path = "output/HumanoidIm/eatlas_phc_1002/Humanoid.pth" + check_points = [torch_ext.load_checkpoint(policy_path)] + mlp = load_mcp_mlp(check_points[0], activation = "silu", device = device) + running_mean, running_var = check_points[-1]['running_mean_std']['running_mean'], check_points[-1]['running_mean_std']['running_var'] + action_offset = joblib.load("atlas_pd_action_offset_scale.pkl") + pd_action_offset = action_offset[0] + pd_action_scale = action_offset[1] + ####### + + """Runs the simulation loop.""" + # Extract scene entities + # note: we only do this here for readability. 
+ robot = scene["robot"] + # Define simulation stepping + sim_dt = sim.get_physics_dt() + + ########################################################### + sim_joint_names = robot.data.joint_names + sim_body_names = robot.data.body_names + body_names_gym = cfg.robot.body_names + dof_names_gym = ['larm_shy', 'larm_shx', 'larm_shz', 'larm_el', 'lleg_hpy', + 'lleg_hpx', 'lleg_hpz', 'lleg_kn', 'lleg_aky', 'lleg_akx', + 'rarm_shy', 'rarm_shx', 'rarm_shz', 'rarm_el', 'rleg_hpy', + 'rleg_hpx', 'rleg_hpz', 'rleg_kn', 'rleg_aky', 'rleg_akx'] + + sim_to_gym_body = [sim_body_names.index(n) for n in body_names_gym] + sim_to_gym_dof = [sim_joint_names.index(n) for n in dof_names_gym] + + gym_to_sim_body = [body_names_gym.index(n) for n in sim_body_names] + gym_to_sim_dof = [dof_names_gym.index(n) for n in sim_joint_names] + + motion_res = motion_lib.get_motion_state(motion_id, motion_time) + ref_root_pos, ref_root_rot, ref_dof_pos, ref_root_vel, ref_root_ang_vel, ref_dof_vel, ref_smpl_params, ref_limb_weights, ref_pose_aa, ref_rb_pos, ref_rb_rot, ref_body_vel, ref_body_ang_vel = \ + motion_res["root_pos"], motion_res["root_rot"], motion_res["dof_pos"], motion_res["root_vel"], motion_res["root_ang_vel"], motion_res["dof_vel"], \ + motion_res["motion_bodies"], motion_res["motion_limb_weights"], motion_res["motion_aa"], motion_res["rg_pos"], motion_res["rb_rot"], motion_res["body_vel"], motion_res["body_ang_vel"] + init_joint_pos = ref_dof_pos[:, gym_to_sim_dof] + init_joint_vel = ref_dof_vel[:, gym_to_sim_dof] + + # ref_joint_pos = torch.from_numpy(sRot.from_rotvec(ref_joint_pos.reshape(-1, 3)).as_euler("xyz")).to(ref_joint_pos).reshape(scene.num_envs, -1) + env_ids = torch.zeros(scene.num_envs).to(device).long() + + init_root_state = torch.cat([ref_root_pos, xyzw_to_wxyz(ref_root_rot), ref_root_vel, ref_root_ang_vel], dim = -1) + robot.write_root_state_to_sim(init_root_state, env_ids) + robot.write_joint_state_to_sim(init_joint_pos, init_joint_vel, None, env_ids) + time_step = 3 + + # Simulation loop + while simulation_app.is_running(): + start_time = time.time() + # Apply random action + # -- generate random joint efforts + # efforts = torch.randn_like(robot.data.joint_pos) * 5.0 + # -- apply action to the robot + # robot.set_joint_effort_target(efforts) + + ############################## PHC ######################################## + + motion_time = time_steps % motion_len + time_internals = torch.arange(time_step).to(device).repeat(scene.num_envs).view(-1, time_step) * 1/30 + motion_time_steps = motion_time + time_internals + env_ids_steps = motion_id.repeat_interleave(time_step) + # motion_time_steps = motion_time + # env_ids_steps = motion_id + + motion_res = motion_lib.get_motion_state(env_ids_steps, motion_time_steps) + ref_root_pos, ref_root_rot, ref_dof_pos, ref_root_vel, ref_root_ang_vel, ref_dof_vel, ref_smpl_params, ref_limb_weights, ref_pose_aa, ref_rb_pos, ref_rb_rot, ref_body_vel, ref_body_ang_vel = \ + motion_res["root_pos"], motion_res["root_rot"], motion_res["dof_pos"], motion_res["root_vel"], motion_res["root_ang_vel"], motion_res["dof_vel"], \ + motion_res["motion_bodies"], motion_res["motion_limb_weights"], motion_res["motion_aa"], motion_res["rg_pos"], motion_res["rb_rot"], motion_res["body_vel"], motion_res["body_ang_vel"] + + body_pos = robot.data.body_pos_w[:, sim_to_gym_body ] + body_rot = wxyz_to_xyzw(robot.data.body_quat_w[:, sim_to_gym_body]) + body_vel = robot.data.body_lin_vel_w[:, sim_to_gym_body] + body_ang_vel = robot.data.body_ang_vel_w[:, sim_to_gym_body] + + root_pos = 
body_pos[:, 0, :] + root_rot = body_rot[:, 0, :] + body_pos_subset = body_pos + body_rot_subset = body_rot + body_vel_subset = body_vel + body_ang_vel_subset = body_ang_vel + ref_rb_pos_subset = ref_rb_pos + ref_rb_rot_subset = ref_rb_rot + ref_body_vel_subset = ref_body_vel + ref_body_ang_vel_subset = ref_body_ang_vel + + # Data replay + # ref_joint_pos = ref_dof_pos[..., gym_to_sim_dof] + # ref_joint_vel = ref_dof_vel[..., gym_to_sim_dof] + # ref_root_state = torch.cat([ref_root_pos, xyzw_to_wxyz(ref_root_rot), ref_root_vel, ref_root_ang_vel], dim = -1) + # robot.write_root_state_to_sim(ref_root_state, env_ids) + # robot.write_joint_state_to_sim(ref_joint_pos, ref_joint_vel, None, env_ids) + + self_obs = compute_humanoid_observations_smpl_max(body_pos, body_rot, body_vel, body_ang_vel, ref_smpl_params, ref_limb_weights, True, False, has_upright_start, False, False) + task_obs = compute_imitation_observations_v6(root_pos, root_rot, body_pos_subset, body_rot_subset, body_vel_subset, body_ang_vel_subset, ref_rb_pos_subset, ref_rb_rot_subset, ref_body_vel_subset, ref_body_ang_vel_subset, 3, has_upright_start) + + full_obs = torch.cat([self_obs, task_obs], dim = -1) + full_obs = ((full_obs - running_mean.float()) / torch.sqrt(running_var.float() + 1e-05)) + full_obs = torch.clamp(full_obs, min=-5.0, max=5.0) + + with torch.no_grad(): + actions = mlp(full_obs) + actions = rescale_actions(-1, 1, torch.clamp(actions, -1, 1)) + actions = actions * pd_action_scale + pd_action_offset + actions = actions[:, gym_to_sim_dof] + actions[:] = 0 + + robot.set_joint_position_target(actions, None, env_ids) + + ############################## PHC ######################################## + + if time_steps > motion_len: + print("resetting") + reset_robot(robot) + time_steps[:] = 0 + + # -- write data to sim + scene.write_data_to_sim() + # Perform step + sim.step() + # sim.render() + # Increment counter + time_steps += sim_dt + # Update buffers + scene.update(sim_dt) + + # Measure wall time and ensure 30 fps + elapsed_time = time.time() - start_time + sleep_time = max(0, (1.0 / 30.0) - elapsed_time) + # time.sleep(sleep_time) + +@hydra.main(version_base=None, config_path="../phc/data/cfg", config_name="config") +def main(cfg : DictConfig) -> None: + """Main function.""" + # Load kit helper + sim_cfg = sim_utils.SimulationCfg( + device=args_cli.device, + dt=1 / 60, + # decimation will be set in the task config + # up axis will always be Z in isaac sim + # use_gpu_pipeline is deduced from the device + gravity=(0.0, 0.0, -9.81), + physx = PhysxCfg( + # num_threads is no longer needed + solver_type=1, + # use_gpu is deduced from the device + max_position_iteration_count=4, + max_velocity_iteration_count=0, + # moved to actor config + # moved to actor config + bounce_threshold_velocity=0.2, + # moved to actor config + # default_buffer_size_multiplier is no longer needed + gpu_max_rigid_contact_count=2**23, + # num_subscenes is no longer needed + # contact_collection is no longer needed + ) + ) + sim = SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view([2.5, 0.0, 4.0], [0.0, 0.0, 2.0]) + # Design scene + scene_cfg = SMPLSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0) + scene = InteractiveScene(scene_cfg) + # Play the simulator + sim.reset() + # Now we are ready! 
+ print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(cfg, sim, scene) + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() \ No newline at end of file diff --git a/isaaclab/smpl_config.py b/isaaclab/smpl_config.py new file mode 100755 index 0000000..b720306 --- /dev/null +++ b/isaaclab/smpl_config.py @@ -0,0 +1,298 @@ +# Copyright (c) 2022-2024, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for a simple Cartpole robot.""" + + +import omni.isaac.lab.sim as sim_utils +from omni.isaac.lab.actuators import ImplicitActuatorCfg +from omni.isaac.lab.assets import ArticulationCfg +from omni.isaac.lab.utils.assets import ISAACLAB_NUCLEUS_DIR + +## +# Configuration +## + + +SMPL_CFG = ArticulationCfg( + spawn=sim_utils.UsdFileCfg( + usd_path=f"data/assets/usd/smpl_humanoid.usda", + + activate_contact_sensors=True, + rigid_props=sim_utils.RigidBodyPropertiesCfg( + disable_gravity=False, + retain_accelerations=False, + linear_damping=0.0, + angular_damping=0.0, + max_linear_velocity=1000.0, + max_angular_velocity=1000.0, + max_depenetration_velocity=1.0, + ), + articulation_props=sim_utils.ArticulationRootPropertiesCfg( + enabled_self_collisions=False, solver_position_iteration_count=4, solver_velocity_iteration_count=4 + ), + ), + init_state=ArticulationCfg.InitialStateCfg( + pos=(0.0, 0.0, 1.05), + joint_pos={".*": 0.0}, + joint_vel={".*": 0.0}, + ), + soft_joint_pos_limit_factor=0.9, + actuators={ + "legs": ImplicitActuatorCfg( + joint_names_expr=["L_Hip_.", "R_Hip_.", "L_Knee_.", "R_Knee_.", "L_Ankle_.", "R_Ankle_.", "L_Toe_.", "R_Toe_."], + effort_limit=500, + velocity_limit=100.0, + stiffness={ + "L_Hip_.": 800, + "R_Hip_.": 800, + "L_Knee_.": 800, + "R_Knee_.": 800, + "L_Ankle_.": 800, + "R_Ankle_.": 800, + "L_Toe_.": 500, + "R_Toe_.": 500, + }, + damping={ + "L_Hip_.": 80, + "R_Hip_.": 80, + "L_Knee_.": 80, + "R_Knee_.": 80, + "L_Ankle_.": 80, + "R_Ankle_.": 80, + "L_Toe_.": 50, + "R_Toe_.": 50, + + }, + ), + "torso": ImplicitActuatorCfg( + joint_names_expr=["Torso_.", "Spine_.", "Chest_.", "Neck_.", "Head_.", "L_Thorax_.", "R_Thorax_."], + effort_limit=500, + velocity_limit=100.0, + stiffness={ + "Torso_.": 1000, + "Spine_.": 1000, + "Chest_.": 1000, + "Neck_.": 500, + "Head_.": 500, + "L_Thorax_.": 500, + "R_Thorax_.": 500, + }, + damping={ + "Torso_.": 100, + "Spine_.": 100, + "Chest_.": 100, + "Neck_.": 50, + "Head_.": 50, + "L_Thorax_.": 50, + "R_Thorax_.": 50, + }, + ), + "arms": ImplicitActuatorCfg( + joint_names_expr=["L_Shoulder_.", "R_Shoulder_.", "L_Elbow_.", "R_Elbow_.", "L_Wrist_.", "R_Wrist_.", "L_Hand_.", "R_Hand_."], + effort_limit=300, + velocity_limit=100.0, + stiffness={ + "L_Shoulder_.": 500, + "R_Shoulder_.": 500, + "L_Elbow_.": 300, + "R_Elbow_.": 300, + "L_Wrist_.": 300, + "R_Wrist_.": 300, + "L_Hand_.": 300, + "R_Hand_.": 300, + }, + damping={ + "L_Shoulder_.": 50, + "R_Shoulder_.": 50, + "L_Elbow_.": 30, + "R_Elbow_.": 30, + "L_Wrist_.": 30, + "R_Wrist_.": 30, + }, + ), + }, +) + + +SMPLX_CFG = ArticulationCfg( + spawn=sim_utils.UsdFileCfg( + # usd_path=f"data/assets/usd/smplx_0.usda", + usd_path=f"data/assets/usd/smplx.usda", + + activate_contact_sensors=True, + rigid_props=sim_utils.RigidBodyPropertiesCfg( + disable_gravity=False, + retain_accelerations=False, + linear_damping=0.0, + angular_damping=0.0, + max_linear_velocity=1000.0, + max_angular_velocity=1000.0, + max_depenetration_velocity=1.0, + ), + 
articulation_props=sim_utils.ArticulationRootPropertiesCfg( + enabled_self_collisions=False, solver_position_iteration_count=4, solver_velocity_iteration_count=4 + ), + ), + init_state=ArticulationCfg.InitialStateCfg( + pos=(0.0, 0.0, 1.05), + joint_pos={".*": 0.0}, + joint_vel={".*": 0.0}, + ), + soft_joint_pos_limit_factor=0.9, + actuators={ + "legs": ImplicitActuatorCfg( + joint_names_expr=["L_Hip_.", "R_Hip_.", "L_Knee_.", "R_Knee_.", "L_Ankle_.", "R_Ankle_.", "L_Toe_.", "R_Toe_."], + effort_limit=500, + velocity_limit=100.0, + stiffness={ + "L_Hip_.": 800, + "R_Hip_.": 800, + "L_Knee_.": 800, + "R_Knee_.": 800, + "L_Ankle_.": 800, + "R_Ankle_.": 800, + "L_Toe_.": 500, + "R_Toe_.": 500, + }, + damping={ + "L_Hip_.": 80, + "R_Hip_.": 80, + "L_Knee_.": 80, + "R_Knee_.": 80, + "L_Ankle_.": 80, + "R_Ankle_.": 80, + "L_Toe_.": 50, + "R_Toe_.": 50, + + }, + ), + "torso": ImplicitActuatorCfg( + joint_names_expr=["Torso_.", "Spine_.", "Chest_.", "Neck_.", "Head_.", "L_Thorax_.", "R_Thorax_."], + effort_limit=500, + velocity_limit=100.0, + stiffness={ + "Torso_.": 1000, + "Spine_.": 1000, + "Chest_.": 1000, + "Neck_.": 500, + "Head_.": 500, + "L_Thorax_.": 500, + "R_Thorax_.": 500, + }, + damping={ + "Torso_.": 100, + "Spine_.": 100, + "Chest_.": 100, + "Neck_.": 50, + "Head_.": 50, + "L_Thorax_.": 50, + "R_Thorax_.": 50, + }, + ), + "arms": ImplicitActuatorCfg( + joint_names_expr=["L_Shoulder_.", "R_Shoulder_.", "L_Elbow_.", "R_Elbow_.", "L_Wrist_.", "R_Wrist_."], + effort_limit=300, + velocity_limit=100.0, + stiffness={ + "L_Shoulder_.": 500, + "R_Shoulder_.": 500, + "L_Elbow_.": 300, + "R_Elbow_.": 300, + "L_Wrist_.": 300, + "R_Wrist_.": 300, + }, + damping={ + "L_Shoulder_.": 50, + "R_Shoulder_.": 50, + "L_Elbow_.": 30, + "R_Elbow_.": 30, + "L_Wrist_.": 30, + "R_Wrist_.": 30, + }, + ), + + "hands": ImplicitActuatorCfg( + joint_names_expr=[ + "L_Index1_.", "L_Index2_.", "L_Index3_.", + "L_Middle1_.", "L_Middle2_.", "L_Middle3_.", + "L_Pinky1_.", "L_Pinky2_.", "L_Pinky3_.", + "L_Ring1_.", "L_Ring2_.", "L_Ring3_.", + "L_Thumb1_.", "L_Thumb2_.", "L_Thumb3_.", + "R_Index1_.", "R_Index2_.", "R_Index3_.", + "R_Middle1_.", "R_Middle2_.", "R_Middle3_.", + "R_Pinky1_.", "R_Pinky2_.", "R_Pinky3_.", + "R_Ring1_.", "R_Ring2_.", "R_Ring3_.", + "R_Thumb1_.", "R_Thumb2_.", "R_Thumb3_." 
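+                # 3 joints per digit for all five digits on both hands; the trailing "_." wildcard presumably matches each joint's per-axis (x/y/z) sub-joints.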
+ ], + effort_limit=100, + velocity_limit=10.0, + stiffness={ + "L_Index1_.": 100, + "L_Index2_.": 100, + "L_Index3_.": 100, + "L_Middle1_.": 100, + "L_Middle2_.": 100, + "L_Middle3_.": 100, + "L_Pinky1_.": 100, + "L_Pinky2_.": 100, + "L_Pinky3_.": 100, + "L_Ring1_.": 100, + "L_Ring2_.": 100, + "L_Ring3_.": 100, + "L_Thumb1_.": 100, + "L_Thumb2_.": 100, + "L_Thumb3_.": 100, + "R_Index1_.": 100, + "R_Index2_.": 100, + "R_Index3_.": 100, + "R_Middle1_.": 100, + "R_Middle2_.": 100, + "R_Middle3_.": 100, + "R_Pinky1_.": 100, + "R_Pinky2_.": 100, + "R_Pinky3_.": 100, + "R_Ring1_.": 100, + "R_Ring2_.": 100, + "R_Ring3_.": 100, + "R_Thumb1_.": 100, + "R_Thumb2_.": 100, + "R_Thumb3_.": 100, + }, + damping={ + "L_Index1_.": 10, + "L_Index2_.": 10, + "L_Index3_.": 10, + "L_Middle1_.": 10, + "L_Middle2_.": 10, + "L_Middle3_.": 10, + "L_Pinky1_.": 10, + "L_Pinky2_.": 10, + "L_Pinky3_.": 10, + "L_Ring1_.": 10, + "L_Ring2_.": 10, + "L_Ring3_.": 10, + "L_Thumb1_.": 10, + "L_Thumb2_.": 10, + "L_Thumb3_.": 10, + "R_Index1_.": 10, + "R_Index2_.": 10, + "R_Index3_.": 10, + "R_Middle1_.": 10, + "R_Middle2_.": 10, + "R_Middle3_.": 10, + "R_Pinky1_.": 10, + "R_Pinky2_.": 10, + "R_Pinky3_.": 10, + "R_Ring1_.": 10, + "R_Ring2_.": 10, + "R_Ring3_.": 10, + "R_Thumb1_.": 10, + "R_Thumb2_.": 10, + "R_Thumb3_.": 10, + }, + ), + }, +) \ No newline at end of file diff --git a/smpl_sim/envs/nv/poselib/README.md b/smpl_sim/envs/nv/poselib/README.md deleted file mode 100644 index d8fc8cb..0000000 --- a/smpl_sim/envs/nv/poselib/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# poselib - -`poselib` is a library for loading, manipulating, and retargeting skeleton poses and motions. It is separated into three modules: `poselib.poselib.core` for basic data loading and tensor operations, `poselib.poselib.skeleton` for higher-level skeleton operations, and `poselib.poselib.visualization` for displaying skeleton poses. - -## poselib.poselib.core -- `poselib.poselib.core.rotation3d`: A set of Torch JIT functions for dealing with quaternions, transforms, and rotation/transformation matrices. - - `quat_*` manipulate and create quaternions in [x, y, z, w] format (where w is the real component). - - `transform_*` handle 7D transforms in [quat, pos] format. - - `rot_matrix_*` handle 3x3 rotation matrices. - - `euclidean_*` handle 4x4 Euclidean transformation matrices. -- `poselib.poselib.core.tensor_utils`: Provides loading and saving functions for PyTorch tensors. - -## poselib.poselib.skeleton -- `poselib.poselib.skeleton.skeleton3d`: Utilities for loading and manipulating skeleton poses, and retargeting poses to different skeletons. - - `SkeletonTree` is a class that stores a skeleton as a tree structure. - - `SkeletonState` describes the static state of a skeleton, and provides both global and local joint angles. - - `SkeletonMotion` describes a time-series of skeleton states and provides utilities for computing joint velocities. - -## poselib.poselib.visualization -- `poselib.poselib.visualization.common`: Functions used for visualizing skeletons interactively in `matplotlib`. 
diff --git a/smpl_sim/envs/nv/poselib/poselib/__init__.py b/smpl_sim/envs/nv/poselib/poselib/__init__.py deleted file mode 100644 index fd13ae7..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -__version__ = "0.0.1" - -from .core import * diff --git a/smpl_sim/envs/nv/poselib/poselib/core/__init__.py b/smpl_sim/envs/nv/poselib/poselib/core/__init__.py deleted file mode 100644 index e3c0f9d..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/core/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .tensor_utils import * -from .rotation3d import * -from .backend import Serializable, logger diff --git a/smpl_sim/envs/nv/poselib/poselib/core/backend/__init__.py b/smpl_sim/envs/nv/poselib/poselib/core/backend/__init__.py deleted file mode 100644 index 49705b2..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/core/backend/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .abstract import Serializable - -from .logger import logger diff --git a/smpl_sim/envs/nv/poselib/poselib/core/backend/abstract.py b/smpl_sim/envs/nv/poselib/poselib/core/backend/abstract.py deleted file mode 100644 index caef630..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/core/backend/abstract.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -from abc import ABCMeta, abstractmethod, abstractclassmethod -from collections import OrderedDict -import json - -import numpy as np -import os - -TENSOR_CLASS = {} - - -def register(name): - global TENSOR_CLASS - - def core(tensor_cls): - TENSOR_CLASS[name] = tensor_cls - return tensor_cls - - return core - - -def _get_cls(name): - global TENSOR_CLASS - return TENSOR_CLASS[name] - - -class NumpyEncoder(json.JSONEncoder): - """ Special json encoder for numpy types """ - - def default(self, obj): - if isinstance( - obj, - ( - np.int_, - np.intc, - np.intp, - np.int8, - np.int16, - np.int32, - np.int64, - np.uint8, - np.uint16, - np.uint32, - np.uint64, - ), - ): - return int(obj) - elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)): - return float(obj) - elif isinstance(obj, (np.ndarray,)): - return dict(__ndarray__=obj.tolist(), dtype=str(obj.dtype), shape=obj.shape) - return json.JSONEncoder.default(self, obj) - - -def json_numpy_obj_hook(dct): - if isinstance(dct, dict) and "__ndarray__" in dct: - data = np.asarray(dct["__ndarray__"], dtype=dct["dtype"]) - return data.reshape(dct["shape"]) - return dct - - -class Serializable: - """ Implementation to read/write to file. 
- All class the is inherited from this class needs to implement to_dict() and - from_dict() - """ - - @abstractclassmethod - def from_dict(cls, dict_repr, *args, **kwargs): - """ Read the object from an ordered dictionary - - :param dict_repr: the ordered dictionary that is used to construct the object - :type dict_repr: OrderedDict - :param args, kwargs: the arguments that need to be passed into from_dict() - :type args, kwargs: additional arguments - """ - pass - - @abstractmethod - def to_dict(self): - """ Construct an ordered dictionary from the object - - :rtype: OrderedDict - """ - pass - - @classmethod - def from_file(cls, path, *args, **kwargs): - """ Read the object from a file (either .npy or .json) - - :param path: path of the file - :type path: string - :param args, kwargs: the arguments that need to be passed into from_dict() - :type args, kwargs: additional arguments - """ - if path.endswith(".json"): - with open(path, "r") as f: - d = json.load(f, object_hook=json_numpy_obj_hook) - elif path.endswith(".npy"): - d = np.load(path, allow_pickle=True).item() - else: - assert False, "failed to load {} from {}".format(cls.__name__, path) - assert d["__name__"] == cls.__name__, "the file belongs to {}, not {}".format( - d["__name__"], cls.__name__ - ) - return cls.from_dict(d, *args, **kwargs) - - def to_file(self, path: str) -> None: - """ Write the object to a file (either .npy or .json) - - :param path: path of the file - :type path: string - """ - if os.path.dirname(path) != "" and not os.path.exists(os.path.dirname(path)): - os.makedirs(os.path.dirname(path)) - d = self.to_dict() - d["__name__"] = self.__class__.__name__ - if path.endswith(".json"): - with open(path, "w") as f: - json.dump(d, f, cls=NumpyEncoder, indent=4) - elif path.endswith(".npy"): - np.save(path, d) diff --git a/smpl_sim/envs/nv/poselib/poselib/core/backend/logger.py b/smpl_sim/envs/nv/poselib/poselib/core/backend/logger.py deleted file mode 100644 index 369cae9..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/core/backend/logger.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import logging - -logger = logging.getLogger("poselib") -logger.setLevel(logging.INFO) - -if not len(logger.handlers): - formatter = logging.Formatter( - fmt="%(asctime)-15s - %(levelname)s - %(module)s - %(message)s" - ) - handler = logging.StreamHandler() - handler.setFormatter(formatter) - logger.addHandler(handler) - logger.info("logger initialized") diff --git a/smpl_sim/envs/nv/poselib/poselib/core/rotation3d.py b/smpl_sim/envs/nv/poselib/poselib/core/rotation3d.py deleted file mode 100644 index ae32d93..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/core/rotation3d.py +++ /dev/null @@ -1,474 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. 
Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -from typing import List, Optional - -import math -import torch - - -@torch.jit.script -def quat_mul(a, b): - """ - quaternion multiplication - """ - x1, y1, z1, w1 = a[..., 0], a[..., 1], a[..., 2], a[..., 3] - x2, y2, z2, w2 = b[..., 0], b[..., 1], b[..., 2], b[..., 3] - - w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2 - x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2 - y = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2 - z = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2 - - return torch.stack([x, y, z, w], dim=-1) - - -@torch.jit.script -def quat_pos(x): - """ - make all the real part of the quaternion positive - """ - q = x - z = (q[..., 3:] < 0).float() - q = (1 - 2 * z) * q - return q - - -@torch.jit.script -def quat_abs(x): - """ - quaternion norm (unit quaternion represents a 3D rotation, which has norm of 1) - """ - x = x.norm(p=2, dim=-1) - return x - - -@torch.jit.script -def quat_unit(x): - """ - normalized quaternion with norm of 1 - """ - norm = quat_abs(x).unsqueeze(-1) - return x / (norm.clamp(min=1e-9)) - - -@torch.jit.script -def quat_conjugate(x): - """ - quaternion with its imaginary part negated - x, y, z , w -> -x, -y, -z, w - """ - return torch.cat([-x[..., :3], x[..., 3:]], dim=-1) - - -@torch.jit.script -def quat_real(x): - """ - real component of the quaternion - """ - return x[..., 3] - - -@torch.jit.script -def quat_imaginary(x): - """ - imaginary components of the quaternion - """ - return x[..., :3] - - -@torch.jit.script -def quat_norm_check(x): - """ - verify that a quaternion has norm 1 - """ - assert bool((abs(x.norm(p=2, dim=-1) - 1) < 1e-3).all()), "the quaternion is has non-1 norm: {}".format(abs(x.norm(p=2, dim=-1) - 1)) - assert bool((x[..., 3] >= 0).all()), "the quaternion has negative real part" - - -@torch.jit.script -def quat_normalize(q): - """ - Construct 3D rotation from quaternion (the quaternion needs not to be normalized). - """ - q = quat_unit(quat_pos(q)) # normalized to positive and unit quaternion - return q - - -@torch.jit.script -def quat_from_xyz(xyz): - """ - Construct 3D rotation from the imaginary component - """ - w = (1.0 - xyz.norm()).unsqueeze(-1) - assert bool((w >= 0).all()), "xyz has its norm greater than 1" - return torch.cat([xyz, w], dim=-1) - - -@torch.jit.script -def quat_identity(shape: List[int]): - """ - Construct 3D identity rotation given shape - """ - w = torch.ones(shape + [1]) - xyz = torch.zeros(shape + [3]) - q = torch.cat([xyz, w], dim=-1) - return quat_normalize(q) - - -@torch.jit.script -def quat_from_angle_axis(angle, axis, degree: bool = False): - """ Create a 3D rotation from angle and axis of rotation. The rotation is counter-clockwise - along the axis. 
- - The rotation can be interpreted as a_R_b where frame "b" is the new frame that - gets rotated counter-clockwise along the axis from frame "a" - - :param angle: angle of rotation - :type angle: Tensor - :param axis: axis of rotation - :type axis: Tensor - :param degree: put True here if the angle is given by degree - :type degree: bool, optional, default=False - """ - if degree: - angle = angle / 180.0 * math.pi - theta = (angle / 2).unsqueeze(-1) - axis = axis / (axis.norm(p=2, dim=-1, keepdim=True).clamp(min=1e-9)) - xyz = axis * theta.sin() - w = theta.cos() - return quat_normalize(torch.cat([xyz, w], dim=-1)) - - -@torch.jit.script -def quat_from_rotation_matrix(m): - """ - Construct a 3D rotation from a valid 3x3 rotation matrices. - Reference can be found here: - http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche52.html - - :param m: 3x3 orthogonal rotation matrices. - :type m: Tensor - - :rtype: Tensor - """ - m = m.unsqueeze(0) - diag0 = m[..., 0, 0] - diag1 = m[..., 1, 1] - diag2 = m[..., 2, 2] - - # Math stuff. - w = (((diag0 + diag1 + diag2 + 1.0) / 4.0).clamp(0.0, None))**0.5 - x = (((diag0 - diag1 - diag2 + 1.0) / 4.0).clamp(0.0, None))**0.5 - y = (((-diag0 + diag1 - diag2 + 1.0) / 4.0).clamp(0.0, None))**0.5 - z = (((-diag0 - diag1 + diag2 + 1.0) / 4.0).clamp(0.0, None))**0.5 - - # Only modify quaternions where w > x, y, z. - c0 = (w >= x) & (w >= y) & (w >= z) - x[c0] *= (m[..., 2, 1][c0] - m[..., 1, 2][c0]).sign() - y[c0] *= (m[..., 0, 2][c0] - m[..., 2, 0][c0]).sign() - z[c0] *= (m[..., 1, 0][c0] - m[..., 0, 1][c0]).sign() - - # Only modify quaternions where x > w, y, z - c1 = (x >= w) & (x >= y) & (x >= z) - w[c1] *= (m[..., 2, 1][c1] - m[..., 1, 2][c1]).sign() - y[c1] *= (m[..., 1, 0][c1] + m[..., 0, 1][c1]).sign() - z[c1] *= (m[..., 0, 2][c1] + m[..., 2, 0][c1]).sign() - - # Only modify quaternions where y > w, x, z. - c2 = (y >= w) & (y >= x) & (y >= z) - w[c2] *= (m[..., 0, 2][c2] - m[..., 2, 0][c2]).sign() - x[c2] *= (m[..., 1, 0][c2] + m[..., 0, 1][c2]).sign() - z[c2] *= (m[..., 2, 1][c2] + m[..., 1, 2][c2]).sign() - - # Only modify quaternions where z > w, x, y. - c3 = (z >= w) & (z >= x) & (z >= y) - w[c3] *= (m[..., 1, 0][c3] - m[..., 0, 1][c3]).sign() - x[c3] *= (m[..., 2, 0][c3] + m[..., 0, 2][c3]).sign() - y[c3] *= (m[..., 2, 1][c3] + m[..., 1, 2][c3]).sign() - - return quat_normalize(torch.stack([x, y, z, w], dim=-1)).squeeze(0) - - -@torch.jit.script -def quat_mul_norm(x, y): - """ - Combine two set of 3D rotations together using \**\* operator. The shape needs to be - broadcastable - """ - return quat_normalize(quat_mul(x, y)) - - -@torch.jit.script -def quat_rotate(rot, vec): - """ - Rotate a 3D vector with the 3D rotation - """ - other_q = torch.cat([vec, torch.zeros_like(vec[..., :1])], dim=-1) - return quat_imaginary(quat_mul(quat_mul(rot, other_q), quat_conjugate(rot))) - - -@torch.jit.script -def quat_inverse(x): - """ - The inverse of the rotation - """ - return quat_conjugate(x) - - -@torch.jit.script -def quat_identity_like(x): - """ - Construct identity 3D rotation with the same shape - """ - return quat_identity(x.shape[:-1]) - - -@torch.jit.script -def quat_angle_axis(x): - """ - The (angle, axis) representation of the rotation. The axis is normalized to unit length. - The angle is guaranteed to be between [0, pi]. 
- """ - s = 2 * (x[..., 3]**2) - 1 - angle = s.clamp(-1, 1).arccos() # just to be safe - axis = x[..., :3] - axis /= axis.norm(p=2, dim=-1, keepdim=True).clamp(min=1e-9) - return angle, axis - - -@torch.jit.script -def quat_yaw_rotation(x, z_up: bool = True): - """ - Yaw rotation (rotation along z-axis) - """ - q = x - if z_up: - q = torch.cat([torch.zeros_like(q[..., 0:2]), q[..., 2:3], q[..., 3:]], dim=-1) - else: - q = torch.cat( - [ - torch.zeros_like(q[..., 0:1]), - q[..., 1:2], - torch.zeros_like(q[..., 2:3]), - q[..., 3:4], - ], - dim=-1, - ) - return quat_normalize(q) - - -@torch.jit.script -def transform_from_rotation_translation(r: Optional[torch.Tensor] = None, t: Optional[torch.Tensor] = None): - """ - Construct a transform from a quaternion and 3D translation. Only one of them can be None. - """ - assert r is not None or t is not None, "rotation and translation can't be all None" - if r is None: - assert t is not None - r = quat_identity(list(t.shape)) - if t is None: - t = torch.zeros(list(r.shape) + [3]) - return torch.cat([r, t], dim=-1) - - -@torch.jit.script -def transform_identity(shape: List[int]): - """ - Identity transformation with given shape - """ - r = quat_identity(shape) - t = torch.zeros(shape + [3]) - return transform_from_rotation_translation(r, t) - - -@torch.jit.script -def transform_rotation(x): - """Get rotation from transform""" - return x[..., :4] - - -@torch.jit.script -def transform_translation(x): - """Get translation from transform""" - return x[..., 4:] - - -@torch.jit.script -def transform_inverse(x): - """ - Inverse transformation - """ - inv_so3 = quat_inverse(transform_rotation(x)) - return transform_from_rotation_translation(r=inv_so3, t=quat_rotate(inv_so3, -transform_translation(x))) - - -@torch.jit.script -def transform_identity_like(x): - """ - identity transformation with the same shape - """ - return transform_identity(x.shape) - - -@torch.jit.script -def transform_mul(x, y): - """ - Combine two transformation together - """ - z = transform_from_rotation_translation( - r=quat_mul_norm(transform_rotation(x), transform_rotation(y)), - t=quat_rotate(transform_rotation(x), transform_translation(y)) + transform_translation(x), - ) - return z - - -@torch.jit.script -def transform_apply(rot, vec): - """ - Transform a 3D vector - """ - assert isinstance(vec, torch.Tensor) - return quat_rotate(transform_rotation(rot), vec) + transform_translation(rot) - - -@torch.jit.script -def rot_matrix_det(x): - """ - Return the determinant of the 3x3 matrix. The shape of the tensor will be as same as the - shape of the matrix - """ - a, b, c = x[..., 0, 0], x[..., 0, 1], x[..., 0, 2] - d, e, f = x[..., 1, 0], x[..., 1, 1], x[..., 1, 2] - g, h, i = x[..., 2, 0], x[..., 2, 1], x[..., 2, 2] - t1 = a * (e * i - f * h) - t2 = b * (d * i - f * g) - t3 = c * (d * h - e * g) - return t1 - t2 + t3 - - -@torch.jit.script -def rot_matrix_integrity_check(x): - """ - Verify that a rotation matrix has a determinant of one and is orthogonal - """ - det = rot_matrix_det(x) - assert bool((abs(det - 1) < 1e-3).all()), "the matrix has non-one determinant" - rtr = x @ x.permute(torch.arange(x.dim() - 2), -1, -2) - rtr_gt = rtr.zeros_like() - rtr_gt[..., 0, 0] = 1 - rtr_gt[..., 1, 1] = 1 - rtr_gt[..., 2, 2] = 1 - assert bool(((rtr - rtr_gt) < 1e-3).all()), "the matrix is not orthogonal" - - -# @torch.jit.script -# def rot_matrix_from_quaternion(q): -# """ -# Construct rotation matrix from quaternion -# x, y, z, w convension -# """ -# print("!!!!!!! 
This function does well-formed rotation matrices!!!") -# # Shortcuts for individual elements (using wikipedia's convention) -# qi, qj, qk, qr = q[..., 0], q[..., 1], q[..., 2], q[..., 3] - -# # Set individual elements -# R00 = 1.0 - 2.0 * (qj**2 + qk**2) -# R01 = 2 * (qi * qj - qk * qr) -# R02 = 2 * (qi * qk + qj * qr) -# R10 = 2 * (qi * qj + qk * qr) -# R11 = 1.0 - 2.0 * (qi**2 + qk**2) -# R12 = 2 * (qj * qk - qi * qr) -# R20 = 2 * (qi * qk - qj * qr) -# R21 = 2 * (qj * qk + qi * qr) -# R22 = 1.0 - 2.0 * (qi**2 + qj**2) - -# R0 = torch.stack([R00, R01, R02], dim=-1) -# R1 = torch.stack([R10, R11, R12], dim=-1) -# R2 = torch.stack([R10, R21, R22], dim=-1) - -# R = torch.stack([R0, R1, R2], dim=-2) - -# return R - - -@torch.jit.script -def rot_matrix_from_quaternion(quaternions: torch.Tensor) -> torch.Tensor: - """ - Convert rotations given as quaternions to rotation matrices. - - Args: - quaternions: quaternions with real part first, - as tensor of shape (..., 4). - - Returns: - Rotation matrices as tensor of shape (..., 3, 3). - """ - i, j, k, r = torch.unbind(quaternions, -1) - two_s = 2.0 / (quaternions * quaternions).sum(-1) - - o = torch.stack( - ( - 1 - two_s * (j * j + k * k), - two_s * (i * j - k * r), - two_s * (i * k + j * r), - two_s * (i * j + k * r), - 1 - two_s * (i * i + k * k), - two_s * (j * k - i * r), - two_s * (i * k - j * r), - two_s * (j * k + i * r), - 1 - two_s * (i * i + j * j), - ), - -1, - ) - return o.reshape(quaternions.shape[:-1] + (3, 3)) - - -@torch.jit.script -def euclidean_to_rotation_matrix(x): - """ - Get the rotation matrix on the top-left corner of a Euclidean transformation matrix - """ - return x[..., :3, :3] - - -@torch.jit.script -def euclidean_integrity_check(x): - euclidean_to_rotation_matrix(x) # check 3d-rotation matrix - assert bool((x[..., 3, :3] == 0).all()), "the last row is illegal" - assert bool((x[..., 3, 3] == 1).all()), "the last row is illegal" - - -@torch.jit.script -def euclidean_translation(x): - """ - Get the translation vector located at the last column of the matrix - """ - return x[..., :3, 3] - - -@torch.jit.script -def euclidean_inverse(x): - """ - Compute the matrix that represents the inverse rotation - """ - s = x.zeros_like() - irot = quat_inverse(quat_from_rotation_matrix(x)) - s[..., :3, :3] = irot - s[..., :3, 4] = quat_rotate(irot, -euclidean_translation(x)) - return s - - -@torch.jit.script -def euclidean_to_transform(transformation_matrix): - """ - Construct a transform from a Euclidean transformation matrix - """ - return transform_from_rotation_translation( - r=quat_from_rotation_matrix(m=euclidean_to_rotation_matrix(transformation_matrix)), - t=euclidean_translation(transformation_matrix), - ) diff --git a/smpl_sim/envs/nv/poselib/poselib/core/tensor_utils.py b/smpl_sim/envs/nv/poselib/poselib/core/tensor_utils.py deleted file mode 100644 index 2646556..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/core/tensor_utils.py +++ /dev/null @@ -1,45 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. 
- -from collections import OrderedDict -from .backend import Serializable -import torch - - -class TensorUtils(Serializable): - @classmethod - def from_dict(cls, dict_repr, *args, **kwargs): - """ Read the object from an ordered dictionary - - :param dict_repr: the ordered dictionary that is used to construct the object - :type dict_repr: OrderedDict - :param kwargs: the arguments that need to be passed into from_dict() - :type kwargs: additional arguments - """ - return torch.from_numpy(dict_repr["arr"].astype(dict_repr["context"]["dtype"])) - - def to_dict(self): - """ Construct an ordered dictionary from the object - - :rtype: OrderedDict - """ - return NotImplemented - -def tensor_to_dict(x): - """ Construct an ordered dictionary from the object - - :rtype: OrderedDict - """ - x_np = x.numpy() - return { - "arr": x_np, - "context": { - "dtype": x_np.dtype.name - } - } diff --git a/smpl_sim/envs/nv/poselib/poselib/core/tests/__init__.py b/smpl_sim/envs/nv/poselib/poselib/core/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/smpl_sim/envs/nv/poselib/poselib/core/tests/test_rotation.py b/smpl_sim/envs/nv/poselib/poselib/core/tests/test_rotation.py deleted file mode 100644 index c5b6802..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/core/tests/test_rotation.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -from ..rotation3d import * -import numpy as np -import torch - -q = torch.from_numpy(np.array([[0, 1, 2, 3], [-2, 3, -1, 5]], dtype=np.float32)) -print("q", q) -r = quat_normalize(q) -x = torch.from_numpy(np.array([[1, 0, 0], [0, -1, 0]], dtype=np.float32)) -print(r) -print(quat_rotate(r, x)) - -angle = torch.from_numpy(np.array(np.random.rand() * 10.0, dtype=np.float32)) -axis = torch.from_numpy(np.array([1, np.random.rand() * 10.0, np.random.rand() * 10.0], dtype=np.float32),) - -print(repr(angle)) -print(repr(axis)) - -rot = quat_from_angle_axis(angle, axis) -x = torch.from_numpy(np.random.rand(5, 6, 3)) -y = quat_rotate(quat_inverse(rot), quat_rotate(rot, x)) -print(x.numpy()) -print(y.numpy()) -assert np.allclose(x.numpy(), y.numpy()) - -m = torch.from_numpy(np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]], dtype=np.float32)) -r = quat_from_rotation_matrix(m) -t = torch.from_numpy(np.array([0, 1, 0], dtype=np.float32)) -se3 = transform_from_rotation_translation(r=r, t=t) -print(se3) -print(transform_apply(se3, t)) - -rot = quat_from_angle_axis( - torch.from_numpy(np.array([45, -54], dtype=np.float32)), - torch.from_numpy(np.array([[1, 0, 0], [0, 1, 0]], dtype=np.float32)), - degree=True, -) -trans = torch.from_numpy(np.array([[1, 1, 0], [1, 1, 0]], dtype=np.float32)) -transform = transform_from_rotation_translation(r=rot, t=trans) - -t = transform_mul(transform, transform_inverse(transform)) -gt = np.zeros((2, 7)) -gt[:, 0] = 1.0 -print(t.numpy()) -print(gt) -# assert np.allclose(t.numpy(), gt) - -transform2 = torch.from_numpy(np.array([[1, 0, 0, 1], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]], dtype=np.float32),) -transform2 = euclidean_to_transform(transform2) -print(transform2) diff --git a/smpl_sim/envs/nv/poselib/poselib/skeleton/__init__.py 
b/smpl_sim/envs/nv/poselib/poselib/skeleton/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/smpl_sim/envs/nv/poselib/poselib/skeleton/backend/__init__.py b/smpl_sim/envs/nv/poselib/poselib/skeleton/backend/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/smpl_sim/envs/nv/poselib/poselib/skeleton/backend/fbx/__init__.py b/smpl_sim/envs/nv/poselib/poselib/skeleton/backend/fbx/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/smpl_sim/envs/nv/poselib/poselib/skeleton/backend/fbx/fbx_py27_backend.py b/smpl_sim/envs/nv/poselib/poselib/skeleton/backend/fbx/fbx_py27_backend.py deleted file mode 100644 index bee7f9f..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/skeleton/backend/fbx/fbx_py27_backend.py +++ /dev/null @@ -1,308 +0,0 @@ -""" -Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. - -NVIDIA CORPORATION and its licensors retain all intellectual property and proprietary -rights in and to this software, related documentation and any modifications thereto. Any -use, reproduction, disclosure or distribution of this software and related documentation -without an express license agreement from NVIDIA CORPORATION is strictly prohibited. -""" - -""" -This script reads an fbx file and saves the joint names, parents, and transforms to a -numpy array. - -NOTE: It must be run from python 2.7 with the fbx SDK installed. To use this script, -please use the read_fbx file -""" - -import sys - -import numpy as np - -try: - import fbx - import FbxCommon -except ImportError as e: - print("Error: FBX Import Failed. Message: {}".format(e)) - if sys.version_info[0] >= 3: - print( - "WARNING: you are using python 3 when this script should only be run from " - "python 2" - ) - else: - print( - "You are using python 2 but importing fbx failed. You must install it from " - "http://help.autodesk.com/view/FBX/2018/ENU/?guid=FBX_Developer_Help_" - "scripting_with_python_fbx_html" - ) - print("Exiting") - exit() - - -def fbx_to_npy(file_name_in, file_name_out, root_joint_name, fps): - """ - This function reads in an fbx file, and saves the relevant info to a numpy array - - Fbx files have a series of animation curves, each of which has animations at different - times. This script assumes that for mocap data, there is only one animation curve that - contains all the joints. Otherwise it is unclear how to read in the data. - - If this condition isn't met, then the method throws an error - - :param file_name_in: str, file path in. Should be .fbx file - :param file_name_out: str, file path out. Should be .npz file - :return: nothing, it just writes a file. - """ - - # Create the fbx scene object and load the .fbx file - fbx_sdk_manager, fbx_scene = FbxCommon.InitializeSdkObjects() - FbxCommon.LoadScene(fbx_sdk_manager, fbx_scene, file_name_in) - - """ - To read in the animation, we must find the root node of the skeleton. 
- - Unfortunately fbx files can have "scene parents" and other parts of the tree that are - not joints - - As a crude fix, this reader just takes and finds the first thing which has an - animation curve attached - """ - - search_root = (root_joint_name is None or root_joint_name == "") - - # Get the root node of the skeleton, which is the child of the scene's root node - possible_root_nodes = [fbx_scene.GetRootNode()] - found_root_node = False - max_key_count = 0 - root_joint = None - while len(possible_root_nodes) > 0: - joint = possible_root_nodes.pop(0) - if not search_root: - if joint.GetName() == root_joint_name: - root_joint = joint - try: - curve, anim_layer = _get_animation_curve(joint, fbx_scene) - except RuntimeError: - curve = None - if curve is not None: - key_count = curve.KeyGetCount() - if key_count > max_key_count: - found_root_node = True - max_key_count = key_count - root_curve = curve - if search_root and not root_joint: - root_joint = joint - - if not search_root and curve is not None and root_joint is not None: - break - - for child_index in range(joint.GetChildCount()): - possible_root_nodes.append(joint.GetChild(child_index)) - - if not found_root_node: - raise RuntimeError("No root joint found!! Exiting") - - joint_list, joint_names, parents = _get_skeleton(root_joint) - - """ - Read in the transformation matrices of the animation, taking the scaling into account - """ - - anim_range, frame_count, frame_rate = _get_frame_count(fbx_scene) - - local_transforms = [] - #for frame in range(frame_count): - time_sec = anim_range.GetStart().GetSecondDouble() - time_range_sec = anim_range.GetStop().GetSecondDouble() - time_sec - fbx_fps = frame_count / time_range_sec - if fps != 120: - fbx_fps = fps - print("FPS: ", fbx_fps) - while time_sec < anim_range.GetStop().GetSecondDouble(): - fbx_time = fbx.FbxTime() - fbx_time.SetSecondDouble(time_sec) - fbx_time = fbx_time.GetFramedTime() - transforms_current_frame = [] - - # Fbx has a unique time object which you need - #fbx_time = root_curve.KeyGetTime(frame) - for joint in joint_list: - arr = np.array(_recursive_to_list(joint.EvaluateLocalTransform(fbx_time))) - scales = np.array(_recursive_to_list(joint.EvaluateLocalScaling(fbx_time))) - - lcl_trans = joint.LclTranslation.Get() - lcl_rot = joint.LclRotation.Get() - lcl_matrix = fbx.FbxAMatrix() - # lcl_matrix.SetR(fbx.FbxVector4(lcl_rot[0], lcl_rot[1], lcl_rot[2], 1.0)) - # lcl_matrix.SetT(fbx.FbxVector4(lcl_trans[0], lcl_trans[1], lcl_trans[2], 1.0)) - # lcl_matrix = np.array(_recursive_to_list(lcl_matrix)) - curve = joint.LclTranslation.GetCurve(anim_layer, "X") - transX = curve.Evaluate(fbx_time)[0] if curve else lcl_trans[0] - curve = joint.LclTranslation.GetCurve(anim_layer, "Y") - transY = curve.Evaluate(fbx_time)[0] if curve else lcl_trans[1] - curve = joint.LclTranslation.GetCurve(anim_layer, "Z") - transZ = curve.Evaluate(fbx_time)[0] if curve else lcl_trans[2] - - curve = joint.LclRotation.GetCurve(anim_layer, "X") - rotX = curve.Evaluate(fbx_time)[0] if curve else lcl_rot[0] - curve = joint.LclRotation.GetCurve(anim_layer, "Y") - rotY = curve.Evaluate(fbx_time)[0] if curve else lcl_rot[1] - curve = joint.LclRotation.GetCurve(anim_layer, "Z") - rotZ = curve.Evaluate(fbx_time)[0] if curve else lcl_rot[2] - - lcl_matrix.SetR(fbx.FbxVector4(rotX, rotY, rotZ, 1.0)) - lcl_matrix.SetT(fbx.FbxVector4(transX, transY, transZ, 1.0)) - lcl_matrix = np.array(_recursive_to_list(lcl_matrix)) - # if not np.allclose(scales[0:3], scales[0]): - # raise ValueError( - # "Different X, Y 
and Z scaling. Unsure how this should be handled. " - # "To solve this, look at this link and try to upgrade the script " - # "http://help.autodesk.com/view/FBX/2017/ENU/?guid=__files_GUID_10CDD" - # "63C_79C1_4F2D_BB28_AD2BE65A02ED_htm" - # ) - # Adjust the array for scaling - arr /= scales[0] - arr[3, 3] = 1.0 - lcl_matrix[3, 3] = 1.0 - transforms_current_frame.append(lcl_matrix) - local_transforms.append(transforms_current_frame) - - time_sec += (1.0/fbx_fps) - - local_transforms = np.array(local_transforms) - print("Frame Count: ", len(local_transforms)) - - # Write to numpy array - np.savez_compressed( - file_name_out, names=joint_names, parents=parents, transforms=local_transforms, fps=fbx_fps - ) - -def _get_frame_count(fbx_scene): - # Get the animation stacks and layers, in order to pull off animation curves later - num_anim_stacks = fbx_scene.GetSrcObjectCount( - FbxCommon.FbxCriteria.ObjectType(FbxCommon.FbxAnimStack.ClassId) - ) - # if num_anim_stacks != 1: - # raise RuntimeError( - # "More than one animation stack was found. " - # "This script must be modified to handle this case. Exiting" - # ) - if num_anim_stacks > 1: - index = 1 - else: - index = 0 - anim_stack = fbx_scene.GetSrcObject( - FbxCommon.FbxCriteria.ObjectType(FbxCommon.FbxAnimStack.ClassId), index - ) - - anim_range = anim_stack.GetLocalTimeSpan() - duration = anim_range.GetDuration() - fps = duration.GetFrameRate(duration.GetGlobalTimeMode()) - frame_count = duration.GetFrameCount(True) - - return anim_range, frame_count, fps - -def _get_animation_curve(joint, fbx_scene): - # Get the animation stacks and layers, in order to pull off animation curves later - num_anim_stacks = fbx_scene.GetSrcObjectCount( - FbxCommon.FbxCriteria.ObjectType(FbxCommon.FbxAnimStack.ClassId) - ) - # if num_anim_stacks != 1: - # raise RuntimeError( - # "More than one animation stack was found. " - # "This script must be modified to handle this case. Exiting" - # ) - if num_anim_stacks > 1: - index = 1 - else: - index = 0 - anim_stack = fbx_scene.GetSrcObject( - FbxCommon.FbxCriteria.ObjectType(FbxCommon.FbxAnimStack.ClassId), index - ) - - num_anim_layers = anim_stack.GetSrcObjectCount( - FbxCommon.FbxCriteria.ObjectType(FbxCommon.FbxAnimLayer.ClassId) - ) - if num_anim_layers != 1: - raise RuntimeError( - "More than one animation layer was found. " - "This script must be modified to handle this case. 
Exiting" - ) - animation_layer = anim_stack.GetSrcObject( - FbxCommon.FbxCriteria.ObjectType(FbxCommon.FbxAnimLayer.ClassId), 0 - ) - - def _check_longest_curve(curve, max_curve_key_count): - longest_curve = None - if curve and curve.KeyGetCount() > max_curve_key_count[0]: - max_curve_key_count[0] = curve.KeyGetCount() - return True - - return False - - max_curve_key_count = [0] - longest_curve = None - for c in ["X", "Y", "Z"]: - curve = joint.LclTranslation.GetCurve( - animation_layer, c - ) # sample curve for translation - if _check_longest_curve(curve, max_curve_key_count): - longest_curve = curve - - curve = joint.LclRotation.GetCurve( - animation_layer, "X" - ) - if _check_longest_curve(curve, max_curve_key_count): - longest_curve = curve - - return longest_curve, animation_layer - - -def _get_skeleton(root_joint): - - # Do a depth first search of the skeleton to extract all the joints - joint_list = [root_joint] - joint_names = [root_joint.GetName()] - parents = [-1] # -1 means no parent - - def append_children(joint, pos): - """ - Depth first search function - :param joint: joint item in the fbx - :param pos: position of current element (for parenting) - :return: Nothing - """ - for child_index in range(joint.GetChildCount()): - child = joint.GetChild(child_index) - joint_list.append(child) - joint_names.append(child.GetName()) - parents.append(pos) - append_children(child, len(parents) - 1) - - append_children(root_joint, 0) - return joint_list, joint_names, parents - - -def _recursive_to_list(array): - """ - Takes some iterable that might contain iterables and converts it to a list of lists - [of lists... etc] - - Mainly used for converting the strange fbx wrappers for c++ arrays into python lists - :param array: array to be converted - :return: array converted to lists - """ - try: - return float(array) - except TypeError: - return [_recursive_to_list(a) for a in array] - - -if __name__ == "__main__": - - # Read in the input and output files, then read the fbx - file_name_in, file_name_out = sys.argv[1:3] - root_joint_name = sys.argv[3] - fps = int(sys.argv[4]) - - fbx_to_npy(file_name_in, file_name_out, root_joint_name, fps) diff --git a/smpl_sim/envs/nv/poselib/poselib/skeleton/backend/fbx/fbx_read_wrapper.py b/smpl_sim/envs/nv/poselib/poselib/skeleton/backend/fbx/fbx_read_wrapper.py deleted file mode 100644 index 1dbd5d4..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/skeleton/backend/fbx/fbx_read_wrapper.py +++ /dev/null @@ -1,75 +0,0 @@ -""" -Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. - -NVIDIA CORPORATION and its licensors retain all intellectual property and proprietary -rights in and to this software, related documentation and any modifications thereto. Any -use, reproduction, disclosure or distribution of this software and related documentation -without an express license agreement from NVIDIA CORPORATION is strictly prohibited. -""" - -""" -Script that reads in fbx files from python 2 - -This requires a configs file, which contains the command necessary to switch conda -environments to run the fbx reading script from python 2 -""" - -from ....core import logger - -import inspect -import os - -import numpy as np - -# Get the current folder to import the config file -current_folder = os.path.realpath( - os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]) -) - - -def fbx_to_array(fbx_file_path, fbx_configs, root_joint, fps): - """ - Reads an fbx file to an array. - - Currently reading of the frame time is not supported. 
120 fps is hard coded TODO - - :param fbx_file_path: str, file path to fbx - :return: tuple with joint_names, parents, transforms, frame time - """ - - # Ensure the file path is valid - fbx_file_path = os.path.abspath(fbx_file_path) - assert os.path.exists(fbx_file_path) - - # Switch directories to the utils folder to ensure the reading works - previous_cwd = os.getcwd() - os.chdir(current_folder) - - # Call the python 2.7 script - temp_file_path = os.path.join(current_folder, fbx_configs["tmp_path"]) - python_path = fbx_configs["fbx_py27_path"] - logger.info("executing python script to read fbx data using Autodesk FBX SDK...") - command = '{} fbx_py27_backend.py "{}" "{}" "{}" "{}"'.format( - python_path, fbx_file_path, temp_file_path, root_joint, fps - ) - logger.debug("executing command: {}".format(command)) - os.system(command) - logger.info( - "executing python script to read fbx data using Autodesk FBX SDK... done" - ) - - with open(temp_file_path, "rb") as f: - data = np.load(f) - output = ( - data["names"], - data["parents"], - data["transforms"], - data["fps"], - ) - - # Remove the temporary file - os.remove(temp_file_path) - - # Return the os to its previous cwd, otherwise reading multiple files might fail - os.chdir(previous_cwd) - return output diff --git a/smpl_sim/envs/nv/poselib/poselib/skeleton/skeleton3d.py b/smpl_sim/envs/nv/poselib/poselib/skeleton/skeleton3d.py deleted file mode 100644 index 605456c..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/skeleton/skeleton3d.py +++ /dev/null @@ -1,1269 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import os -import xml.etree.ElementTree as ET -from collections import OrderedDict -from typing import List, Optional, Type, Dict - -import numpy as np -import torch - -from ..core import * -from .backend.fbx.fbx_read_wrapper import fbx_to_array -import scipy.ndimage.filters as filters - - -class SkeletonTree(Serializable): - """ - A skeleton tree gives a complete description of a rigid skeleton. It describes a tree structure - over a list of nodes with their names indicated by strings. Each edge in the tree has a local - translation associated with it which describes the distance between the two nodes that it - connects. 
- - Basic Usage: - >>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__) - >>> t - SkeletonTree( - node_names=['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot'], - parent_indices=tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11]), - local_translation=tensor([[ 0.0000, 0.0000, 0.7500], - [ 0.0000, 0.0000, 0.0000], - [ 0.2000, 0.2000, 0.0000], - [ 0.2000, 0.2000, 0.0000], - [ 0.0000, 0.0000, 0.0000], - [-0.2000, 0.2000, 0.0000], - [-0.2000, 0.2000, 0.0000], - [ 0.0000, 0.0000, 0.0000], - [-0.2000, -0.2000, 0.0000], - [-0.2000, -0.2000, 0.0000], - [ 0.0000, 0.0000, 0.0000], - [ 0.2000, -0.2000, 0.0000], - [ 0.2000, -0.2000, 0.0000]]) - ) - >>> t.node_names - ['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot'] - >>> t.parent_indices - tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11]) - >>> t.local_translation - tensor([[ 0.0000, 0.0000, 0.7500], - [ 0.0000, 0.0000, 0.0000], - [ 0.2000, 0.2000, 0.0000], - [ 0.2000, 0.2000, 0.0000], - [ 0.0000, 0.0000, 0.0000], - [-0.2000, 0.2000, 0.0000], - [-0.2000, 0.2000, 0.0000], - [ 0.0000, 0.0000, 0.0000], - [-0.2000, -0.2000, 0.0000], - [-0.2000, -0.2000, 0.0000], - [ 0.0000, 0.0000, 0.0000], - [ 0.2000, -0.2000, 0.0000], - [ 0.2000, -0.2000, 0.0000]]) - >>> t.parent_of('front_left_leg') - 'torso' - >>> t.index('front_right_foot') - 6 - >>> t[2] - 'aux_1' - """ - - __example_mjcf_path__ = os.path.join(os.path.dirname(os.path.realpath(__file__)), "tests/ant.xml") - - def __init__(self, node_names, parent_indices, local_translation): - """ - :param node_names: a list of names for each tree node - :type node_names: List[str] - :param parent_indices: an int32-typed tensor that represents the edge to its parent.\ - -1 represents the root node - :type parent_indices: Tensor - :param local_translation: a 3d vector that gives local translation information - :type local_translation: Tensor - """ - ln, lp, ll = len(node_names), len(parent_indices), len(local_translation) - assert len(set((ln, lp, ll))) == 1 - self._node_names = node_names - self._parent_indices = parent_indices.long() - self._local_translation = local_translation - self._node_indices = {self.node_names[i]: i for i in range(len(self))} - - def __len__(self): - """ number of nodes in the skeleton tree """ - return len(self.node_names) - - def __iter__(self): - """ iterator that iterate through the name of each node """ - yield from self.node_names - - def __getitem__(self, item): - """ get the name of the node given the index """ - return self.node_names[item] - - def __repr__(self): - return ("SkeletonTree(\n node_names={},\n parent_indices={}," - "\n local_translation={}\n)".format( - self._indent(repr(self.node_names)), - self._indent(repr(self.parent_indices)), - self._indent(repr(self.local_translation)), - )) - - def _indent(self, s): - return "\n ".join(s.split("\n")) - - @property - def node_names(self): - return self._node_names - - @property - def parent_indices(self): - return self._parent_indices - - @property - def local_translation(self): - return self._local_translation - - @property - def num_joints(self): - """ number of nodes in the skeleton tree """ - return len(self) - - @classmethod - def from_dict(cls, dict_repr, *args, **kwargs): - return cls( - list(map(str, 
dict_repr["node_names"])), - TensorUtils.from_dict(dict_repr["parent_indices"], *args, **kwargs), - TensorUtils.from_dict(dict_repr["local_translation"], *args, **kwargs), - ) - - def to_dict(self): - return OrderedDict([ - ("node_names", self.node_names), - ("parent_indices", tensor_to_dict(self.parent_indices)), - ("local_translation", tensor_to_dict(self.local_translation)), - ]) - - @classmethod - def from_mjcf(cls, path: str) -> "SkeletonTree": - """ - Parses a mujoco xml scene description file and returns a Skeleton Tree. - We use the model attribute at the root as the name of the tree. - - :param path: - :type path: string - :return: The skeleton tree constructed from the mjcf file - :rtype: SkeletonTree - """ - tree = ET.parse(path) - xml_doc_root = tree.getroot() - xml_world_body = xml_doc_root.find("worldbody") - if xml_world_body is None: - raise ValueError("MJCF parsed incorrectly please verify it.") - # assume this is the root - xml_body_root = xml_world_body.find("body") - if xml_body_root is None: - raise ValueError("MJCF parsed incorrectly please verify it.") - - node_names = [] - parent_indices = [] - local_translation = [] - - # recursively adding all nodes into the skel_tree - def _add_xml_node(xml_node, parent_index, node_index): - node_name = xml_node.attrib.get("name") - # parse the local translation into float list - try: - pos = np.fromstring(xml_node.attrib.get("pos"), dtype=float, sep=" ") - except: - # if there is no pos attribute, the default is (0,0,0) - # https://mujoco.readthedocs.io/en/stable/XMLreference.html#body-pos - pos = np.array([0.,0.,0.], dtype=float) - node_names.append(node_name) - parent_indices.append(parent_index) - local_translation.append(pos) - curr_index = node_index - node_index += 1 - for next_node in xml_node.findall("body"): - node_index = _add_xml_node(next_node, curr_index, node_index) - return node_index - - _add_xml_node(xml_body_root, -1, 0) - - return cls( - node_names, - torch.from_numpy(np.array(parent_indices, dtype=np.int32)), - torch.from_numpy(np.array(local_translation, dtype=np.float32)), - ) - - def parent_of(self, node_name): - """ get the name of the parent of the given node - - :param node_name: the name of the node - :type node_name: string - :rtype: string - """ - return self[int(self.parent_indices[self.index(node_name)].item())] - - def index(self, node_name): - """ get the index of the node - - :param node_name: the name of the node - :type node_name: string - :rtype: int - """ - return self._node_indices[node_name] - - def drop_nodes_by_names(self, node_names: List[str], pairwise_translation=None) -> "SkeletonTree": - new_length = len(self) - len(node_names) - new_node_names = [] - new_local_translation = torch.zeros(new_length, 3, dtype=self.local_translation.dtype) - new_parent_indices = torch.zeros(new_length, dtype=self.parent_indices.dtype) - parent_indices = self.parent_indices.numpy() - new_node_indices: dict = {} - new_node_index = 0 - for node_index in range(len(self)): - if self[node_index] in node_names: - continue - tb_node_index = parent_indices[node_index] - if tb_node_index != -1: - local_translation = self.local_translation[node_index, :] - while tb_node_index != -1 and self[tb_node_index] in node_names: - local_translation += self.local_translation[tb_node_index, :] - tb_node_index = parent_indices[tb_node_index] - assert tb_node_index != -1, "the root node cannot be dropped" - - if pairwise_translation is not None: - local_translation = pairwise_translation[tb_node_index, node_index, :] - else: 
- local_translation = self.local_translation[node_index, :] - - new_node_names.append(self[node_index]) - new_local_translation[new_node_index, :] = local_translation - if tb_node_index == -1: - new_parent_indices[new_node_index] = -1 - else: - new_parent_indices[new_node_index] = new_node_indices[self[tb_node_index]] - new_node_indices[self[node_index]] = new_node_index - new_node_index += 1 - - return SkeletonTree(new_node_names, new_parent_indices, new_local_translation) - - def keep_nodes_by_names(self, node_names: List[str], pairwise_translation=None) -> "SkeletonTree": - nodes_to_drop = list(filter(lambda x: x not in node_names, self)) - return self.drop_nodes_by_names(nodes_to_drop, pairwise_translation) - - -class SkeletonState(Serializable): - """ - A skeleton state contains all the information needed to describe a static state of a skeleton. - It requires a skeleton tree, local/global rotation at each joint and the root translation. - - Example: - >>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__) - >>> zero_pose = SkeletonState.zero_pose(t) - >>> plot_skeleton_state(zero_pose) # can be imported from `.visualization.common` - [plot of the ant at zero pose - >>> local_rotation = zero_pose.local_rotation.clone() - >>> local_rotation[2] = torch.tensor([0, 0, 1, 0]) - >>> new_pose = SkeletonState.from_rotation_and_root_translation( - ... skeleton_tree=t, - ... r=local_rotation, - ... t=zero_pose.root_translation, - ... is_local=True - ... ) - >>> new_pose.local_rotation - tensor([[0., 0., 0., 1.], - [0., 0., 0., 1.], - [0., 1., 0., 0.], - [0., 0., 0., 1.], - [0., 0., 0., 1.], - [0., 0., 0., 1.], - [0., 0., 0., 1.], - [0., 0., 0., 1.], - [0., 0., 0., 1.], - [0., 0., 0., 1.], - [0., 0., 0., 1.], - [0., 0., 0., 1.], - [0., 0., 0., 1.]]) - >>> plot_skeleton_state(new_pose) # you should be able to see one of ant's leg is bent - [plot of the ant with the new pose - >>> new_pose.global_rotation # the local rotation is propagated to the global rotation at joint #3 - tensor([[0., 0., 0., 1.], - [0., 0., 0., 1.], - [0., 1., 0., 0.], - [0., 1., 0., 0.], - [0., 0., 0., 1.], - [0., 0., 0., 1.], - [0., 0., 0., 1.], - [0., 0., 0., 1.], - [0., 0., 0., 1.], - [0., 0., 0., 1.], - [0., 0., 0., 1.], - [0., 0., 0., 1.], - [0., 0., 0., 1.]]) - - Global/Local Representation (cont. 
from the previous example) - >>> new_pose.is_local - True - >>> new_pose.tensor # this will return the local rotation followed by the root translation - tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0., - 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., - 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., - 0.]) - >>> new_pose.tensor.shape # 4 * 13 (joint rotation) + 3 (root translatio - torch.Size([55]) - >>> new_pose.global_repr().is_local - False - >>> new_pose.global_repr().tensor # this will return the global rotation followed by the root translation instead - tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0., - 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., - 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., - 0.]) - >>> new_pose.global_repr().tensor.shape # 4 * 13 (joint rotation) + 3 (root translation - torch.Size([55]) - """ - - def __init__(self, tensor_backend, skeleton_tree, is_local): - self._skeleton_tree = skeleton_tree - self._is_local = is_local - self.tensor = tensor_backend.clone() - - def __len__(self): - return self.tensor.shape[0] - - @property - def rotation(self): - if not hasattr(self, "_rotation"): - self._rotation = self.tensor[..., :self.num_joints * 4].reshape(*(self.tensor.shape[:-1] + (self.num_joints, 4))) - return self._rotation - - @property - def _local_rotation(self): - if self._is_local: - return self.rotation - else: - return None - - @property - def _global_rotation(self): - if not self._is_local: - return self.rotation - else: - return None - - @property - def is_local(self): - """ is the rotation represented in local frame? - - :rtype: bool - """ - return self._is_local - - @property - def invariant_property(self): - return {"skeleton_tree": self.skeleton_tree, "is_local": self.is_local} - - @property - def num_joints(self): - """ number of joints in the skeleton tree - - :rtype: int - """ - return self.skeleton_tree.num_joints - - @property - def skeleton_tree(self): - """ skeleton tree - - :rtype: SkeletonTree - """ - return self._skeleton_tree - - @property - def root_translation(self): - """ root translation - - :rtype: Tensor - """ - if not hasattr(self, "_root_translation"): - self._root_translation = self.tensor[..., self.num_joints * 4:self.num_joints * 4 + 3] - return self._root_translation - - @property - def global_transformation(self): - """ global transformation of each joint (transform from joint frame to global frame) """ - # Forward Kinematics - if not hasattr(self, "_global_transformation"): - local_transformation = self.local_transformation - global_transformation = [] - parent_indices = self.skeleton_tree.parent_indices.numpy() - # global_transformation = local_transformation.identity_like() - for node_index in range(len(self.skeleton_tree)): - parent_index = parent_indices[node_index] - if parent_index == -1: - global_transformation.append(local_transformation[..., node_index, :]) - else: - global_transformation.append(transform_mul( - global_transformation[parent_index], - local_transformation[..., node_index, :], - )) - self._global_transformation = torch.stack(global_transformation, axis=-2) - return self._global_transformation - - @property - def global_rotation(self): - """ global rotation of each joint (rotation matrix to rotate from joint's F.O.R to global - F.O.R) """ - if self._global_rotation is None: - if not hasattr(self, "_comp_global_rotation"): - 
self._comp_global_rotation = transform_rotation(self.global_transformation) - return self._comp_global_rotation - else: - return self._global_rotation - - @property - def global_translation(self): - """ global translation of each joint """ - if not hasattr(self, "_global_translation"): - self._global_translation = transform_translation(self.global_transformation) - return self._global_translation - - @property - def global_translation_xy(self): - """ global translation in xy """ - trans_xy_data = self.global_translation.zeros_like() - trans_xy_data[..., 0:2] = self.global_translation[..., 0:2] - return trans_xy_data - - @property - def global_translation_xz(self): - """ global translation in xz """ - trans_xz_data = self.global_translation.zeros_like() - trans_xz_data[..., 0:1] = self.global_translation[..., 0:1] - trans_xz_data[..., 2:3] = self.global_translation[..., 2:3] - return trans_xz_data - - @property - def local_rotation(self): - """ the rotation from child frame to parent frame given in the order of child nodes appeared - in `.skeleton_tree.node_names` """ - if self._local_rotation is None: - if not hasattr(self, "_comp_local_rotation"): - local_rotation = quat_identity_like(self.global_rotation) - for node_index in range(len(self.skeleton_tree)): - parent_index = self.skeleton_tree.parent_indices[node_index] - if parent_index == -1: - local_rotation[..., node_index, :] = self.global_rotation[..., node_index, :] - else: - local_rotation[..., node_index, :] = quat_mul_norm( - quat_inverse(self.global_rotation[..., parent_index, :]), - self.global_rotation[..., node_index, :], - ) - self._comp_local_rotation = local_rotation - return self._comp_local_rotation - else: - return self._local_rotation - - @property - def local_transformation(self): - """ local translation + local rotation. It describes the transformation from child frame to - parent frame given in the order of child nodes appeared in `.skeleton_tree.node_names` """ - if not hasattr(self, "_local_transformation"): - self._local_transformation = transform_from_rotation_translation(r=self.local_rotation, t=self.local_translation) - return self._local_transformation - - @property - def local_translation(self): - """ local translation of the skeleton state. It is identical to the local translation in - `.skeleton_tree.local_translation` except the root translation. 
The root translation is - identical to `.root_translation` """ - if not hasattr(self, "_local_translation"): - broadcast_shape = (tuple(self.tensor.shape[:-1]) + (len(self.skeleton_tree),) + tuple(self.skeleton_tree.local_translation.shape[-1:])) - local_translation = self.skeleton_tree.local_translation.broadcast_to(*broadcast_shape).clone() - local_translation[..., 0, :] = self.root_translation - self._local_translation = local_translation - return self._local_translation - - # Root Properties - @property - def root_translation_xy(self): - """ root translation on xy """ - if not hasattr(self, "_root_translation_xy"): - self._root_translation_xy = self.global_translation_xy[..., 0, :] - return self._root_translation_xy - - @property - def global_root_rotation(self): - """ root rotation """ - if not hasattr(self, "_global_root_rotation"): - self._global_root_rotation = self.global_rotation[..., 0, :] - return self._global_root_rotation - - @property - def global_root_yaw_rotation(self): - """ root yaw rotation """ - if not hasattr(self, "_global_root_yaw_rotation"): - self._global_root_yaw_rotation = self.global_root_rotation.yaw_rotation() - return self._global_root_yaw_rotation - - # Properties relative to root - @property - def local_translation_to_root(self): - """ The 3D translation from joint frame to the root frame. """ - if not hasattr(self, "_local_translation_to_root"): - self._local_translation_to_root = (self.global_translation - self.root_translation.unsqueeze(-1)) - return self._local_translation_to_root - - @property - def local_rotation_to_root(self): - """ The 3D rotation from joint frame to the root frame. It is equivalent to - The root_R_world * world_R_node """ - return (quat_inverse(self.global_root_rotation).unsqueeze(-1) * self.global_rotation) - - def compute_forward_vector( - self, - left_shoulder_index, - right_shoulder_index, - left_hip_index, - right_hip_index, - gaussian_filter_width=20, - ): - """ Computes forward vector based on cross product of the up vector with - average of the right->left shoulder and hip vectors """ - global_positions = self.global_translation - # Perpendicular to the forward direction. - # Uses the shoulders and hips to find this. - side_direction = (global_positions[:, left_shoulder_index].numpy() - global_positions[:, right_shoulder_index].numpy() + global_positions[:, left_hip_index].numpy() - global_positions[:, right_hip_index].numpy()) - side_direction = (side_direction / np.sqrt((side_direction**2).sum(axis=-1))[..., np.newaxis]) - - # Forward direction obtained by crossing with the up direction. - forward_direction = np.cross(side_direction, np.array([[0, 1, 0]])) - - # Smooth the forward direction with a Gaussian. - # Axis 0 is the time/frame axis. 
- forward_direction = filters.gaussian_filter1d(forward_direction, gaussian_filter_width, axis=0, mode="nearest") - forward_direction = (forward_direction / np.sqrt((forward_direction**2).sum(axis=-1))[..., np.newaxis]) - - return torch.from_numpy(forward_direction) - - @staticmethod - def _to_state_vector(rot, rt): - state_shape = rot.shape[:-2] - vr = rot.reshape(*(state_shape + (-1,))) - vt = rt.broadcast_to(*state_shape + rt.shape[-1:]).reshape(*(state_shape + (-1,))) - v = torch.cat([vr, vt], axis=-1) - return v - - @classmethod - def from_dict(cls: Type["SkeletonState"], dict_repr: OrderedDict, *args, **kwargs) -> "SkeletonState": - rot = TensorUtils.from_dict(dict_repr["rotation"], *args, **kwargs) - rt = TensorUtils.from_dict(dict_repr["root_translation"], *args, **kwargs) - return cls( - SkeletonState._to_state_vector(rot, rt), - SkeletonTree.from_dict(dict_repr["skeleton_tree"], *args, **kwargs), - dict_repr["is_local"], - ) - - def to_dict(self) -> OrderedDict: - return OrderedDict([ - ("rotation", tensor_to_dict(self.rotation)), - ("root_translation", tensor_to_dict(self.root_translation)), - ("skeleton_tree", self.skeleton_tree.to_dict()), - ("is_local", self.is_local), - ]) - - @classmethod - def from_rotation_and_root_translation(cls, skeleton_tree, r, t, is_local=True): - """ - Construct a skeleton state from rotation and root translation - - :param skeleton_tree: the skeleton tree - :type skeleton_tree: SkeletonTree - :param r: rotation (either global or local) - :type r: Tensor - :param t: root translation - :type t: Tensor - :param is_local: to indicate that whether the rotation is local or global - :type is_local: bool, optional, default=True - """ - assert (r.dim() > 0), "the rotation needs to have at least 1 dimension (dim = {})".format(r.dim) - state_vec = SkeletonState._to_state_vector(r, t) - - return cls( - state_vec, - skeleton_tree=skeleton_tree, - is_local=is_local, - ) - - @classmethod - def zero_pose(cls, skeleton_tree): - """ - Construct a zero-pose skeleton state from the skeleton tree by assuming that all the local - rotation is 0 and root translation is also 0. - - :param skeleton_tree: the skeleton tree as the rigid body - :type skeleton_tree: SkeletonTree - """ - return cls.from_rotation_and_root_translation( - skeleton_tree=skeleton_tree, - r=quat_identity([skeleton_tree.num_joints]), - t=torch.zeros(3, dtype=skeleton_tree.local_translation.dtype), - is_local=True, - ) - - def local_repr(self): - """ - Convert the skeleton state into local representation. This will only affects the values of - .tensor. If the skeleton state already has `is_local=True`. This method will do nothing. - - :rtype: SkeletonState - """ - if self.is_local: - return self - return SkeletonState.from_rotation_and_root_translation( - self.skeleton_tree, - r=self.local_rotation, - t=self.root_translation, - is_local=True, - ) - - def global_repr(self): - """ - Convert the skeleton state into global representation. This will only affects the values of - .tensor. If the skeleton state already has `is_local=False`. This method will do nothing. 
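# A standalone version of the forward-vector computation just above, assuming
# `filters` refers to scipy.ndimage (gaussian_filter1d is importable from
# scipy.ndimage directly in recent scipy; the scipy.ndimage.filters submodule is
# deprecated). Shoulder/hip indices and the y-up convention mirror the arguments
# of compute_forward_vector.
import numpy as np
from scipy.ndimage import gaussian_filter1d

def smoothed_forward_vector(global_pos, left_shoulder, right_shoulder,
                            left_hip, right_hip, sigma=20):
    """global_pos: (T, J, 3) joint positions -> (T, 3) unit forward directions."""
    side = (global_pos[:, left_shoulder] - global_pos[:, right_shoulder] +
            global_pos[:, left_hip] - global_pos[:, right_hip])
    side /= np.linalg.norm(side, axis=-1, keepdims=True)
    forward = np.cross(side, np.array([[0.0, 1.0, 0.0]]))                # cross with the up axis
    forward = gaussian_filter1d(forward, sigma, axis=0, mode="nearest")  # smooth over frames
    return forward / np.linalg.norm(forward, axis=-1, keepdims=True)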
- - :rtype: SkeletonState - """ - if not self.is_local: - return self - return SkeletonState.from_rotation_and_root_translation( - self.skeleton_tree, - r=self.global_rotation, - t=self.root_translation, - is_local=False, - ) - - def _get_pairwise_average_translation(self): - global_transform_inv = transform_inverse(self.global_transformation) - p1 = global_transform_inv.unsqueeze(-2) - p2 = self.global_transformation.unsqueeze(-3) - - pairwise_translation = (transform_translation(transform_mul(p1, p2)).reshape(-1, len(self.skeleton_tree), len(self.skeleton_tree), 3).mean(axis=0)) - return pairwise_translation - - def _transfer_to(self, new_skeleton_tree: SkeletonTree): - old_indices = list(map(self.skeleton_tree.index, new_skeleton_tree)) - return SkeletonState.from_rotation_and_root_translation( - new_skeleton_tree, - r=self.global_rotation[..., old_indices, :], - t=self.root_translation, - is_local=False, - ) - - def drop_nodes_by_names(self, node_names: List[str], estimate_local_translation_from_states: bool = True) -> "SkeletonState": - """ - Drop a list of nodes from the skeleton and re-compute the local rotation to match the - original joint position as much as possible. - - :param node_names: a list node names that specifies the nodes need to be dropped - :type node_names: List of strings - :param estimate_local_translation_from_states: the boolean indicator that specifies whether\ - or not to re-estimate the local translation from the states (avg.) - :type estimate_local_translation_from_states: boolean - :rtype: SkeletonState - """ - if estimate_local_translation_from_states: - pairwise_translation = self._get_pairwise_average_translation() - else: - pairwise_translation = None - new_skeleton_tree = self.skeleton_tree.drop_nodes_by_names(node_names, pairwise_translation) - return self._transfer_to(new_skeleton_tree) - - def keep_nodes_by_names(self, node_names: List[str], estimate_local_translation_from_states: bool = True) -> "SkeletonState": - """ - Keep a list of nodes and drop all other nodes from the skeleton and re-compute the local - rotation to match the original joint position as much as possible. - - :param node_names: a list node names that specifies the nodes need to be dropped - :type node_names: List of strings - :param estimate_local_translation_from_states: the boolean indicator that specifies whether\ - or not to re-estimate the local translation from the states (avg.) 
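# A numpy sketch of what `_get_pairwise_average_translation` above computes:
# for every joint pair (i, j), joint j's position expressed in joint i's frame,
# averaged over all frames. It is what re-estimates bone offsets when nodes are
# dropped. scipy's Rotation stands in for the poselib transform helpers.
import numpy as np
from scipy.spatial.transform import Rotation as R

def pairwise_average_translation(global_rot, global_pos):
    """global_rot: (T, J, 4) xyzw, global_pos: (T, J, 3) -> (J, J, 3)."""
    num_frames, num_joints, _ = global_pos.shape
    out = np.zeros((num_frames, num_joints, num_joints, 3))
    for i in range(num_joints):
        world_to_i = R.from_quat(global_rot[:, i]).inv()   # one rotation per frame
        for j in range(num_joints):
            out[:, i, j] = world_to_i.apply(global_pos[:, j] - global_pos[:, i])
    return out.mean(axis=0)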
- :type estimate_local_translation_from_states: boolean - :rtype: SkeletonState - """ - return self.drop_nodes_by_names( - list(filter(lambda x: (x not in node_names), self)), - estimate_local_translation_from_states, - ) - - def _remapped_to(self, joint_mapping: Dict[str, str], target_skeleton_tree: SkeletonTree): - joint_mapping_inv = {target: source for source, target in joint_mapping.items()} - reduced_target_skeleton_tree = target_skeleton_tree.keep_nodes_by_names(list(joint_mapping_inv)) - n_joints = ( - len(joint_mapping), - len(self.skeleton_tree), - len(reduced_target_skeleton_tree), - ) - assert (len(set(n_joints)) == 1), "the joint mapping is not consistent with the skeleton trees" - source_indices = list(map( - lambda x: self.skeleton_tree.index(joint_mapping_inv[x]), - reduced_target_skeleton_tree, - )) - target_local_rotation = self.local_rotation[..., source_indices, :] - return SkeletonState.from_rotation_and_root_translation( - skeleton_tree=reduced_target_skeleton_tree, - r=target_local_rotation, - t=self.root_translation, - is_local=True, - ) - - def retarget_to( - self, - joint_mapping: Dict[str, str], - source_tpose_local_rotation, - source_tpose_root_translation: np.ndarray, - target_skeleton_tree: SkeletonTree, - target_tpose_local_rotation, - target_tpose_root_translation: np.ndarray, - rotation_to_target_skeleton, - scale_to_target_skeleton: float, - z_up: bool = True, - ) -> "SkeletonState": - """ - Retarget the skeleton state to a target skeleton tree. This is a naive retarget - implementation with rough approximations. The function follows the procedures below. - - Steps: - 1. Drop the joints from the source (self) that do not belong to the joint mapping\ - with an implementation that is similar to "keep_nodes_by_names()" - take a\ - look at the function doc for more details (same for source_tpose) - - 2. Rotate the source state and the source tpose by "rotation_to_target_skeleton"\ - to align the source with the target orientation - - 3. Extract the root translation and normalize it to match the scale of the target\ - skeleton - - 4. Extract the global rotation from source state relative to source tpose and\ - re-apply the relative rotation to the target tpose to construct the global\ - rotation after retargetting - - 5. Combine the computed global rotation and the root translation from 3 and 4 to\ - complete the retargeting. - - 6. Make feet on the ground (global translation z) - - :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \ - the target skeleton - :type joint_mapping: Dict[str, str] - - :param source_tpose_local_rotation: the local rotation of the source skeleton - :type source_tpose_local_rotation: Tensor - - :param source_tpose_root_translation: the root translation of the source tpose - :type source_tpose_root_translation: np.ndarray - - :param target_skeleton_tree: the target skeleton tree - :type target_skeleton_tree: SkeletonTree - - :param target_tpose_local_rotation: the local rotation of the target skeleton - :type target_tpose_local_rotation: Tensor - - :param target_tpose_root_translation: the root translation of the target tpose - :type target_tpose_root_translation: Tensor - - :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\ - skeleton to align with the target skeleton. 
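# The index bookkeeping done by `_remapped_to` above, sketched on plain name
# lists: invert the source->target mapping, check that every mapped joint
# survived the reduction, then gather source indices in target order.
def remap_source_indices(joint_mapping, source_names, reduced_target_names):
    inverse = {target: source for source, target in joint_mapping.items()}
    sizes = {len(joint_mapping), len(source_names), len(reduced_target_names)}
    assert len(sizes) == 1, "the joint mapping is not consistent with the skeleton trees"
    return [source_names.index(inverse[name]) for name in reduced_target_names]

# e.g. remap_source_indices({"hips": "pelvis", "neckA": "neck"},
#                           ["hips", "neckA"], ["pelvis", "neck"]) -> [0, 1]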
Essentially the rotation is t_R_s, where t is\ - the frame of reference of the target skeleton and s is the frame of reference of the source\ - skeleton - :type rotation_to_target_skeleton: Tensor - :param scale_to_target_skeleton: the factor that needs to be multiplied from source\ - skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \ - factor needs to be 0.01. - :type scale_to_target_skeleton: float - :rtype: SkeletonState - """ - - # STEP 0: Preprocess - source_tpose = SkeletonState.from_rotation_and_root_translation( - skeleton_tree=self.skeleton_tree, - r=source_tpose_local_rotation, - t=source_tpose_root_translation, - is_local=True, - ) - target_tpose = SkeletonState.from_rotation_and_root_translation( - skeleton_tree=target_skeleton_tree, - r=target_tpose_local_rotation, - t=target_tpose_root_translation, - is_local=True, - ) - - # STEP 1: Drop the irrelevant joints - pairwise_translation = self._get_pairwise_average_translation() - node_names = list(joint_mapping) - new_skeleton_tree = self.skeleton_tree.keep_nodes_by_names(node_names, pairwise_translation) - - # TODO: combine the following steps before STEP 3 - source_tpose = source_tpose._transfer_to(new_skeleton_tree) - source_state = self._transfer_to(new_skeleton_tree) - - source_tpose = source_tpose._remapped_to(joint_mapping, target_skeleton_tree) - source_state = source_state._remapped_to(joint_mapping, target_skeleton_tree) - - # STEP 2: Rotate the source to align with the target - new_local_rotation = source_tpose.local_rotation.clone() - new_local_rotation[..., 0, :] = quat_mul_norm(rotation_to_target_skeleton, source_tpose.local_rotation[..., 0, :]) - - source_tpose = SkeletonState.from_rotation_and_root_translation( - skeleton_tree=source_tpose.skeleton_tree, - r=new_local_rotation, - t=quat_rotate(rotation_to_target_skeleton, source_tpose.root_translation), - is_local=True, - ) - - new_local_rotation = source_state.local_rotation.clone() - new_local_rotation[..., 0, :] = quat_mul_norm(rotation_to_target_skeleton, source_state.local_rotation[..., 0, :]) - source_state = SkeletonState.from_rotation_and_root_translation( - skeleton_tree=source_state.skeleton_tree, - r=new_local_rotation, - t=quat_rotate(rotation_to_target_skeleton, source_state.root_translation), - is_local=True, - ) - - # STEP 3: Normalize to match the target scale - root_translation_diff = (source_state.root_translation - source_tpose.root_translation) * scale_to_target_skeleton - - # STEP 4: the global rotation from source state relative to source tpose and - # re-apply to the target - current_skeleton_tree = source_state.skeleton_tree - target_tpose_global_rotation = source_state.global_rotation[0, :].clone() - for current_index, name in enumerate(current_skeleton_tree): - if name in target_tpose.skeleton_tree: - target_tpose_global_rotation[current_index, :] = target_tpose.global_rotation[target_tpose.skeleton_tree.index(name), :] - - global_rotation_diff = quat_mul_norm(source_state.global_rotation, quat_inverse(source_tpose.global_rotation)) - new_global_rotation = quat_mul_norm(global_rotation_diff, target_tpose_global_rotation) - - # STEP 5: Putting 3 and 4 together - current_skeleton_tree = source_state.skeleton_tree - shape = source_state.global_rotation.shape[:-1] - shape = shape[:-1] + target_tpose.global_rotation.shape[-2:-1] - new_global_rotation_output = quat_identity(shape) - for current_index, name in enumerate(target_skeleton_tree): - while name not in current_skeleton_tree: - name = 
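# The core of STEP 3/4 above in isolation: retargeting keeps each joint's
# rotation *relative to its own t-pose* and re-applies it to the target t-pose,
# while the root translation offset is scaled into the target's units. This is
# only the per-joint math; the full method also resolves joints missing from the
# mapping through their parents. scipy Rotation (xyzw) replaces quat_mul_norm /
# quat_inverse here.
import numpy as np
from scipy.spatial.transform import Rotation as R

def transfer_pose(src_global, src_tpose_global, tgt_tpose_global,
                  src_root, src_tpose_root, tgt_tpose_root, scale):
    """Rotations are (J, 4) xyzw arrays; translations are (3,) arrays."""
    relative = R.from_quat(src_global) * R.from_quat(src_tpose_global).inv()
    new_global = (relative * R.from_quat(tgt_tpose_global)).as_quat()
    new_root = tgt_tpose_root + (src_root - src_tpose_root) * scale
    return new_global, new_root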
target_skeleton_tree.parent_of(name) - parent_index = current_skeleton_tree.index(name) - new_global_rotation_output[:, current_index, :] = new_global_rotation[:, parent_index, :] - - source_state = SkeletonState.from_rotation_and_root_translation( - skeleton_tree=target_skeleton_tree, - r=new_global_rotation_output, - t=target_tpose.root_translation + root_translation_diff, - is_local=False, - ).local_repr() - - return source_state - - def retarget_to_by_tpose( - self, - joint_mapping: Dict[str, str], - source_tpose: "SkeletonState", - target_tpose: "SkeletonState", - rotation_to_target_skeleton, - scale_to_target_skeleton: float, - ) -> "SkeletonState": - """ - Retarget the skeleton state to a target skeleton tree. This is a naive retarget - implementation with rough approximations. See the method `retarget_to()` for more information - - :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \ - the target skeleton - :type joint_mapping: Dict[str, str] - - :param source_tpose: t-pose of the source skeleton - :type source_tpose: SkeletonState - - :param target_tpose: t-pose of the target skeleton - :type target_tpose: SkeletonState - - :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\ - skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\ - the frame of reference of the target skeleton and s is the frame of reference of the source\ - skeleton - :type rotation_to_target_skeleton: Tensor - :param scale_to_target_skeleton: the factor that needs to be multiplied from source\ - skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \ - factor needs to be 0.01. - :type scale_to_target_skeleton: float - :rtype: SkeletonState - """ - assert (len(source_tpose.shape) == 0 and len(target_tpose.shape) == 0), "the retargeting script currently doesn't support vectorized operations" - return self.retarget_to( - joint_mapping, - source_tpose.local_rotation, - source_tpose.root_translation, - target_tpose.skeleton_tree, - target_tpose.local_rotation, - target_tpose.root_translation, - rotation_to_target_skeleton, - scale_to_target_skeleton, - ) - - -class SkeletonMotion(SkeletonState): - - def __init__(self, tensor_backend, skeleton_tree, is_local, fps, *args, **kwargs): - self._fps = fps - super().__init__(tensor_backend, skeleton_tree, is_local, *args, **kwargs) - - def clone(self): - return SkeletonMotion(self.tensor.clone(), self.skeleton_tree, self._is_local, self._fps) - - @property - def invariant_property(self): - return { - "skeleton_tree": self.skeleton_tree, - "is_local": self.is_local, - "fps": self.fps, - } - - @property - def global_velocity(self): - """ global velocity """ - curr_index = self.num_joints * 4 + 3 - return self.tensor[..., curr_index:curr_index + self.num_joints * 3].reshape(*(self.tensor.shape[:-1] + (self.num_joints, 3))) - - @property - def global_angular_velocity(self): - """ global angular velocity """ - curr_index = self.num_joints * 7 + 3 - return self.tensor[..., curr_index:curr_index + self.num_joints * 3].reshape(*(self.tensor.shape[:-1] + (self.num_joints, 3))) - - @property - def fps(self): - """ number of frames per second """ - return self._fps - - @property - def time_delta(self): - """ time between two adjacent frames """ - return 1.0 / self.fps - - @property - def global_root_velocity(self): - """ global root velocity """ - return self.global_velocity[..., 0, :] - - @property - def 
global_root_angular_velocity(self): - """ global root angular velocity """ - return self.global_angular_velocity[..., 0, :] - - @classmethod - def from_state_vector_and_velocity( - cls, - skeleton_tree, - state_vector, - global_velocity, - global_angular_velocity, - is_local, - fps, - ): - """ - Construct a skeleton motion from a skeleton state vector, global velocity and angular - velocity at each joint. - - :param skeleton_tree: the skeleton tree that the motion is based on - :type skeleton_tree: SkeletonTree - :param state_vector: the state vector from the skeleton state by `.tensor` - :type state_vector: Tensor - :param global_velocity: the global velocity at each joint - :type global_velocity: Tensor - :param global_angular_velocity: the global angular velocity at each joint - :type global_angular_velocity: Tensor - :param is_local: if the rotation ins the state vector is given in local frame - :type is_local: boolean - :param fps: number of frames per second - :type fps: int - - :rtype: SkeletonMotion - """ - state_shape = state_vector.shape[:-1] - v = global_velocity.reshape(*(state_shape + (-1,))) - av = global_angular_velocity.reshape(*(state_shape + (-1,))) - new_state_vector = torch.cat([state_vector, v, av], axis=-1) - return cls( - new_state_vector, - skeleton_tree=skeleton_tree, - is_local=is_local, - fps=fps, - ) - - @classmethod - def from_skeleton_state(cls: Type["SkeletonMotion"], skeleton_state: SkeletonState, fps: int): - """ - Construct a skeleton motion from a skeleton state. The velocities are estimated using second - order guassian filter along the last axis. The skeleton state must have at least .dim >= 1 - - :param skeleton_state: the skeleton state that the motion is based on - :type skeleton_state: SkeletonState - :param fps: number of frames per second - :type fps: int - - :rtype: SkeletonMotion - """ - assert (type(skeleton_state) == SkeletonState), "expected type of {}, got {}".format(SkeletonState, type(skeleton_state)) - global_velocity = SkeletonMotion._compute_velocity(p=skeleton_state.global_translation, time_delta=1 / fps) - global_angular_velocity = SkeletonMotion._compute_angular_velocity(r=skeleton_state.global_rotation, time_delta=1 / fps) - return cls.from_state_vector_and_velocity( - skeleton_tree=skeleton_state.skeleton_tree, - state_vector=skeleton_state.tensor, - global_velocity=global_velocity, - global_angular_velocity=global_angular_velocity, - is_local=skeleton_state.is_local, - fps=fps, - ) - - @staticmethod - def _to_state_vector(rot, rt, vel, avel): - state_shape = rot.shape[:-2] - skeleton_state_v = SkeletonState._to_state_vector(rot, rt) - v = vel.reshape(*(state_shape + (-1,))) - av = avel.reshape(*(state_shape + (-1,))) - skeleton_motion_v = torch.cat([skeleton_state_v, v, av], axis=-1) - return skeleton_motion_v - - @classmethod - def from_dict(cls: Type["SkeletonMotion"], dict_repr: OrderedDict, *args, **kwargs) -> "SkeletonMotion": - rot = TensorUtils.from_dict(dict_repr["rotation"], *args, **kwargs) - rt = TensorUtils.from_dict(dict_repr["root_translation"], *args, **kwargs) - vel = TensorUtils.from_dict(dict_repr["global_velocity"], *args, **kwargs) - avel = TensorUtils.from_dict(dict_repr["global_angular_velocity"], *args, **kwargs) - return cls( - SkeletonMotion._to_state_vector(rot, rt, vel, avel), - skeleton_tree=SkeletonTree.from_dict(dict_repr["skeleton_tree"], *args, **kwargs), - is_local=dict_repr["is_local"], - fps=dict_repr["fps"], - ) - - def to_dict(self) -> OrderedDict: - return OrderedDict([ - ("rotation", 
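# How the flat SkeletonMotion tensor assembled above is laid out along its last
# axis: [4*J joint rotations | 3 root translation | 3*J linear velocities |
# 3*J angular velocities]. A numpy sketch of the slicing used by the velocity
# properties and from_state_vector_and_velocity:
import numpy as np

def split_motion_vector(tensor, num_joints):
    """tensor: (..., 10*J + 3) -> dict of reshaped views per quantity."""
    J = num_joints
    lead = tensor.shape[:-1]
    return {
        "rotation":                tensor[..., : 4 * J].reshape(*lead, J, 4),
        "root_translation":        tensor[..., 4 * J : 4 * J + 3],
        "global_velocity":         tensor[..., 4 * J + 3 : 7 * J + 3].reshape(*lead, J, 3),
        "global_angular_velocity": tensor[..., 7 * J + 3 : 10 * J + 3].reshape(*lead, J, 3),
    }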
tensor_to_dict(self.rotation)), - ("root_translation", tensor_to_dict(self.root_translation)), - ("global_velocity", tensor_to_dict(self.global_velocity)), - ("global_angular_velocity", tensor_to_dict(self.global_angular_velocity)), - ("skeleton_tree", self.skeleton_tree.to_dict()), - ("is_local", self.is_local), - ("fps", self.fps), - ]) - - @classmethod - def from_fbx( - cls: Type["SkeletonMotion"], - fbx_file_path, - fbx_configs, - skeleton_tree=None, - is_local=True, - fps=120, - root_joint="", - root_trans_index=0, - *args, - **kwargs, - ) -> "SkeletonMotion": - """ - Construct a skeleton motion from a fbx file (TODO - generalize this). If the skeleton tree - is not given, it will use the first frame of the mocap to construct the skeleton tree. - - :param fbx_file_path: the path of the fbx file - :type fbx_file_path: string - :param fbx_configs: the configuration in terms of {"tmp_path": ..., "fbx_py27_path": ...} - :type fbx_configs: dict - :param skeleton_tree: the optional skeleton tree that the rotation will be applied to - :type skeleton_tree: SkeletonTree, optional - :param is_local: the state vector uses local or global rotation as the representation - :type is_local: bool, optional, default=True - :rtype: SkeletonMotion - """ - joint_names, joint_parents, transforms, fps = fbx_to_array(fbx_file_path, fbx_configs, root_joint, fps) - # swap the last two axis to match the convention - local_transform = euclidean_to_transform(transformation_matrix=torch.from_numpy(np.swapaxes(np.array(transforms), -1, -2),).float()) - local_rotation = transform_rotation(local_transform) - root_translation = transform_translation(local_transform)[..., root_trans_index, :] - joint_parents = torch.from_numpy(np.array(joint_parents)).int() - - if skeleton_tree is None: - local_translation = transform_translation(local_transform).reshape(-1, len(joint_parents), 3)[0] - skeleton_tree = SkeletonTree(joint_names, joint_parents, local_translation) - skeleton_state = SkeletonState.from_rotation_and_root_translation(skeleton_tree, r=local_rotation, t=root_translation, is_local=True) - if not is_local: - skeleton_state = skeleton_state.global_repr() - return cls.from_skeleton_state(skeleton_state=skeleton_state, fps=fps) - - @staticmethod - def _compute_velocity(p, time_delta, guassian_filter=True): - velocity = np.gradient(p.numpy(), axis=-3) / time_delta - if guassian_filter: - velocity = torch.from_numpy(filters.gaussian_filter1d(velocity, 2, axis=-3, mode="nearest")).to(p) - else: - velocity = torch.from_numpy(velocity).to(p) - - return velocity - - @staticmethod - def _compute_angular_velocity(r, time_delta: float, guassian_filter=True): - # assume the second last dimension is the time axis - diff_quat_data = quat_identity_like(r).to(r) - diff_quat_data[..., :-1, :, :] = quat_mul_norm(r[..., 1:, :, :], quat_inverse(r[..., :-1, :, :])) - diff_angle, diff_axis = quat_angle_axis(diff_quat_data) - angular_velocity = diff_axis * diff_angle.unsqueeze(-1) / time_delta - if guassian_filter: - angular_velocity = torch.from_numpy(filters.gaussian_filter1d(angular_velocity.numpy(), 2, axis=-3, mode="nearest"),) - return angular_velocity - - def crop(self, start: int, end: int, fps: Optional[int] = None): - """ - Crop the motion along its last axis. This is equivalent to performing a slicing on the - object with [..., start: end: skip_every] where skip_every = old_fps / fps. Note that the - new fps provided must be a factor of the original fps. 
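# Standalone versions of the `_compute_velocity` / `_compute_angular_velocity`
# estimators above: linear velocity is a smoothed time gradient of positions;
# angular velocity comes from frame-to-frame quaternion deltas converted to
# axis-angle. Time is axis 0 here, and scipy Rotation (xyzw) replaces the
# poselib quaternion helpers.
import numpy as np
from scipy.ndimage import gaussian_filter1d
from scipy.spatial.transform import Rotation as R

def compute_velocity(positions, dt, smooth=True):
    """positions: (T, J, 3) -> (T, J, 3) linear velocities."""
    vel = np.gradient(positions, axis=0) / dt
    return gaussian_filter1d(vel, 2, axis=0, mode="nearest") if smooth else vel

def compute_angular_velocity(quats, dt, smooth=True):
    """quats: (T, J, 4) xyzw -> (T, J, 3) angular velocities (last frame stays zero)."""
    num_frames, num_joints, _ = quats.shape
    avel = np.zeros((num_frames, num_joints, 3))
    for j in range(num_joints):
        q = R.from_quat(quats[:, j])
        delta = q[1:] * q[:-1].inv()             # rotation carrying frame t to frame t+1
        avel[:-1, j] = delta.as_rotvec() / dt    # axis * angle per unit time
    return gaussian_filter1d(avel, 2, axis=0, mode="nearest") if smooth else avel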
- - :param start: the beginning frame index - :type start: int - :param end: the ending frame index - :type end: int - :param fps: number of frames per second in the output (if not given the original fps will be used) - :type fps: int, optional - :rtype: SkeletonMotion - """ - if fps is None: - new_fps = int(self.fps) - old_fps = int(self.fps) - else: - new_fps = int(fps) - old_fps = int(self.fps) - assert old_fps % fps == 0, ("the resampling doesn't support fps with non-integer division " - "from the original fps: {} => {}".format(old_fps, fps)) - skip_every = old_fps // new_fps - s = slice(start, end, skip_every) - z = self[..., s] - - rot = z.local_rotation if z.is_local else z.global_rotation - rt = z.root_translation - vel = z.global_velocity - avel = z.global_angular_velocity - return SkeletonMotion( - SkeletonMotion._to_state_vector(rot, rt, vel, avel), - skeleton_tree=z.skeleton_tree, - is_local=z.is_local, - fps=new_fps, - ) - - def retarget_to( - self, - joint_mapping: Dict[str, str], - source_tpose_local_rotation, - source_tpose_root_translation: np.ndarray, - target_skeleton_tree: "SkeletonTree", - target_tpose_local_rotation, - target_tpose_root_translation: np.ndarray, - rotation_to_target_skeleton, - scale_to_target_skeleton: float, - z_up: bool = True, - ) -> "SkeletonMotion": - """ - Same as the one in :class:`SkeletonState`. This method discards all velocity information before - retargeting and re-estimate the velocity after the retargeting. The same fps is used in the - new retargetted motion. - - :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \ - the target skeleton - :type joint_mapping: Dict[str, str] - - :param source_tpose_local_rotation: the local rotation of the source skeleton - :type source_tpose_local_rotation: Tensor - - :param source_tpose_root_translation: the root translation of the source tpose - :type source_tpose_root_translation: np.ndarray - - :param target_skeleton_tree: the target skeleton tree - :type target_skeleton_tree: SkeletonTree - - :param target_tpose_local_rotation: the local rotation of the target skeleton - :type target_tpose_local_rotation: Tensor - - :param target_tpose_root_translation: the root translation of the target tpose - :type target_tpose_root_translation: Tensor - - :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\ - skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\ - the frame of reference of the target skeleton and s is the frame of reference of the source\ - skeleton - :type rotation_to_target_skeleton: Tensor - :param scale_to_target_skeleton: the factor that needs to be multiplied from source\ - skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \ - factor needs to be 0.01. - :type scale_to_target_skeleton: float - :rtype: SkeletonMotion - """ - return SkeletonMotion.from_skeleton_state( - super().retarget_to( - joint_mapping, - source_tpose_local_rotation, - source_tpose_root_translation, - target_skeleton_tree, - target_tpose_local_rotation, - target_tpose_root_translation, - rotation_to_target_skeleton, - scale_to_target_skeleton, - z_up, - ), - self.fps, - ) - - def retarget_to_by_tpose( - self, - joint_mapping: Dict[str, str], - source_tpose: "SkeletonState", - target_tpose: "SkeletonState", - rotation_to_target_skeleton, - scale_to_target_skeleton: float, - z_up: bool = True, - ) -> "SkeletonMotion": - """ - Same as the one in :class:`SkeletonState`. 
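# The frame arithmetic behind `crop()` above: resampling only supports integer
# fps ratios, so the requested fps must divide the original one, and the crop
# reduces to a plain strided slice along the time axis.
def crop_slice(start, end, old_fps, new_fps=None):
    new_fps = old_fps if new_fps is None else new_fps
    assert old_fps % new_fps == 0, f"cannot resample {old_fps} fps to {new_fps} fps"
    return slice(start, end, old_fps // new_fps)

# e.g. cropping a 120 fps clip to frames [0, 240) at 30 fps keeps every 4th frame:
# crop_slice(0, 240, 120, 30) == slice(0, 240, 4)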
This method discards all velocity information before - retargeting and re-estimate the velocity after the retargeting. The same fps is used in the - new retargetted motion. - - :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \ - the target skeleton - :type joint_mapping: Dict[str, str] - - :param source_tpose: t-pose of the source skeleton - :type source_tpose: SkeletonState - - :param target_tpose: t-pose of the target skeleton - :type target_tpose: SkeletonState - - :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\ - skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\ - the frame of reference of the target skeleton and s is the frame of reference of the source\ - skeleton - :type rotation_to_target_skeleton: Tensor - :param scale_to_target_skeleton: the factor that needs to be multiplied from source\ - skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \ - factor needs to be 0.01. - :type scale_to_target_skeleton: float - :rtype: SkeletonMotion - """ - return self.retarget_to( - joint_mapping, - source_tpose.local_rotation, - source_tpose.root_translation, - target_tpose.skeleton_tree, - target_tpose.local_rotation, - target_tpose.root_translation, - rotation_to_target_skeleton, - scale_to_target_skeleton, - z_up, - ) diff --git a/smpl_sim/envs/nv/poselib/poselib/skeleton/tests/__init__.py b/smpl_sim/envs/nv/poselib/poselib/skeleton/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/smpl_sim/envs/nv/poselib/poselib/skeleton/tests/ant.xml b/smpl_sim/envs/nv/poselib/poselib/skeleton/tests/ant.xml deleted file mode 100644 index 311d96f..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/skeleton/tests/ant.xml +++ /dev/null @@ -1,71 +0,0 @@ - - - diff --git a/smpl_sim/envs/nv/poselib/poselib/skeleton/tests/test_skeleton.py b/smpl_sim/envs/nv/poselib/poselib/skeleton/tests/test_skeleton.py deleted file mode 100644 index aa9edc3..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/skeleton/tests/test_skeleton.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. 
- -from ...core import * -from ..skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion - -import numpy as np -import torch - -from ...visualization.common import ( - plot_skeleton_state, - plot_skeleton_motion_interactive, -) - -from ...visualization.plt_plotter import Matplotlib3DPlotter -from ...visualization.skeleton_plotter_tasks import ( - Draw3DSkeletonMotion, - Draw3DSkeletonState, -) - - -def test_skel_tree(): - skel_tree = SkeletonTree.from_mjcf( - "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml", - backend="pytorch", - ) - skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch") - # assert skel_tree.to_str() == skel_tree_rec.to_str() - print(skel_tree.node_names) - print(skel_tree.local_translation) - print(skel_tree.parent_indices) - skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree) - plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) - skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"]) - plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) - - -def test_skel_motion(): - skel_motion = SkeletonMotion.from_file( - "/tmp/tmp.npy", backend="pytorch", load_context=True - ) - - plot_skeleton_motion_interactive(skel_motion) - - -def test_grad(): - source_motion = SkeletonMotion.from_file( - "c:\\Users\\bmatusch\\carbmimic\\data\\motions\\JogFlatTerrain_01_ase.npy", - backend="pytorch", - device="cuda:0", - ) - source_tpose = SkeletonState.from_file( - "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\fox_tpose.npy", - backend="pytorch", - device="cuda:0", - ) - - target_tpose = SkeletonState.from_file( - "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\flex_tpose.npy", - backend="pytorch", - device="cuda:0", - ) - target_skeleton_tree = target_tpose.skeleton_tree - - joint_mapping = { - "upArm_r": "right_shoulder", - "upArm_l": "left_shoulder", - "loArm_r": "right_elbow", - "loArm_l": "left_elbow", - "upLeg_r": "right_hip", - "upLeg_l": "left_hip", - "loLeg_r": "right_knee", - "loLeg_l": "left_knee", - "foot_r": "right_ankle", - "foot_l": "left_ankle", - "hips": "pelvis", - "neckA": "neck", - "spineA": "abdomen", - } - - rotation_to_target_skeleton = quat_from_angle_axis( - angle=torch.tensor(90.0).float(), - axis=torch.tensor([1, 0, 0]).float(), - degree=True, - ) - - target_motion = source_motion.retarget_to( - joint_mapping=joint_mapping, - source_tpose_local_rotation=source_tpose.local_rotation, - source_tpose_root_translation=source_tpose.root_translation, - target_skeleton_tree=target_skeleton_tree, - target_tpose_local_rotation=target_tpose.local_rotation, - target_tpose_root_translation=target_tpose.root_translation, - rotation_to_target_skeleton=rotation_to_target_skeleton, - scale_to_target_skeleton=0.01, - ) - - target_state = SkeletonState( - target_motion.tensor[800, :], - target_motion.skeleton_tree, - target_motion.is_local, - ) - - skeleton_tree = target_state.skeleton_tree - root_translation = target_state.root_translation - global_translation = target_state.global_translation - - q = np.zeros((len(skeleton_tree), 4), dtype=np.float32) - q[..., 3] = 1.0 - q = torch.from_numpy(q) - max_its = 10000 - - task = Draw3DSkeletonState(task_name="", skeleton_state=target_state) - plotter = Matplotlib3DPlotter(task) - - for i in range(max_its): - r = quat_normalize(q) - s = SkeletonState.from_rotation_and_root_translation( - skeleton_tree, r=r, t=root_translation, is_local=True - ) - print(" quat norm: {}".format(q.norm(p=2, 
dim=-1).mean().numpy())) - - task.update(s) - plotter.update() - plotter.show() - - -test_grad() \ No newline at end of file diff --git a/smpl_sim/envs/nv/poselib/poselib/skeleton/tests/transfer_npy.py b/smpl_sim/envs/nv/poselib/poselib/skeleton/tests/transfer_npy.py deleted file mode 100644 index dfcd96b..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/skeleton/tests/transfer_npy.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import numpy as np -from ...core import Tensor, SO3, Quaternion, Vector3D -from ..skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion - -tpose = np.load( - "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/flex_tpose.npy" -).item() - -local_rotation = SO3.from_numpy(tpose["local_rotation"], dtype="float32") -root_translation = Vector3D.from_numpy(tpose["root_translation"], dtype="float32") -skeleton_tree = tpose["skeleton_tree"] -parent_indices = Tensor.from_numpy(skeleton_tree["parent_indices"], dtype="int32") -local_translation = Vector3D.from_numpy( - skeleton_tree["local_translation"], dtype="float32" -) -node_names = skeleton_tree["node_names"] -skeleton_tree = SkeletonTree(node_names, parent_indices, local_translation) -skeleton_state = SkeletonState.from_rotation_and_root_translation( - skeleton_tree=skeleton_tree, r=local_rotation, t=root_translation, is_local=True -) - -skeleton_state.to_file( - "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/flex_tpose_new.npy" -) diff --git a/smpl_sim/envs/nv/poselib/poselib/visualization/__init__.py b/smpl_sim/envs/nv/poselib/poselib/visualization/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/smpl_sim/envs/nv/poselib/poselib/visualization/common.py b/smpl_sim/envs/nv/poselib/poselib/visualization/common.py deleted file mode 100644 index 18c20a4..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/visualization/common.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import os - -from ..core import logger -from .plt_plotter import Matplotlib3DPlotter -from .skeleton_plotter_tasks import Draw3DSkeletonMotion, Draw3DSkeletonState - - -def plot_skeleton_state(skeleton_state, task_name="", elev:float=30, azim:float=35, roll:float=0): - """ - Visualize a skeleton state - - :param skeleton_state: - :param task_name: - :type skeleton_state: SkeletonState - :type task_name: string, optional - """ - logger.info("plotting {}".format(task_name)) - task = Draw3DSkeletonState(task_name=task_name, skeleton_state=skeleton_state) - plotter = Matplotlib3DPlotter(task, elev=elev, azim=azim, roll=roll) - plotter.show() - - -def plot_skeleton_states(skeleton_state, skip_n=1, task_name=""): - """ - Visualize a sequence of skeleton state. 
The dimension of the skeleton state must be 1 - - :param skeleton_state: - :param task_name: - :type skeleton_state: SkeletonState - :type task_name: string, optional - """ - logger.info("plotting {} motion".format(task_name)) - assert len(skeleton_state.shape) == 1, "the state must have only one dimension" - task = Draw3DSkeletonState(task_name=task_name, skeleton_state=skeleton_state[0]) - plotter = Matplotlib3DPlotter(task) - for frame_id in range(skeleton_state.shape[0]): - if frame_id % skip_n != 0: - continue - task.update(skeleton_state[frame_id]) - plotter.update() - plotter.show() - - -def plot_skeleton_motion(skeleton_motion, skip_n=1, task_name=""): - """ - Visualize a skeleton motion along its first dimension. - - :param skeleton_motion: - :param task_name: - :type skeleton_motion: SkeletonMotion - :type task_name: string, optional - """ - logger.info("plotting {} motion".format(task_name)) - task = Draw3DSkeletonMotion( - task_name=task_name, skeleton_motion=skeleton_motion, frame_index=0 - ) - plotter = Matplotlib3DPlotter(task) - for frame_id in range(len(skeleton_motion)): - if frame_id % skip_n != 0: - continue - task.update(frame_id) - plotter.update() - plotter.show() - - -def plot_skeleton_motion_interactive_base(skeleton_motion, task_name=""): - class PlotParams: - def __init__(self, total_num_frames): - self.current_frame = 0 - self.playing = False - self.looping = False - self.confirmed = False - self.playback_speed = 4 - self.total_num_frames = total_num_frames - - def sync(self, other): - self.current_frame = other.current_frame - self.playing = other.playing - self.looping = other.current_frame - self.confirmed = other.confirmed - self.playback_speed = other.playback_speed - self.total_num_frames = other.total_num_frames - - task = Draw3DSkeletonMotion( - task_name=task_name, skeleton_motion=skeleton_motion, frame_index=0 - ) - plotter = Matplotlib3DPlotter(task) - - plot_params = PlotParams(total_num_frames=len(skeleton_motion)) - print("Entered interactive plot - press 'n' to quit, 'h' for a list of commands") - - def press(event): - if event.key == "x": - plot_params.playing = not plot_params.playing - elif event.key == "z": - plot_params.current_frame = plot_params.current_frame - 1 - elif event.key == "c": - plot_params.current_frame = plot_params.current_frame + 1 - elif event.key == "a": - plot_params.current_frame = plot_params.current_frame - 20 - elif event.key == "d": - plot_params.current_frame = plot_params.current_frame + 20 - elif event.key == "w": - plot_params.looping = not plot_params.looping - print("Looping: {}".format(plot_params.looping)) - elif event.key == "v": - plot_params.playback_speed *= 2 - print("playback speed: {}".format(plot_params.playback_speed)) - elif event.key == "b": - if plot_params.playback_speed != 1: - plot_params.playback_speed //= 2 - print("playback speed: {}".format(plot_params.playback_speed)) - elif event.key == "n": - plot_params.confirmed = True - elif event.key == "h": - rows, columns = os.popen("stty size", "r").read().split() - columns = int(columns) - print("=" * columns) - print("x: play/pause") - print("z: previous frame") - print("c: next frame") - print("a: jump 10 frames back") - print("d: jump 10 frames forward") - print("w: looping/non-looping") - print("v: double speed (this can be applied multiple times)") - print("b: half speed (this can be applied multiple times)") - print("n: quit") - print("h: help") - print("=" * columns) - - print( - 'current frame index: {}/{} (press "n" to quit)'.format( - 
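# A stripped-down version of the keyboard-driven playback loop used above, built
# on the same matplotlib key_press_event mechanism; drawing is delegated to a
# caller-supplied callback and only the frame bookkeeping is kept.
import matplotlib.pyplot as plt

def interactive_playback(num_frames, draw_frame):
    state = {"frame": 0, "playing": False, "quit": False}

    def on_key(event):
        if event.key == "x":
            state["playing"] = not state["playing"]
        elif event.key == "c":
            state["frame"] = min(state["frame"] + 1, num_frames - 1)
        elif event.key == "z":
            state["frame"] = max(state["frame"] - 1, 0)
        elif event.key == "n":
            state["quit"] = True

    fig = plt.figure()
    fig.canvas.mpl_connect("key_press_event", on_key)
    while not state["quit"]:
        if state["playing"]:
            state["frame"] = (state["frame"] + 1) % num_frames
        draw_frame(fig, state["frame"])
        plt.pause(0.05)    # keeps the GUI event loop responsive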
plot_params.current_frame, plot_params.total_num_frames - 1 - ) - ) - - plotter.fig.canvas.mpl_connect("key_press_event", press) - while True: - reset_trail = False - if plot_params.confirmed: - break - if plot_params.playing: - plot_params.current_frame += plot_params.playback_speed - if plot_params.current_frame >= plot_params.total_num_frames: - if plot_params.looping: - plot_params.current_frame %= plot_params.total_num_frames - reset_trail = True - else: - plot_params.current_frame = plot_params.total_num_frames - 1 - if plot_params.current_frame < 0: - if plot_params.looping: - plot_params.current_frame %= plot_params.total_num_frames - reset_trail = True - else: - plot_params.current_frame = 0 - yield plot_params - task.update(plot_params.current_frame, reset_trail) - plotter.update() - - -def plot_skeleton_motion_interactive(skeleton_motion, task_name=""): - """ - Visualize a skeleton motion along its first dimension interactively. - - :param skeleton_motion: - :param task_name: - :type skeleton_motion: SkeletonMotion - :type task_name: string, optional - """ - for _ in plot_skeleton_motion_interactive_base(skeleton_motion, task_name): - pass - - -def plot_skeleton_motion_interactive_multiple(*callables, sync=True): - for _ in zip(*callables): - if sync: - for p1, p2 in zip(_[:-1], _[1:]): - p2.sync(p1) - - -# def plot_skeleton_motion_interactive_multiple_same(skeleton_motions, task_name=""): - diff --git a/smpl_sim/envs/nv/poselib/poselib/visualization/core.py b/smpl_sim/envs/nv/poselib/poselib/visualization/core.py deleted file mode 100644 index 3c7a176..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/visualization/core.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -""" -The base abstract classes for plotter and the plotting tasks. It describes how the plotter -deals with the tasks in the general cases -""" -from typing import List - - -class BasePlotterTask(object): - _task_name: str # unique name of the task - _task_type: str # type of the task is used to identify which callable - - def __init__(self, task_name: str, task_type: str) -> None: - self._task_name = task_name - self._task_type = task_type - - @property - def task_name(self): - return self._task_name - - @property - def task_type(self): - return self._task_type - - def get_scoped_name(self, name): - return self._task_name + "/" + name - - def __iter__(self): - """Should override this function to return a list of task primitives - """ - raise NotImplementedError - - -class BasePlotterTasks(object): - def __init__(self, tasks) -> None: - self._tasks = tasks - - def __iter__(self): - for task in self._tasks: - yield from task - - -class BasePlotter(object): - """An abstract plotter which deals with a plotting task. 
The children class needs to implement - the functions to create/update the objects according to the task given - """ - - _task_primitives: List[BasePlotterTask] - - def __init__(self, task: BasePlotterTask) -> None: - self._task_primitives = [] - self.create(task) - - @property - def task_primitives(self): - return self._task_primitives - - def create(self, task: BasePlotterTask) -> None: - """Create more task primitives from a task for the plotter""" - new_task_primitives = list(task) # get all task primitives - self._task_primitives += new_task_primitives # append them - self._create_impl(new_task_primitives) - - def update(self) -> None: - """Update the plotter for any updates in the task primitives""" - self._update_impl(self._task_primitives) - - def _update_impl(self, task_list: List[BasePlotterTask]) -> None: - raise NotImplementedError - - def _create_impl(self, task_list: List[BasePlotterTask]) -> None: - raise NotImplementedError diff --git a/smpl_sim/envs/nv/poselib/poselib/visualization/plt_plotter.py b/smpl_sim/envs/nv/poselib/poselib/visualization/plt_plotter.py deleted file mode 100644 index db578a9..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/visualization/plt_plotter.py +++ /dev/null @@ -1,408 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -""" -The matplotlib plotter implementation for all the primitive tasks (in our case: lines and -dots) -""" -from typing import Any, Callable, Dict, List - -import matplotlib.pyplot as plt -import mpl_toolkits.mplot3d.axes3d as p3 - -import numpy as np - -from .core import BasePlotter, BasePlotterTask - - -class Matplotlib2DPlotter(BasePlotter): - _fig: plt.figure # plt figure - _ax: plt.axis # plt axis - # stores artist objects for each task (task name as the key) - _artist_cache: Dict[str, Any] - # callables for each task primitives - _create_impl_callables: Dict[str, Callable] - _update_impl_callables: Dict[str, Callable] - - def __init__(self, task: "BasePlotterTask") -> None: - fig, ax = plt.subplots() - self._fig = fig - self._ax = ax - self._artist_cache = {} - - self._create_impl_callables = { - "Draw2DLines": self._lines_create_impl, - "Draw2DDots": self._dots_create_impl, - "Draw2DTrail": self._trail_create_impl, - } - self._update_impl_callables = { - "Draw2DLines": self._lines_update_impl, - "Draw2DDots": self._dots_update_impl, - "Draw2DTrail": self._trail_update_impl, - } - self._init_lim() - super().__init__(task) - - @property - def ax(self): - return self._ax - - @property - def fig(self): - return self._fig - - def show(self): - plt.show() - - def _min(self, x, y): - if x is None: - return y - if y is None: - return x - return min(x, y) - - def _max(self, x, y): - if x is None: - return y - if y is None: - return x - return max(x, y) - - def _init_lim(self): - self._curr_x_min = None - self._curr_y_min = None - self._curr_x_max = None - self._curr_y_max = None - - def _update_lim(self, xs, ys): - self._curr_x_min = self._min(np.min(xs), self._curr_x_min) - self._curr_y_min = self._min(np.min(ys), self._curr_y_min) - self._curr_x_max = self._max(np.max(xs), self._curr_x_max) - self._curr_y_max = self._max(np.max(ys), self._curr_y_max) 
- - def _set_lim(self): - if not ( - self._curr_x_min is None - or self._curr_x_max is None - or self._curr_y_min is None - or self._curr_y_max is None - ): - self._ax.set_xlim(self._curr_x_min, self._curr_x_max) - self._ax.set_ylim(self._curr_y_min, self._curr_y_max) - self._init_lim() - - @staticmethod - def _lines_extract_xy_impl(index, lines_task): - return lines_task[index, :, 0], lines_task[index, :, 1] - - @staticmethod - def _trail_extract_xy_impl(index, trail_task): - return (trail_task[index : index + 2, 0], trail_task[index : index + 2, 1]) - - def _lines_create_impl(self, lines_task): - color = lines_task.color - self._artist_cache[lines_task.task_name] = [ - self._ax.plot( - *Matplotlib2DPlotter._lines_extract_xy_impl(i, lines_task), - color=color, - linewidth=lines_task.line_width, - alpha=lines_task.alpha - )[0] - for i in range(len(lines_task)) - ] - - def _lines_update_impl(self, lines_task): - lines_artists = self._artist_cache[lines_task.task_name] - for i in range(len(lines_task)): - artist = lines_artists[i] - xs, ys = Matplotlib2DPlotter._lines_extract_xy_impl(i, lines_task) - artist.set_data(xs, ys) - if lines_task.influence_lim: - self._update_lim(xs, ys) - - def _dots_create_impl(self, dots_task): - color = dots_task.color - self._artist_cache[dots_task.task_name] = self._ax.plot( - dots_task[:, 0], - dots_task[:, 1], - c=color, - linestyle="", - marker=".", - markersize=dots_task.marker_size, - alpha=dots_task.alpha, - )[0] - - def _dots_update_impl(self, dots_task): - dots_artist = self._artist_cache[dots_task.task_name] - dots_artist.set_data(dots_task[:, 0], dots_task[:, 1]) - if dots_task.influence_lim: - self._update_lim(dots_task[:, 0], dots_task[:, 1]) - - def _trail_create_impl(self, trail_task): - color = trail_task.color - trail_length = len(trail_task) - 1 - self._artist_cache[trail_task.task_name] = [ - self._ax.plot( - *Matplotlib2DPlotter._trail_extract_xy_impl(i, trail_task), - color=trail_task.color, - linewidth=trail_task.line_width, - alpha=trail_task.alpha * (1.0 - i / (trail_length - 1)) - )[0] - for i in range(trail_length) - ] - - def _trail_update_impl(self, trail_task): - trails_artists = self._artist_cache[trail_task.task_name] - for i in range(len(trail_task) - 1): - artist = trails_artists[i] - xs, ys = Matplotlib2DPlotter._trail_extract_xy_impl(i, trail_task) - artist.set_data(xs, ys) - if trail_task.influence_lim: - self._update_lim(xs, ys) - - def _create_impl(self, task_list): - for task in task_list: - self._create_impl_callables[task.task_type](task) - self._draw() - - def _update_impl(self, task_list): - for task in task_list: - self._update_impl_callables[task.task_type](task) - self._draw() - - def _set_aspect_equal_2d(self, zero_centered=True): - xlim = self._ax.get_xlim() - ylim = self._ax.get_ylim() - - if not zero_centered: - xmean = np.mean(xlim) - ymean = np.mean(ylim) - else: - xmean = 0 - ymean = 0 - - plot_radius = max( - [ - abs(lim - mean_) - for lims, mean_ in ((xlim, xmean), (ylim, ymean)) - for lim in lims - ] - ) - - self._ax.set_xlim([xmean - plot_radius, xmean + plot_radius]) - self._ax.set_ylim([ymean - plot_radius, ymean + plot_radius]) - - def _draw(self): - self._set_lim() - self._set_aspect_equal_2d() - self._fig.canvas.draw() - self._fig.canvas.flush_events() - plt.pause(0.00001) - - -class Matplotlib3DPlotter(BasePlotter): - _fig: plt.figure # plt figure - _ax: p3.Axes3D # plt 3d axis - # stores artist objects for each task (task name as the key) - _artist_cache: Dict[str, Any] - # callables for each task 
primitives - _create_impl_callables: Dict[str, Callable] - _update_impl_callables: Dict[str, Callable] - - def __init__(self, task: "BasePlotterTask", - elev:float=30, azim:float=35, roll:float=0) -> None: - self._fig = plt.figure() - # updated to more recent matplotlib versions - self._ax = self._fig.add_subplot(projection='3d') - self._ax.view_init(elev=elev, azim=azim, roll=roll) - self._artist_cache = {} - - self._create_impl_callables = { - "Draw3DLines": self._lines_create_impl, - "Draw3DDots": self._dots_create_impl, - "Draw3DTrail": self._trail_create_impl, - } - self._update_impl_callables = { - "Draw3DLines": self._lines_update_impl, - "Draw3DDots": self._dots_update_impl, - "Draw3DTrail": self._trail_update_impl, - } - self._init_lim() - super().__init__(task) - - @property - def ax(self): - return self._ax - - @property - def fig(self): - return self._fig - - def show(self): - plt.show() - - def _min(self, x, y): - if x is None: - return y - if y is None: - return x - return min(x, y) - - def _max(self, x, y): - if x is None: - return y - if y is None: - return x - return max(x, y) - - def _init_lim(self): - self._curr_x_min = None - self._curr_y_min = None - self._curr_z_min = None - self._curr_x_max = None - self._curr_y_max = None - self._curr_z_max = None - - def _update_lim(self, xs, ys, zs): - self._curr_x_min = self._min(np.min(xs), self._curr_x_min) - self._curr_y_min = self._min(np.min(ys), self._curr_y_min) - self._curr_z_min = self._min(np.min(zs), self._curr_z_min) - self._curr_x_max = self._max(np.max(xs), self._curr_x_max) - self._curr_y_max = self._max(np.max(ys), self._curr_y_max) - self._curr_z_max = self._max(np.max(zs), self._curr_z_max) - - def _set_lim(self): - if not ( - self._curr_x_min is None - or self._curr_x_max is None - or self._curr_y_min is None - or self._curr_y_max is None - or self._curr_z_min is None - or self._curr_z_max is None - ): - self._ax.set_xlim3d(self._curr_x_min, self._curr_x_max) - self._ax.set_ylim3d(self._curr_y_min, self._curr_y_max) - self._ax.set_zlim3d(self._curr_z_min, self._curr_z_max) - self._init_lim() - - @staticmethod - def _lines_extract_xyz_impl(index, lines_task): - return lines_task[index, :, 0], lines_task[index, :, 1], lines_task[index, :, 2] - - @staticmethod - def _trail_extract_xyz_impl(index, trail_task): - return ( - trail_task[index : index + 2, 0], - trail_task[index : index + 2, 1], - trail_task[index : index + 2, 2], - ) - - def _lines_create_impl(self, lines_task): - color = lines_task.color - self._artist_cache[lines_task.task_name] = [ - self._ax.plot( - *Matplotlib3DPlotter._lines_extract_xyz_impl(i, lines_task), - color=color, - linewidth=lines_task.line_width, - alpha=lines_task.alpha - )[0] - for i in range(len(lines_task)) - ] - - def _lines_update_impl(self, lines_task): - lines_artists = self._artist_cache[lines_task.task_name] - for i in range(len(lines_task)): - artist = lines_artists[i] - xs, ys, zs = Matplotlib3DPlotter._lines_extract_xyz_impl(i, lines_task) - artist.set_data(xs, ys) - artist.set_3d_properties(zs) - if lines_task.influence_lim: - self._update_lim(xs, ys, zs) - - def _dots_create_impl(self, dots_task): - color = dots_task.color - self._artist_cache[dots_task.task_name] = self._ax.plot( - dots_task[:, 0], - dots_task[:, 1], - dots_task[:, 2], - c=color, - linestyle="", - marker=".", - markersize=dots_task.marker_size, - alpha=dots_task.alpha, - )[0] - - def _dots_update_impl(self, dots_task): - dots_artist = self._artist_cache[dots_task.task_name] - 
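# How the 3D figure above is created with current matplotlib: add_subplot with
# projection="3d" replaces the older Axes3D(fig) pattern, and view_init's `roll`
# argument requires matplotlib >= 3.6 (older versions accept only elev/azim).
import matplotlib.pyplot as plt

def make_3d_axis(elev=30, azim=35, roll=0):
    fig = plt.figure()
    ax = fig.add_subplot(projection="3d")
    ax.view_init(elev=elev, azim=azim, roll=roll)
    ax.set_xlabel("x"); ax.set_ylabel("y"); ax.set_zlabel("z")
    return fig, ax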
dots_artist.set_data(dots_task[:, 0], dots_task[:, 1]) - dots_artist.set_3d_properties(dots_task[:, 2]) - if dots_task.influence_lim: - self._update_lim(dots_task[:, 0], dots_task[:, 1], dots_task[:, 2]) - - def _trail_create_impl(self, trail_task): - color = trail_task.color - trail_length = len(trail_task) - 1 - self._artist_cache[trail_task.task_name] = [ - self._ax.plot( - *Matplotlib3DPlotter._trail_extract_xyz_impl(i, trail_task), - color=trail_task.color, - linewidth=trail_task.line_width, - alpha=trail_task.alpha * (1.0 - i / (trail_length - 1)) - )[0] - for i in range(trail_length) - ] - - def _trail_update_impl(self, trail_task): - trails_artists = self._artist_cache[trail_task.task_name] - for i in range(len(trail_task) - 1): - artist = trails_artists[i] - xs, ys, zs = Matplotlib3DPlotter._trail_extract_xyz_impl(i, trail_task) - artist.set_data(xs, ys) - artist.set_3d_properties(zs) - if trail_task.influence_lim: - self._update_lim(xs, ys, zs) - - def _create_impl(self, task_list): - for task in task_list: - self._create_impl_callables[task.task_type](task) - self._draw() - - def _update_impl(self, task_list): - for task in task_list: - self._update_impl_callables[task.task_type](task) - self._draw() - - def _set_aspect_equal_3d(self): - xlim = self._ax.get_xlim3d() - ylim = self._ax.get_ylim3d() - zlim = self._ax.get_zlim3d() - - xmean = np.mean(xlim) - ymean = np.mean(ylim) - zmean = np.mean(zlim) - - plot_radius = max( - [ - abs(lim - mean_) - for lims, mean_ in ((xlim, xmean), (ylim, ymean), (zlim, zmean)) - for lim in lims - ] - ) - - self._ax.set_xlim3d([xmean - plot_radius, xmean + plot_radius]) - self._ax.set_ylim3d([ymean - plot_radius, ymean + plot_radius]) - self._ax.set_zlim3d([zmean - plot_radius, zmean + plot_radius]) - - def _draw(self): - self._set_lim() - self._set_aspect_equal_3d() - self._fig.canvas.draw() - self._ax.set_xlabel("x") - self._ax.set_ylabel("y") - self._ax.set_zlabel("z") - self._fig.canvas.flush_events() - plt.pause(0.00001) diff --git a/smpl_sim/envs/nv/poselib/poselib/visualization/simple_plotter_tasks.py b/smpl_sim/envs/nv/poselib/poselib/visualization/simple_plotter_tasks.py deleted file mode 100644 index fec4c88..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/visualization/simple_plotter_tasks.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. 
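# The in-place update path used by the 3D plotter above: an existing Line3D
# artist gets new x/y data via set_data and new z data via set_3d_properties,
# so animation never recreates artists. A minimal runnable loop:
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(projection="3d")
line, = ax.plot([0.0, 1.0], [0.0, 0.0], [0.0, 1.0], color="blue")
for t in np.linspace(0.0, 1.0, 20):
    line.set_data([0.0, 1.0], [0.0, t])     # x and y coordinates
    line.set_3d_properties([0.0, 1.0 - t])  # z coordinates
    fig.canvas.draw()
    fig.canvas.flush_events()
    plt.pause(0.05)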
- -""" -This is where all the task primitives are defined -""" -import numpy as np - -from .core import BasePlotterTask - - -class DrawXDLines(BasePlotterTask): - _lines: np.ndarray - _color: str - _line_width: int - _alpha: float - _influence_lim: bool - - def __init__( - self, - task_name: str, - lines: np.ndarray, - color: str = "blue", - line_width: int = 2, - alpha: float = 1.0, - influence_lim: bool = True, - ) -> None: - super().__init__(task_name=task_name, task_type=self.__class__.__name__) - self._color = color - self._line_width = line_width - self._alpha = alpha - self._influence_lim = influence_lim - self.update(lines) - - @property - def influence_lim(self) -> bool: - return self._influence_lim - - @property - def raw_data(self): - return self._lines - - @property - def color(self): - return self._color - - @property - def line_width(self): - return self._line_width - - @property - def alpha(self): - return self._alpha - - @property - def dim(self): - raise NotImplementedError - - @property - def name(self): - return "{}DLines".format(self.dim) - - def update(self, lines): - self._lines = np.array(lines) - shape = self._lines.shape - assert shape[-1] == self.dim and shape[-2] == 2 and len(shape) == 3 - - def __getitem__(self, index): - return self._lines[index] - - def __len__(self): - return self._lines.shape[0] - - def __iter__(self): - yield self - - -class DrawXDDots(BasePlotterTask): - _dots: np.ndarray - _color: str - _marker_size: int - _alpha: float - _influence_lim: bool - - def __init__( - self, - task_name: str, - dots: np.ndarray, - color: str = "blue", - marker_size: int = 10, - alpha: float = 1.0, - influence_lim: bool = True, - ) -> None: - super().__init__(task_name=task_name, task_type=self.__class__.__name__) - self._color = color - self._marker_size = marker_size - self._alpha = alpha - self._influence_lim = influence_lim - self.update(dots) - - def update(self, dots): - self._dots = np.array(dots) - shape = self._dots.shape - assert shape[-1] == self.dim and len(shape) == 2 - - def __getitem__(self, index): - return self._dots[index] - - def __len__(self): - return self._dots.shape[0] - - def __iter__(self): - yield self - - @property - def influence_lim(self) -> bool: - return self._influence_lim - - @property - def raw_data(self): - return self._dots - - @property - def color(self): - return self._color - - @property - def marker_size(self): - return self._marker_size - - @property - def alpha(self): - return self._alpha - - @property - def dim(self): - raise NotImplementedError - - @property - def name(self): - return "{}DDots".format(self.dim) - - -class DrawXDTrail(DrawXDDots): - @property - def line_width(self): - return self.marker_size - - @property - def name(self): - return "{}DTrail".format(self.dim) - - -class Draw2DLines(DrawXDLines): - @property - def dim(self): - return 2 - - -class Draw3DLines(DrawXDLines): - @property - def dim(self): - return 3 - - -class Draw2DDots(DrawXDDots): - @property - def dim(self): - return 2 - - -class Draw3DDots(DrawXDDots): - @property - def dim(self): - return 3 - - -class Draw2DTrail(DrawXDTrail): - @property - def dim(self): - return 2 - - -class Draw3DTrail(DrawXDTrail): - @property - def dim(self): - return 3 - diff --git a/smpl_sim/envs/nv/poselib/poselib/visualization/skeleton_plotter_tasks.py b/smpl_sim/envs/nv/poselib/poselib/visualization/skeleton_plotter_tasks.py deleted file mode 100644 index 637497b..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/visualization/skeleton_plotter_tasks.py +++ 
/dev/null @@ -1,194 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -""" -This is where all skeleton related complex tasks are defined (skeleton state and skeleton -motion) -""" -import numpy as np - -from .core import BasePlotterTask -from .simple_plotter_tasks import Draw3DDots, Draw3DLines, Draw3DTrail - - -class Draw3DSkeletonState(BasePlotterTask): - _lines_task: Draw3DLines # sub-task for drawing lines - _dots_task: Draw3DDots # sub-task for drawing dots - - def __init__( - self, - task_name: str, - skeleton_state, - joints_color: str = "red", - lines_color: str = "blue", - alpha=1.0, - ) -> None: - super().__init__(task_name=task_name, task_type="3DSkeletonState") - lines, dots = Draw3DSkeletonState._get_lines_and_dots(skeleton_state) - self._lines_task = Draw3DLines( - self.get_scoped_name("bodies"), lines, joints_color, alpha=alpha - ) - self._dots_task = Draw3DDots( - self.get_scoped_name("joints"), dots, lines_color, alpha=alpha - ) - - @property - def name(self): - return "3DSkeleton" - - def update(self, skeleton_state) -> None: - self._update(*Draw3DSkeletonState._get_lines_and_dots(skeleton_state)) - - @staticmethod - def _get_lines_and_dots(skeleton_state): - """Get all the lines and dots needed to draw the skeleton state - """ - assert ( - len(skeleton_state.tensor.shape) == 1 - ), "the state has to be zero dimensional" - dots = skeleton_state.global_translation.numpy() - skeleton_tree = skeleton_state.skeleton_tree - parent_indices = skeleton_tree.parent_indices.numpy() - lines = [] - for node_index in range(len(skeleton_tree)): - parent_index = parent_indices[node_index] - if parent_index != -1: - lines.append([dots[node_index], dots[parent_index]]) - lines = np.array(lines) - return lines, dots - - def _update(self, lines, dots) -> None: - self._lines_task.update(lines) - self._dots_task.update(dots) - - def __iter__(self): - yield from self._lines_task - yield from self._dots_task - - -class Draw3DSkeletonMotion(BasePlotterTask): - def __init__( - self, - task_name: str, - skeleton_motion, - frame_index=None, - joints_color="red", - lines_color="blue", - velocity_color="green", - angular_velocity_color="purple", - trail_color="black", - trail_length=10, - alpha=1.0, - ) -> None: - super().__init__(task_name=task_name, task_type="3DSkeletonMotion") - self._trail_length = trail_length - self._skeleton_motion = skeleton_motion - # if frame_index is None: - curr_skeleton_motion = self._skeleton_motion.clone() - if frame_index is not None: - curr_skeleton_motion.tensor = self._skeleton_motion.tensor[frame_index, :] - # else: - # curr_skeleton_motion = self._skeleton_motion[frame_index, :] - self._skeleton_state_task = Draw3DSkeletonState( - self.get_scoped_name("skeleton_state"), - curr_skeleton_motion, - joints_color=joints_color, - lines_color=lines_color, - alpha=alpha, - ) - vel_lines, avel_lines = Draw3DSkeletonMotion._get_vel_and_avel( - curr_skeleton_motion - ) - self._com_pos = curr_skeleton_motion.root_translation.numpy()[ - np.newaxis, ... 
- ].repeat(trail_length, axis=0) - self._vel_task = Draw3DLines( - self.get_scoped_name("velocity"), - vel_lines, - velocity_color, - influence_lim=False, - alpha=alpha, - ) - self._avel_task = Draw3DLines( - self.get_scoped_name("angular_velocity"), - avel_lines, - angular_velocity_color, - influence_lim=False, - alpha=alpha, - ) - self._com_trail_task = Draw3DTrail( - self.get_scoped_name("com_trail"), - self._com_pos, - trail_color, - marker_size=2, - influence_lim=True, - alpha=alpha, - ) - - @property - def name(self): - return "3DSkeletonMotion" - - def update(self, frame_index=None, reset_trail=False, skeleton_motion=None) -> None: - if skeleton_motion is not None: - self._skeleton_motion = skeleton_motion - - curr_skeleton_motion = self._skeleton_motion.clone() - if frame_index is not None: - curr_skeleton_motion.tensor = curr_skeleton_motion.tensor[frame_index, :] - if reset_trail: - self._com_pos = curr_skeleton_motion.root_translation.numpy()[ - np.newaxis, ... - ].repeat(self._trail_length, axis=0) - else: - self._com_pos = np.concatenate( - ( - curr_skeleton_motion.root_translation.numpy()[np.newaxis, ...], - self._com_pos[:-1], - ), - axis=0, - ) - self._skeleton_state_task.update(curr_skeleton_motion) - self._com_trail_task.update(self._com_pos) - self._update(*Draw3DSkeletonMotion._get_vel_and_avel(curr_skeleton_motion)) - - @staticmethod - def _get_vel_and_avel(skeleton_motion): - """Get all the velocity and angular velocity lines - """ - pos = skeleton_motion.global_translation.numpy() - vel = skeleton_motion.global_velocity.numpy() - avel = skeleton_motion.global_angular_velocity.numpy() - - vel_lines = np.stack((pos, pos + vel * 0.02), axis=1) - avel_lines = np.stack((pos, pos + avel * 0.01), axis=1) - return vel_lines, avel_lines - - def _update(self, vel_lines, avel_lines) -> None: - self._vel_task.update(vel_lines) - self._avel_task.update(avel_lines) - - def __iter__(self): - yield from self._skeleton_state_task - yield from self._vel_task - yield from self._avel_task - yield from self._com_trail_task - - -class Draw3DSkeletonMotions(BasePlotterTask): - def __init__(self, skeleton_motion_tasks) -> None: - self._skeleton_motion_tasks = skeleton_motion_tasks - - @property - def name(self): - return "3DSkeletonMotions" - - def update(self, frame_index) -> None: - list(map(lambda x: x.update(frame_index), self._skeleton_motion_tasks)) - - def __iter__(self): - yield from self._skeleton_state_tasks diff --git a/smpl_sim/envs/nv/poselib/poselib/visualization/tests/__init__.py b/smpl_sim/envs/nv/poselib/poselib/visualization/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/smpl_sim/envs/nv/poselib/poselib/visualization/tests/test_plotter.py b/smpl_sim/envs/nv/poselib/poselib/visualization/tests/test_plotter.py deleted file mode 100644 index 7ef1fb6..0000000 --- a/smpl_sim/envs/nv/poselib/poselib/visualization/tests/test_plotter.py +++ /dev/null @@ -1,16 +0,0 @@ -from typing import cast - -import matplotlib.pyplot as plt -import numpy as np - -from ..core import BasePlotterTask, BasePlotterTasks -from ..plt_plotter import Matplotlib3DPlotter -from ..simple_plotter_tasks import Draw3DDots, Draw3DLines - -task = Draw3DLines(task_name="test", - lines=np.array([[[0, 0, 0], [0, 0, 1]], [[0, 1, 1], [0, 1, 0]]]), color="blue") -task2 = Draw3DDots(task_name="test2", - dots=np.array([[0, 0, 0], [0, 0, 1], [0, 1, 1], [0, 1, 0]]), color="red") -task3 = BasePlotterTasks([task, task2]) -plotter = Matplotlib3DPlotter(cast(BasePlotterTask, task3)) -plt.show() 
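Note on the SciPy import switch applied to the retained poselib copy (smpl_sim/poselib) in the next file diff: recent SciPy releases deprecate the scipy.ndimage.filters namespace in favour of scipy.ndimage, and keeping the existing "filters" alias leaves call sites such as filters.gaussian_filter1d(...) unchanged. A minimal usage sketch of that pattern (the array below is a hypothetical (frames, joints, xyz) buffer, not something defined in this patch):

    import numpy as np
    import scipy.ndimage as filters  # scipy.ndimage.filters is deprecated in recent SciPy

    positions = np.zeros((120, 24, 3))  # hypothetical (frames, joints, xyz) motion buffer
    smoothed = filters.gaussian_filter1d(positions, sigma=2, axis=0)  # smooth along the time axis
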
diff --git a/smpl_sim/envs/nv/poselib/setup.py b/smpl_sim/envs/nv/poselib/setup.py deleted file mode 100644 index c041197..0000000 --- a/smpl_sim/envs/nv/poselib/setup.py +++ /dev/null @@ -1,19 +0,0 @@ -from setuptools import setup - -setup( - name="poselib", - packages=["poselib"], - version="0.0.42", - description="Framework Agnostic Tensor Programming", - author="Qiyang Li, Kelly Guo, Brendon Matusch", - classifiers=[ - "Programming Language :: Python", - "Programming Language :: Python :: 3", - "License :: OSI Approved :: GNU General Public License (GPL)", - "Operating System :: OS Independent", - "Development Status :: 1 - Planning", - "Environment :: Console", - "Intended Audience :: Science/Research", - "Topic :: Scientific/Engineering :: GIS", - ], -) diff --git a/smpl_sim/poselib/skeleton/skeleton3d.py b/smpl_sim/poselib/skeleton/skeleton3d.py index 8602a55..3f00951 100755 --- a/smpl_sim/poselib/skeleton/skeleton3d.py +++ b/smpl_sim/poselib/skeleton/skeleton3d.py @@ -15,7 +15,7 @@ from ..core import * from .backend.fbx.fbx_read_wrapper import fbx_to_array -import scipy.ndimage.filters as filters +import scipy.ndimage as filters class SkeletonTree(Serializable): diff --git a/smpl_sim/smpllib/smpl_local_robot.py b/smpl_sim/smpllib/smpl_local_robot.py index 5d976d8..31701cf 100644 --- a/smpl_sim/smpllib/smpl_local_robot.py +++ b/smpl_sim/smpllib/smpl_local_robot.py @@ -1973,7 +1973,6 @@ def get_gnn_edges(self): params_names = smpl_robot.get_params(get_name=True) betas = torch.zeros(1, 16) - betas[0] = 2 gender = [0] t0 = time.time() params = smpl_robot.get_params() diff --git a/test.xml b/test.xml index 5d8cb3e..5d1d27a 100644 --- a/test.xml +++ b/test.xml @@ -14,191 +14,191 @@ [191 changed XML lines omitted: the element markup was stripped during extraction, leaving only the -/+ diff prefixes] @@ -206,113 +206,113 @@ [113 changed XML lines omitted: same extraction damage] From c2701839869f320b1dc73100d131c6a160cc7d1e Mon Sep 17 00:00:00 2001 From: zhengyiluo Date: Thu, 28 Nov 2024 19:39:12 -0500 Subject: [PATCH 2/4] update eval functions --- smpl_sim/smpllib/smpl_eval.py | 44 ++++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/smpl_sim/smpllib/smpl_eval.py b/smpl_sim/smpllib/smpl_eval.py index 6d803bd..da69c06 100644 --- a/smpl_sim/smpllib/smpl_eval.py +++ b/smpl_sim/smpllib/smpl_eval.py @@ -19,9 +19,44 @@ from smpl_sim.utils.transformation import euler_from_quaternion, quaternion_matrix from smpl_sim.utils.math_utils import * import copy +from scipy.spatial.transform import Rotation as sRot +def compute_metrics_obj(pred_pos_all, gt_pos_all, pred_rot_all, gt_rot_all, root_idx = 0, use_tqdm = True, concatenate = True): + # Rotation expect xyzw + metrics = defaultdict(list) + if use_tqdm: + pbar = tqdm(range(len(pred_pos_all))) + else: + pbar = range(len(pred_pos_all)) + + assert(len(pred_pos_all) == len(pred_rot_all)) + + for idx in pbar: + jpos_pred = pred_pos_all[idx].copy() + jpos_gt = gt_pos_all[idx].copy() + rot_pred = pred_rot_all[idx].copy() + rot_gt = gt_rot_all[idx].copy() + mpjpe_g = np.linalg.norm(jpos_gt - jpos_pred, axis=2) * 1000 + + vel_dist = (compute_error_vel(jpos_pred, jpos_gt)) * 1000 + accel_dist = (compute_error_accel(jpos_pred, jpos_gt)) * 1000 -def 
compute_metrics_lite(pred_pos_all, gt_pos_all, root_idx = 0, use_tqdm = True, concatenate = True): + jpos_pred = jpos_pred - jpos_pred[:, [root_idx]] # zero out root + jpos_gt = jpos_gt - jpos_gt[:, [root_idx]] + rot_error = np.linalg.norm((sRot.from_quat(rot_gt.reshape(-1, 4)) * sRot.from_quat(rot_pred.reshape(-1, 4)).inv()).as_rotvec(), axis = -1) + metrics["TTR"].append(mpjpe_g < 120) + metrics["mpjpe_g"].append(mpjpe_g) + metrics["rot_error"].append(rot_error) + metrics["accel_dist"].append(accel_dist) + metrics["vel_dist"].append(vel_dist) + if concatenate: + metrics = {k:np.concatenate(v) for k, v in metrics.items()} + return metrics + + + +def compute_metrics_lite(pred_pos_all, gt_pos_all, pred_rot_all=None, gt_rot_all=None, root_idx = 0, use_tqdm = True, concatenate = True): + # Rotation expect xyzw metrics = defaultdict(list) if use_tqdm: pbar = tqdm(range(len(pred_pos_all))) @@ -31,6 +66,8 @@ def compute_metrics_lite(pred_pos_all, gt_pos_all, root_idx = 0, use_tqdm = True for idx in pbar: jpos_pred = pred_pos_all[idx].copy() jpos_gt = gt_pos_all[idx].copy() + rot_pred = pred_rot_all[idx].copy() if pred_rot_all is not None else None + rot_gt = gt_rot_all[idx].copy() if gt_rot_all is not None else None mpjpe_g = np.linalg.norm(jpos_gt - jpos_pred, axis=2) * 1000 @@ -42,8 +79,12 @@ def compute_metrics_lite(pred_pos_all, gt_pos_all, root_idx = 0, use_tqdm = True pa_mpjpe = p_mpjpe(jpos_pred, jpos_gt) * 1000 mpjpe = np.linalg.norm(jpos_pred - jpos_gt, axis=2)* 1000 + if rot_pred is not None and rot_gt is not None: + rot_error = np.linalg.norm((sRot.from_quat(rot_gt.reshape(-1, 4)) * sRot.from_quat(rot_pred.reshape(-1, 4)).inv()).as_rotvec(), axis = -1) metrics["mpjpe_g"].append(mpjpe_g) + if rot_pred is not None and rot_gt is not None: + metrics["rot_error"].append(rot_error) metrics["mpjpe_l"].append(mpjpe) metrics["mpjpe_pa"].append(pa_mpjpe) metrics["accel_dist"].append(accel_dist) @@ -53,6 +94,7 @@ def compute_metrics_lite(pred_pos_all, gt_pos_all, root_idx = 0, use_tqdm = True return metrics + def p_mpjpe(predicted, target): """ Pose error: MPJPE after rigid alignment (scale, rotation, and translation), From 1329c13a5b916f787d18e1859037ef22de858074 Mon Sep 17 00:00:00 2001 From: Howard Nguyen-Huu Date: Wed, 4 Dec 2024 21:24:52 -0500 Subject: [PATCH 3/4] replace deprecated mjv_makeConnector call --- requirement.txt | 2 +- smpl_sim/utils/mujoco_utils.py | 18 ++++++++++-------- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/requirement.txt b/requirement.txt index e1c9254..2f20e53 100644 --- a/requirement.txt +++ b/requirement.txt @@ -12,7 +12,7 @@ opencv-python==4.6.0.66 tqdm pyyaml wandb -mujoco +mujoco>=2.3.7 scikit-image gym git+https://github.com/ZhengyiLuo/smplx.git@master diff --git a/smpl_sim/utils/mujoco_utils.py b/smpl_sim/utils/mujoco_utils.py index 51f3969..87bf065 100644 --- a/smpl_sim/utils/mujoco_utils.py +++ b/smpl_sim/utils/mujoco_utils.py @@ -2,6 +2,7 @@ import mujoco from scipy.spatial.transform import Rotation as sRot + def get_body_qposaddr(model): # adapted to mujoco 2.3+ body_qposaddr = dict() @@ -23,6 +24,7 @@ def get_body_qposaddr(model): body_qposaddr[body_name] = (start_qposaddr, end_qposaddr) return body_qposaddr + def get_body_qveladdr(model): # adapted to mujoco 2.3+ body_qveladdr = dict() @@ -42,6 +44,7 @@ def get_body_qveladdr(model): body_qveladdr[body_name] = (start_qveladdr, end_qveladdr) return body_qveladdr + def get_jnt_range(model): jnt_range = dict() for i in range(model.njnt): @@ -53,6 +56,7 @@ def get_jnt_range(model): 
jnt_range[name] = model.jnt_range[i] return jnt_range + def get_actuator_names(model): actuators = [] for i in range(model.nu): @@ -76,25 +80,23 @@ def add_visual_capsule(scene, point1, point2, radius, rgba): if scene.ngeom >= scene.maxgeom: return scene.ngeom += 1 # increment ngeom - # initialise a new capsule, add it to the scene using mjv_makeConnector + # initialise a new capsule, add it to the scene using mjv_connector mujoco.mjv_initGeom(scene.geoms[scene.ngeom-1], mujoco.mjtGeom.mjGEOM_CAPSULE, np.zeros(3), np.zeros(3), np.zeros(9), rgba.astype(np.float32)) - mujoco.mjv_makeConnector(scene.geoms[scene.ngeom-1], + mujoco.mjv_connector(scene.geoms[scene.ngeom-1], mujoco.mjtGeom.mjGEOM_CAPSULE, radius, - point1[0], point1[1], point1[2], - point2[0], point2[1], point2[2]) + point1, point2) def add_visual_rbox(scene, point1, point2, rgba): """Adds one rectangle to an mjvScene.""" if scene.ngeom >= scene.maxgeom: return scene.ngeom += 1 # increment ngeom - # initialise a new capsule, add it to the scene using mjv_makeConnector + # initialise a new capsule, add it to the scene using mjv_connector mujoco.mjv_initGeom(scene.geoms[scene.ngeom-1], mujoco.mjtGeom.mjGEOM_BOX, np.zeros(3), np.zeros(3), np.zeros(9), rgba.astype(np.float32)) - mujoco.mjv_makeConnector(scene.geoms[scene.ngeom-1], + mujoco.mjv_connector(scene.geoms[scene.ngeom-1], mujoco.mjtGeom.mjGEOM_BOX, 0.01, - point1[0], point1[1], point1[2], - point2[0], point2[1], point2[2]) \ No newline at end of file + point1, point2) From 79761e502298b8b1d4f518419783df89ad7b4880 Mon Sep 17 00:00:00 2001 From: Howard Nguyen-Huu <55764261+howird@users.noreply.github.com> Date: Tue, 10 Dec 2024 17:38:29 -0500 Subject: [PATCH 4/4] fix: get_mesh_offsets should only be provided with arg, v_template, when the model is "smplx" --- smpl_sim/smpllib/smpl_local_robot.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/smpl_sim/smpllib/smpl_local_robot.py b/smpl_sim/smpllib/smpl_local_robot.py index 31701cf..5ff6915 100644 --- a/smpl_sim/smpllib/smpl_local_robot.py +++ b/smpl_sim/smpllib/smpl_local_robot.py @@ -1244,6 +1244,8 @@ def __init__(self, cfg, data_dir="data/smpl"): flat_hand_mean=True, num_betas=20, ) + elif self.smpl_model == "mano": + raise NotImplementedError("mano SMPL_Robot not implemented yet") self.load_from_skeleton() self.joint_names = [b.name for b in self.bodies] @@ -1330,11 +1332,11 @@ def load_from_skeleton( self.model_dirs.append(f"/tmp/smpl/{uuid.uuid4()}") self.skeleton = SkeletonMesh(self.model_dirs[-1]) - if self.smpl_model in ["smpl"]: + if self.smpl_model == "smpl": zero_pose = torch.zeros((1, 72)) - elif self.smpl_model in ["smpl", "smplh", "smplx"]: + elif self.smpl_model in ["smplh", "smplx"]: zero_pose = torch.zeros((1, 156)) - elif self.smpl_model in ["mano"]: + elif self.smpl_model == "mano": zero_pose = torch.zeros((1, 48)) if self.upright_start: @@ -1351,7 +1353,11 @@ def load_from_skeleton( joint_range, contype, conaffinity, - ) = (smpl_parser.get_mesh_offsets(zero_pose=zero_pose, v_template = v_template, betas=self.beta, flatfoot=self.flatfoot) ) + ) = ( + smpl_parser.get_mesh_offsets(zero_pose=zero_pose, v_template=v_template, betas=self.beta, flatfoot=self.flatfoot) + if self.smpl_model == "smplx" else + smpl_parser.get_mesh_offsets(zero_pose=zero_pose, betas=self.beta, flatfoot=self.flatfoot) + ) if self.rel_joint_lm: if self.upright_start: