Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/__init__.py +2 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/__pycache__/__init__.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/__pycache__/sapien_env.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/__pycache__/scene.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/minimal_template.py +66 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/sapien_env.py +1357 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/scene.py +1163 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/__init__.py +11 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/dexterity/__init__.py +3 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/dexterity/__pycache__/__init__.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/dexterity/__pycache__/rotate_single_object_in_hand.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/dexterity/__pycache__/rotate_valve.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/dexterity/rotate_single_object_in_hand.py +373 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/dexterity/rotate_valve.py +312 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/empty_env.py +51 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/fmb/__pycache__/__init__.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/fmb/__pycache__/fmb.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/humanoid/__init__.py +3 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/humanoid/humanoid_stand.py +129 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/humanoid/transport_box.py +311 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/mobile_manipulation/__pycache__/__init__.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/mobile_manipulation/open_cabinet_drawer.py +366 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/rotate_cube.py +403 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/template.py +205 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/bowl_on_rack.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/grasp_bowl_v0.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/pull_cube_tool.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/stack_mug_on_rack.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/assets/data.py +216 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/building/_mjcf_loader.py +921 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/building/actor_builder.py +368 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/building/ground.py +107 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/building/mjcf_loader.py +113 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/building/urdf_loader.py +123 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/__init__.py +1 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/__pycache__/__init__.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/__pycache__/bounding_cylinder.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/__pycache__/geometry.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/__pycache__/rotation_conversions.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/__pycache__/trimesh_utils.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/bounding_cylinder.py +137 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/geometry.py +201 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/rotation_conversions.py +633 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/trimesh_utils.py +119 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/scene_builder/__init__.py +1 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/scene_builder/__pycache__/__init__.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/scene_builder/__pycache__/registration.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/scene_builder/__pycache__/scene_builder.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/scene_builder/ai2thor/__init__.py +6 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/scene_builder/ai2thor/__pycache__/__init__.cpython-310.pyc +0 -0
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .scenes import *
|
| 2 |
+
from .tasks import *
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (242 Bytes). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/__pycache__/sapien_env.cpython-310.pyc
ADDED
|
Binary file (47.6 kB). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/__pycache__/scene.cpython-310.pyc
ADDED
|
Binary file (30.1 kB). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/minimal_template.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Dict, Union
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import sapien
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
from mani_skill.agents.robots import Fetch, Panda
|
| 8 |
+
from mani_skill.envs.sapien_env import BaseEnv
|
| 9 |
+
from mani_skill.sensors.camera import CameraConfig
|
| 10 |
+
from mani_skill.utils import common, sapien_utils
|
| 11 |
+
from mani_skill.utils.registration import register_env
|
| 12 |
+
from mani_skill.utils.structs.types import SimConfig
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@register_env("CustomEnv-v1", max_episode_steps=200)
class CustomEnv(BaseEnv):
    """Minimal skeleton for a ManiSkill task.

    Every hook is stubbed out: the scene is empty, episodes require no
    initialization, evaluation never reports success or failure, and the
    dense reward is identically zero. Copy this class and fill in the
    overrides to author a real task.
    """

    # Robots this template is written to accept (see BaseEnv.SUPPORTED_ROBOTS).
    SUPPORTED_ROBOTS = ["panda", "fetch"]
    agent: Union[Panda, Fetch]

    def __init__(self, *args, robot_uids="panda", robot_init_qpos_noise=0.02, **kwargs):
        # Stash the noise scale before BaseEnv.__init__, which may invoke
        # loading/initialization hooks that read it.
        self.robot_init_qpos_noise = robot_init_qpos_noise
        super().__init__(*args, robot_uids=robot_uids, **kwargs)

    @property
    def _default_sim_config(self):
        """Library-default simulation configuration."""
        return SimConfig()

    @property
    def _default_sensor_configs(self):
        """One 128x128 camera looking down at the workspace origin."""
        cam_pose = sapien_utils.look_at(eye=[0.3, 0, 0.6], target=[-0.1, 0, 0.1])
        base_camera = CameraConfig(
            "base_camera", pose=cam_pose, width=128, height=128, fov=np.pi / 2
        )
        return [base_camera]

    @property
    def _default_human_render_camera_configs(self):
        """Higher-resolution camera used only for human-facing renders."""
        cam_pose = sapien_utils.look_at([0.6, 0.7, 0.6], [0.0, 0.0, 0.35])
        return CameraConfig("render_camera", pose=cam_pose, width=512, height=512, fov=1)

    def _load_agent(self, options: dict):
        # Spawn the robot at the world origin.
        super()._load_agent(options, sapien.Pose(p=[0, 0, 0]))

    def _load_scene(self, options: dict):
        # Template: no objects to build.
        pass

    def _initialize_episode(self, env_idx: torch.Tensor, options: dict):
        # Template: nothing to randomize per episode.
        pass

    def evaluate(self):
        """Report per-env success/fail flags; always all-False in the template."""
        make_flags = lambda: torch.zeros(self.num_envs, device=self.device, dtype=bool)
        return {"success": make_flags(), "fail": make_flags()}

    def _get_obs_extra(self, info: Dict):
        # Template: no task-specific observation entries.
        return dict()

    def compute_dense_reward(self, obs: Any, action: torch.Tensor, info: Dict):
        """Zero reward for every parallel environment."""
        return torch.zeros(self.num_envs, device=self.device)

    def compute_normalized_dense_reward(
        self, obs: Any, action: torch.Tensor, info: Dict
    ):
        """Dense reward rescaled into [0, 1] by its maximum attainable value."""
        max_reward = 1.0
        return self.compute_dense_reward(obs=obs, action=action, info=info) / max_reward
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/sapien_env.py
ADDED
|
@@ -0,0 +1,1357 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import gc
|
| 3 |
+
import os
|
| 4 |
+
from functools import cached_property
|
| 5 |
+
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
|
| 6 |
+
|
| 7 |
+
import dacite
|
| 8 |
+
import gymnasium as gym
|
| 9 |
+
import numpy as np
|
| 10 |
+
import sapien
|
| 11 |
+
import sapien.physx as physx
|
| 12 |
+
import sapien.render
|
| 13 |
+
import sapien.utils.viewer.control_window
|
| 14 |
+
import torch
|
| 15 |
+
from gymnasium.vector.utils import batch_space
|
| 16 |
+
|
| 17 |
+
from mani_skill import PACKAGE_ASSET_DIR, logger
|
| 18 |
+
from mani_skill.agents import REGISTERED_AGENTS
|
| 19 |
+
from mani_skill.agents.base_agent import BaseAgent
|
| 20 |
+
from mani_skill.agents.multi_agent import MultiAgent
|
| 21 |
+
from mani_skill.envs.scene import ManiSkillScene
|
| 22 |
+
from mani_skill.envs.utils.observations import (
|
| 23 |
+
parse_obs_mode_to_struct,
|
| 24 |
+
sensor_data_to_pointcloud,
|
| 25 |
+
)
|
| 26 |
+
from mani_skill.envs.utils.randomization.batched_rng import BatchedRNG
|
| 27 |
+
from mani_skill.envs.utils.system.backend import parse_sim_and_render_backend
|
| 28 |
+
from mani_skill.sensors.base_sensor import BaseSensor, BaseSensorConfig
|
| 29 |
+
from mani_skill.sensors.camera import (
|
| 30 |
+
Camera,
|
| 31 |
+
CameraConfig,
|
| 32 |
+
parse_camera_configs,
|
| 33 |
+
update_camera_configs_from_dict,
|
| 34 |
+
)
|
| 35 |
+
from mani_skill.sensors.depth_camera import StereoDepthCamera, StereoDepthCameraConfig
|
| 36 |
+
from mani_skill.utils import common, gym_utils, sapien_utils
|
| 37 |
+
from mani_skill.utils.structs import Actor, Articulation
|
| 38 |
+
from mani_skill.utils.structs.pose import Pose
|
| 39 |
+
from mani_skill.utils.structs.types import Array, SimConfig
|
| 40 |
+
from mani_skill.utils.visualization.misc import tile_images
|
| 41 |
+
from mani_skill.viewer import create_viewer
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class BaseEnv(gym.Env):
|
| 45 |
+
"""Superclass for ManiSkill environments.
|
| 46 |
+
|
| 47 |
+
Args:
|
| 48 |
+
num_envs: number of parallel environments to run. By default this is 1, which means a CPU simulation is used. If greater than 1,
|
| 49 |
+
then we initialize the GPU simulation setup. Note that not all environments are faster when simulated on the GPU due to limitations of
|
| 50 |
+
GPU simulations. For example, environments with many moving objects are better simulated by parallelizing across CPUs.
|
| 51 |
+
|
| 52 |
+
obs_mode: observation mode to be used. Must be one of ("state", "state_dict", "none", "sensor_data", "rgb", "depth", "segmentation", "rgbd", "rgb+depth", "rgb+depth+segmentation", "rgb+segmentation", "depth+segmentation", "pointcloud")
|
| 53 |
+
The obs_mode is mostly for convenience to automatically optimize/setup all sensors/cameras for the given observation mode to render the correct data and try to ignore unecesary rendering.
|
| 54 |
+
For the most advanced use cases (e.g. you have 1 RGB only camera and 1 depth only camera)
|
| 55 |
+
|
| 56 |
+
reward_mode: reward mode to use. Must be one of ("normalized_dense", "dense", "sparse", "none"). With "none" the reward returned is always 0
|
| 57 |
+
|
| 58 |
+
control_mode: control mode of the agent.
|
| 59 |
+
"*" represents all registered controllers, and the action space will be a dict.
|
| 60 |
+
|
| 61 |
+
render_mode: render mode registered in @SUPPORTED_RENDER_MODES.
|
| 62 |
+
|
| 63 |
+
shader_dir (Optional[str]): shader directory. Defaults to None.
|
| 64 |
+
Setting this will override the shader used for all cameras in the environment. This is legacy behavior kept for backwards compatibility.
|
| 65 |
+
The proper way to change the shaders used for cameras is to either change the environment code or pass in sensor_configs/human_render_camera_configs with the desired shaders.
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
Previously the options are "default", "rt", "rt-fast". "rt" means ray-tracing which results
|
| 69 |
+
in more photorealistic renders but is slow, "rt-fast" is a lower quality but faster version of "rt".
|
| 70 |
+
|
| 71 |
+
enable_shadow (bool): whether to enable shadow for lights. Defaults to False.
|
| 72 |
+
|
| 73 |
+
sensor_configs (dict): configurations of sensors to override any environment defaults.
|
| 74 |
+
If the key is one of sensor names (e.g. a camera), the config value will be applied to the corresponding sensor.
|
| 75 |
+
Otherwise, the value will be applied to all sensors (but overridden by sensor-specific values). For possible configurations
|
| 76 |
+
see the documentation see :doc:`the sensors documentation </user_guide/tutorials/sensors/index>`.
|
| 77 |
+
|
| 78 |
+
human_render_camera_configs (dict): configurations of human rendering cameras to override any environment defaults. Similar usage as @sensor_configs.
|
| 79 |
+
|
| 80 |
+
viewer_camera_configs (dict): configurations of the viewer camera in the GUI to override any environment defaults. Similar usage as @sensor_configs.
|
| 81 |
+
|
| 82 |
+
robot_uids (Union[str, BaseAgent, List[Union[str, BaseAgent]]]): List of robots to instantiate and control in the environment.
|
| 83 |
+
|
| 84 |
+
sim_config (Union[SimConfig, dict]): Configurations for simulation if used that override the environment defaults. If given
|
| 85 |
+
a dictionary, it can just override specific attributes e.g. ``sim_config=dict(scene_config=dict(solver_iterations=25))``. If
|
| 86 |
+
passing in a SimConfig object, while typed, will override every attribute including the task defaults. Some environments
|
| 87 |
+
define their own recommended default sim configurations via the ``self._default_sim_config`` attribute that generally should not be
|
| 88 |
+
completely overriden.
|
| 89 |
+
|
| 90 |
+
reconfiguration_freq (int): How frequently to call reconfigure when environment is reset via `self.reset(...)`
|
| 91 |
+
Generally for most users who are not building tasks this does not need to be changed. The default is 0, which means
|
| 92 |
+
the environment reconfigures upon creation, and never again.
|
| 93 |
+
|
| 94 |
+
sim_backend (str): By default this is "auto". If sim_backend is "auto", then if ``num_envs == 1``, we use the PhysX CPU sim backend, otherwise
|
| 95 |
+
we use the PhysX GPU sim backend and automatically pick a GPU to use.
|
| 96 |
+
Can also be "physx_cpu" or "physx_cuda" to force usage of a particular sim backend.
|
| 97 |
+
To select a particular GPU to run the simulation on, you can pass "cuda:n" where n is the ID of the GPU,
|
| 98 |
+
similar to the way PyTorch selects GPUs.
|
| 99 |
+
Note that if this is "physx_cpu", num_envs can only be equal to 1.
|
| 100 |
+
|
| 101 |
+
render_backend (str): By default this is "gpu". If render_backend is "gpu", then we auto select a GPU to render with.
|
| 102 |
+
It can be "cuda:n" where n is the ID of the GPU to render with. If this is "cpu", then we render on the CPU.
|
| 103 |
+
|
| 104 |
+
parallel_in_single_scene (bool): By default this is False. If True, rendered images and the GUI will show all objects in one view.
|
| 105 |
+
This is only really useful for generating cool videos showing all environments at once but it is not recommended
|
| 106 |
+
otherwise as it slows down simulation and rendering.
|
| 107 |
+
|
| 108 |
+
enhanced_determinism (bool): By default this is False and env resets will reset the episode RNG only when a seed / seed list is given.
|
| 109 |
+
If True, the environment will reset the episode RNG upon each reset regardless of whether a seed is provided.
|
| 110 |
+
Generally enhanced_determinisim is not needed and users are recommended to pass seeds into the env reset function instead.
|
| 111 |
+
"""
|
| 112 |
+
|
| 113 |
+
# fmt: off
|
| 114 |
+
SUPPORTED_ROBOTS: List[Union[str, Tuple[str]]] = None
|
| 115 |
+
"""Override this to enforce which robots or tuples of robots together are supported in the task. During env creation,
|
| 116 |
+
setting robot_uids auto loads all desired robots into the scene, but not all tasks are designed to support some robot setups"""
|
| 117 |
+
SUPPORTED_OBS_MODES = ("state", "state_dict", "none", "sensor_data", "any_textures", "pointcloud")
|
| 118 |
+
"""The string observation modes the environment supports. Note that "none" and "any_texture" are special keys. none indicates no observation data is generated.
|
| 119 |
+
"any_texture" indicates that any combination of image textures generated by cameras are supported e.g. rgb+depth, normal+segmentation, albedo+rgb+depth etc.
|
| 120 |
+
For a full list of supported textures see """
|
| 121 |
+
SUPPORTED_REWARD_MODES = ("normalized_dense", "dense", "sparse", "none")
|
| 122 |
+
SUPPORTED_RENDER_MODES = ("human", "rgb_array", "sensors", "all")
|
| 123 |
+
"""The supported render modes. Human opens up a GUI viewer. rgb_array returns an rgb array showing the current environment state.
|
| 124 |
+
sensors returns an rgb array but only showing all data collected by sensors as images put together"""
|
| 125 |
+
|
| 126 |
+
metadata = {"render_modes": SUPPORTED_RENDER_MODES}
|
| 127 |
+
|
| 128 |
+
scene: ManiSkillScene = None
|
| 129 |
+
"""the main scene, which manages all sub scenes. In CPU simulation there is only one sub-scene"""
|
| 130 |
+
|
| 131 |
+
agent: BaseAgent
|
| 132 |
+
|
| 133 |
+
action_space: gym.Space
|
| 134 |
+
"""the batched action space of the environment, which is also the action space of the agent"""
|
| 135 |
+
single_action_space: gym.Space
|
| 136 |
+
"""the unbatched action space of the environment"""
|
| 137 |
+
|
| 138 |
+
_sensors: Dict[str, BaseSensor]
|
| 139 |
+
"""all sensors configured in this environment"""
|
| 140 |
+
_sensor_configs: Dict[str, BaseSensorConfig]
|
| 141 |
+
"""all sensor configurations parsed from self._sensor_configs and agent._sensor_configs"""
|
| 142 |
+
_agent_sensor_configs: Dict[str, BaseSensorConfig]
|
| 143 |
+
"""all agent sensor configs parsed from agent._sensor_configs"""
|
| 144 |
+
_human_render_cameras: Dict[str, Camera]
|
| 145 |
+
"""cameras used for rendering the current environment retrievable via `env.render_rgb_array()`. These are not used to generate observations"""
|
| 146 |
+
_default_human_render_camera_configs: Dict[str, CameraConfig]
|
| 147 |
+
"""all camera configurations for cameras used for human render"""
|
| 148 |
+
_human_render_camera_configs: Dict[str, CameraConfig]
|
| 149 |
+
"""all camera configurations parsed from self._human_render_camera_configs"""
|
| 150 |
+
|
| 151 |
+
_hidden_objects: List[Union[Actor, Articulation]] = []
|
| 152 |
+
"""list of objects that are hidden during rendering when generating visual observations / running render_cameras()"""
|
| 153 |
+
|
| 154 |
+
_main_rng: np.random.RandomState = None
|
| 155 |
+
"""main rng generator that generates episode seed sequences. For internal use only"""
|
| 156 |
+
_batched_main_rng: BatchedRNG = None
|
| 157 |
+
"""the batched main RNG that generates episode seed sequences. For internal use only"""
|
| 158 |
+
_main_seed: List[int] = None
|
| 159 |
+
"""main seed list for _main_rng and _batched_main_rng. _main_rng uses _main_seed[0]. For internal use only"""
|
| 160 |
+
_episode_rng: np.random.RandomState = None
|
| 161 |
+
"""the numpy RNG that you can use to generate random numpy data. It is not recommended to use this. Instead use the _batched_episode_rng which helps ensure GPU and CPU simulation generate the same data with the same seeds."""
|
| 162 |
+
_batched_episode_rng: BatchedRNG = None
|
| 163 |
+
"""the recommended batched episode RNG to generate random numpy data consistently between single and parallel environments"""
|
| 164 |
+
_episode_seed: np.ndarray = None
|
| 165 |
+
"""episode seed list for _episode_rng and _batched_episode_rng. _episode_rng uses _episode_seed[0]."""
|
| 166 |
+
_batched_rng_backend = "numpy:random_state"
|
| 167 |
+
"""the backend to use for the batched RNG"""
|
| 168 |
+
_enhanced_determinism: bool = False
|
| 169 |
+
"""whether to reset the episode RNG upon each reset regardless of whether a seed is provided"""
|
| 170 |
+
|
| 171 |
+
_parallel_in_single_scene: bool = False
|
| 172 |
+
"""whether all objects are placed in one scene for the purpose of rendering all objects together instead of in parallel"""
|
| 173 |
+
|
| 174 |
+
_sim_device: sapien.Device = None
|
| 175 |
+
"""the sapien device object the simulation runs on"""
|
| 176 |
+
|
| 177 |
+
_render_device: sapien.Device = None
|
| 178 |
+
"""the sapien device object the renderer runs on"""
|
| 179 |
+
|
| 180 |
+
_viewer: Union[sapien.utils.Viewer, None] = None
|
| 181 |
+
|
| 182 |
+
_sample_video_link: Optional[str] = None
|
| 183 |
+
"""a link to a sample video of the task. This is mostly used for automatic documentation generation"""
|
| 184 |
+
|
| 185 |
+
def __init__(
    self,
    num_envs: int = 1,
    obs_mode: Optional[str] = None,
    reward_mode: Optional[str] = None,
    control_mode: Optional[str] = None,
    render_mode: Optional[str] = None,
    shader_dir: Optional[str] = None,
    enable_shadow: bool = False,
    sensor_configs: Optional[dict] = dict(),
    human_render_camera_configs: Optional[dict] = dict(),
    viewer_camera_configs: Optional[dict] = dict(),
    robot_uids: Union[str, BaseAgent, List[Union[str, BaseAgent]]] = None,
    sim_config: Union[SimConfig, dict] = dict(),
    reconfiguration_freq: Optional[int] = None,
    sim_backend: str = "auto",
    render_backend: str = "gpu",
    parallel_in_single_scene: bool = False,
    enhanced_determinism: bool = False,
):
    """Initialize the environment.

    Args:
        num_envs: number of parallel environments to simulate.
        obs_mode: observation mode; defaults to the first entry of SUPPORTED_OBS_MODES.
        reward_mode: reward mode; defaults to the first entry of SUPPORTED_REWARD_MODES.
        control_mode: agent control mode ("*" for multiple controllers is not supported yet).
        render_mode: the gym render mode to use.
        shader_dir: deprecated; use sensor_configs/human_render_camera_configs to set shaders.
        enable_shadow: whether lights cast shadows.
        sensor_configs: user overrides for sensor camera configurations.
        human_render_camera_configs: user overrides for human render camera configurations.
        viewer_camera_configs: user overrides for the viewer camera configuration.
        robot_uids: robot uid(s) or agent class(es) to load.
        sim_config: overrides merged on top of self._default_sim_config.
        reconfiguration_freq: how often (in resets) to fully reconfigure; 0/None means never after the first.
        sim_backend: "auto" picks physx_cuda when num_envs > 1, else physx_cpu.
        render_backend: rendering backend.
        parallel_in_single_scene: place all sub-scenes into one scene for joint GUI rendering.
        enhanced_determinism: reset the episode RNG on every reset even without a seed.
    """
    self._enhanced_determinism = enhanced_determinism

    self.num_envs = num_envs
    self.reconfiguration_freq = reconfiguration_freq if reconfiguration_freq is not None else 0
    self._reconfig_counter = 0
    if shader_dir is not None:
        logger.warn("shader_dir argument will be deprecated after ManiSkill v3.0.0 official release. Please use sensor_configs/human_render_camera_configs to set shaders.")
        # BUGFIX: build copies instead of mutating in place with |=. The
        # defaults above are shared dict() instances, so an in-place merge
        # would leak shader_pack into every subsequently constructed env (and
        # into caller-owned dicts passed as arguments).
        sensor_configs = dict(sensor_configs, shader_pack=shader_dir)
        human_render_camera_configs = dict(human_render_camera_configs, shader_pack=shader_dir)
        viewer_camera_configs = dict(viewer_camera_configs, shader_pack=shader_dir)
    self._custom_sensor_configs = sensor_configs
    self._custom_human_render_camera_configs = human_render_camera_configs
    self._custom_viewer_camera_configs = viewer_camera_configs
    self._parallel_in_single_scene = parallel_in_single_scene
    self.robot_uids = robot_uids
    # a single-element tuple of uids is treated the same as the bare uid
    if isinstance(robot_uids, tuple) and len(robot_uids) == 1:
        self.robot_uids = robot_uids[0]
    if self.SUPPORTED_ROBOTS is not None:
        if self.robot_uids not in self.SUPPORTED_ROBOTS:
            logger.warn(f"{self.robot_uids} is not in the task's list of supported robots. Code may not run as intended")

    if sim_backend == "auto":
        if num_envs > 1:
            sim_backend = "physx_cuda"
        else:
            sim_backend = "physx_cpu"
    self.backend = parse_sim_and_render_backend(sim_backend, render_backend)
    # determine the sim and render devices
    self.device = self.backend.device
    self._sim_device = self.backend.sim_device
    self._render_device = self.backend.render_device
    if self.device.type == "cuda":
        if not physx.is_gpu_enabled():
            physx.enable_gpu()

    # raise a number of nicer errors
    if sim_backend == "cpu" and num_envs > 1:
        raise RuntimeError("""Cannot set the sim backend to 'cpu' and have multiple environments.
If you want to do CPU sim backends and have environment vectorization you must use multi-processing across CPUs.
This can be done via the gymnasium's AsyncVectorEnv API""")

    if shader_dir is not None:
        if "rt" == shader_dir[:2]:
            if num_envs > 1 and parallel_in_single_scene == False:
                raise RuntimeError("""Currently you cannot run ray-tracing on more than one environment in a single process""")

    assert not parallel_in_single_scene or (obs_mode not in ["sensor_data", "pointcloud", "rgb", "depth", "rgbd"]), \
        "Parallel rendering from parallel cameras is only supported when the gui/viewer is not used. parallel_in_single_scene must be False if using parallel rendering. If True only state based observations are supported."

    # merge user sim config overrides onto the task's defaults
    if isinstance(sim_config, SimConfig):
        sim_config = sim_config.dict()
    merged_gpu_sim_config = self._default_sim_config.dict()
    common.dict_merge(merged_gpu_sim_config, sim_config)
    self.sim_config = dacite.from_dict(data_class=SimConfig, data=merged_gpu_sim_config, config=dacite.Config(strict=True))
    """the final sim config after merging user overrides with the environment default"""
    physx.set_gpu_memory_config(**self.sim_config.gpu_memory_config.dict())
    sapien.render.set_log_level(os.getenv("MS_RENDERER_LOG_LEVEL", "warn"))

    # Set simulation and control frequency
    self._sim_freq = self.sim_config.sim_freq
    self._control_freq = self.sim_config.control_freq
    assert self._sim_freq % self._control_freq == 0, f"sim_freq({self._sim_freq}) is not divisible by control_freq({self._control_freq})."
    self._sim_steps_per_control = self._sim_freq // self._control_freq

    # Observation mode
    if obs_mode is None:
        obs_mode = self.SUPPORTED_OBS_MODES[0]
    if obs_mode not in self.SUPPORTED_OBS_MODES:
        # we permit any combination of visual observation textures e.g. rgb+normal, depth+segmentation, etc.
        if "any_textures" in self.SUPPORTED_OBS_MODES:
            # the parse_visual_obs_mode_to_struct will check if the textures requested are valid
            pass
        else:
            raise NotImplementedError(f"Unsupported obs mode: {obs_mode}. Must be one of {self.SUPPORTED_OBS_MODES}")
    self._obs_mode = obs_mode
    self.obs_mode_struct = parse_obs_mode_to_struct(self._obs_mode)
    """dataclass describing what observation data is being requested by the user, detailing if state data is requested and what visual data is requested"""

    # Reward mode
    if reward_mode is None:
        reward_mode = self.SUPPORTED_REWARD_MODES[0]
    if reward_mode not in self.SUPPORTED_REWARD_MODES:
        raise NotImplementedError("Unsupported reward mode: {}".format(reward_mode))
    self._reward_mode = reward_mode

    # Control mode
    self._control_mode = control_mode
    # TODO(jigu): Support dict action space
    if control_mode == "*":
        raise NotImplementedError("Multiple controllers are not supported yet.")

    # Render mode
    self.render_mode = render_mode
    self._viewer = None

    # Lighting
    self.enable_shadow = enable_shadow

    # Use a fixed (main) seed to enhance determinism
    self._main_seed = None
    self._set_main_rng([2022 + i for i in range(self.num_envs)])
    self._elapsed_steps = (
        torch.zeros(self.num_envs, device=self.device, dtype=torch.int32)
    )
    # first reset with reconfigure=True builds the scene and yields the initial obs
    obs, _ = self.reset(seed=[2022 + i for i in range(self.num_envs)], options=dict(reconfigure=True))

    self._init_raw_obs = common.to_cpu_tensor(obs)
    """the raw observation returned by the env.reset (a cpu torch tensor/dict of tensors). Useful for future observation wrappers to use to auto generate observation spaces"""
    self._init_raw_state = common.to_cpu_tensor(self.get_state_dict())
    """the initial raw state returned by env.get_state. Useful for reconstructing state dictionaries from flattened state vectors"""

    if self.agent is not None:
        self.action_space = self.agent.action_space
        """the batched action space of the environment, which is also the action space of the agent"""
        self.single_action_space = self.agent.single_action_space
        """the unbatched action space of the environment"""
        self._orig_single_action_space = copy.deepcopy(self.single_action_space)
        """the original unbatched action space of the environment"""
    else:
        self.action_space = None
    # initialize the cached properties
    self.single_observation_space
    self.observation_space
def update_obs_space(self, obs: torch.Tensor):
    """Regenerate the cached observation spaces from a new raw observation.

    Call this from an observation wrapper whenever the wrapper changes the
    structure of the observations returned by ``env.reset``/``env.step``:

    .. code-block:: python

        import gymnasium as gym
        from mani_skill.envs.sapien_env import BaseEnv
        class YourObservationWrapper(gym.ObservationWrapper):
            def __init__(self, env):
                super().__init__(env)
                self.base_env.update_obs_space(self.observation(self.base_env._init_raw_obs))
            @property
            def base_env(self) -> BaseEnv:
                return self.env.unwrapped
            def observation(self, obs):
                # your code for transforming the observation
    """
    self._init_raw_obs = obs
    # drop the stale cached_property values and recompute them from the new raw obs
    for space_name in ("single_observation_space", "observation_space"):
        delattr(self, space_name)
        getattr(self, space_name)
@cached_property
def single_observation_space(self) -> gym.Space:
    """The unbatched observation space, derived from the initial raw observation."""
    raw = common.to_numpy(self._init_raw_obs)
    return gym_utils.convert_observation_to_space(raw, unbatched=True)
@cached_property
def observation_space(self) -> gym.Space:
    """The batched observation space: the single space repeated num_envs times."""
    single = self.single_observation_space
    return batch_space(single, n=self.num_envs)
@property
def gpu_sim_enabled(self):
    """True when the underlying scene runs on the GPU simulation backend."""
    scene = self.scene
    return scene.gpu_sim_enabled
@property
def _default_sim_config(self):
    """Default simulation configuration; subclasses override to customize physics settings."""
    cfg = SimConfig()
    return cfg
def _load_agent(self, options: dict, initial_agent_poses: Optional[Union[sapien.Pose, Pose]] = None):
    """Instantiate the robot agent(s) specified by ``self.robot_uids`` into the scene.

    Args:
        options: the options dict passed through from ``self.reset``.
        initial_agent_poses: a pose (or list of poses, one per robot) each agent is spawned at.

    Sets ``self.agent`` to None when robot_uids is "none", a single agent when one robot is
    requested, or a MultiAgent wrapper when several robots are requested.
    """
    agents = []
    robot_uids = self.robot_uids
    # normalize a single pose into a one-element list so it can be indexed per-robot below
    if not isinstance(initial_agent_poses, list):
        initial_agent_poses = [initial_agent_poses]
    # "none" explicitly requests a robot-free environment
    if robot_uids == "none" or robot_uids == ("none", ):
        self.agent = None
        return
    if robot_uids is not None:
        if not isinstance(robot_uids, tuple):
            robot_uids = [robot_uids]
        for i, robot_uid in enumerate(robot_uids):
            # robot_uid may be an agent class directly rather than a registered uid string
            if isinstance(robot_uid, type(BaseAgent)):
                agent_cls = robot_uid
            else:
                if robot_uid not in REGISTERED_AGENTS:
                    raise RuntimeError(
                        f"Agent {robot_uid} not found in the dict of registered agents. If the id is not a typo then make sure to apply the @register_agent() decorator."
                    )
                agent_cls = REGISTERED_AGENTS[robot_uid].agent_cls
            # NOTE(review): if multiple robots are requested but a single pose was given,
            # initial_agent_poses[i] raises IndexError for i >= 1 — confirm callers always
            # pass one pose per robot in the multi-robot case.
            agent: BaseAgent = agent_cls(
                self.scene,
                self._control_freq,
                self._control_mode,
                # agent_idx disambiguates names only when there is more than one robot
                agent_idx=i if len(robot_uids) > 1 else None,
                initial_pose=initial_agent_poses[i] if initial_agent_poses is not None else None,
            )
            agents.append(agent)
    if len(agents) == 1:
        self.agent = agents[0]
    else:
        self.agent = MultiAgent(agents)
@property
def _default_sensor_configs(
    self,
) -> Union[
    BaseSensorConfig, Sequence[BaseSensorConfig], Dict[str, BaseSensorConfig]
]:
    """Default (non-agent) sensor configurations for the task. Subclasses override this;
    user-supplied ``sensor_configs`` can further override the result at env creation time."""
    # the base task declares no extra sensors
    return list()
@property
def _default_human_render_camera_configs(
    self,
) -> Union[
    CameraConfig, Sequence[CameraConfig], Dict[str, CameraConfig]
]:
    """Default cameras used for rendering with ``render_mode='rgb_array'``. Subclasses override
    this; user-supplied ``human_render_camera_configs`` can override the result at env creation time."""
    # the base task declares no human render cameras
    return list()
@property
def _default_viewer_camera_configs(
    self,
) -> CameraConfig:
    """Default configuration for the viewer camera (shader, fov, etc.). If a human render camera
    named "render_camera" exists, the viewer adopts that camera's pose instead of this one."""
    # NOTE(review): near=0.0 is a degenerate near plane for a perspective projection — confirm
    # the viewer handles this value specially.
    return CameraConfig(
        uid="viewer",
        pose=sapien.Pose([0, 0, 1]),
        width=1920,
        height=1080,
        shader_pack="default",
        near=0.0,
        far=1000,
        fov=np.pi / 2,
    )
@property
def sim_freq(self) -> int:
    """Simulation loop frequency in Hz."""
    return self._sim_freq
@property
def control_freq(self):
    """Control loop frequency in Hz."""
    return self._control_freq
@property
def sim_timestep(self):
    """Duration (dt, in seconds) of one simulation step."""
    return 1 / self._sim_freq
@property
def control_timestep(self):
    """Duration (dt, in seconds) of one control step."""
    return 1 / self._control_freq
@property
def control_mode(self) -> str:
    """The active control mode, delegated to the agent."""
    agent = self.agent
    return agent.control_mode
@property
def elapsed_steps(self) -> torch.Tensor:
    """Tensor of shape (num_envs,) counting steps elapsed in each parallel environment."""
    return self._elapsed_steps
# ---------------------------------------------------------------------------- #
|
| 462 |
+
# Observation
|
| 463 |
+
# ---------------------------------------------------------------------------- #
|
| 464 |
+
@property
def obs_mode(self) -> str:
    """The current observation mode; determines what ``env.get_obs()`` returns."""
    return self._obs_mode
def get_obs(self, info: Optional[Dict] = None):
    """
    Return the current observation of the environment. User may call this directly to get the current observation
    as opposed to taking a step with actions in the environment.

    Note that some tasks use info of the current environment state to populate the observations to avoid having to
    compute slow operations twice. For example a state based observation may wish to include a boolean indicating
    if a robot is grasping an object. Computing this boolean correctly is slow, so it is preferable to generate that
    data in the info object by overriding the `self.evaluate` function.

    Args:
        info (Dict): The info object of the environment. Generally should always be the result of `self.get_info()`.
            If this is None (the default), this function will call `self.get_info()` itself
    """
    if info is None:
        info = self.get_info()
    # dispatch on observation mode
    if self._obs_mode == "none":
        # Some cases do not need observations, e.g., MPC
        return dict()
    elif self._obs_mode == "state":
        # flatten the full state dict into a single tensor
        state_dict = self._get_obs_state_dict(info)
        obs = common.flatten_state_dict(state_dict, use_torch=True, device=self.device)
    elif self._obs_mode == "state_dict":
        obs = self._get_obs_state_dict(info)
    elif self._obs_mode == "pointcloud":
        # sensor data is rendered first and then converted to a pointcloud
        obs = self._get_obs_with_sensor_data(info)
        obs = sensor_data_to_pointcloud(obs, self._sensors)
    elif self._obs_mode == "sensor_data":
        # return raw texture data dependent on choice of shader
        obs = self._get_obs_with_sensor_data(info, apply_texture_transforms=False)
    else:
        # any remaining mode (e.g. rgb/depth combinations) is visual data with transforms applied
        obs = self._get_obs_with_sensor_data(info)

    # flatten parts of the state observation if requested
    if self.obs_mode_struct.state:
        # note: "agent" and "extra" are removed from the dict and replaced by one flat "state" entry
        if isinstance(obs, dict):
            data = dict(agent=obs.pop("agent"), extra=obs.pop("extra"))
            obs["state"] = common.flatten_state_dict(data, use_torch=True, device=self.device)
    return obs
def _get_obs_state_dict(self, info: Dict):
    """Assemble the (ground-truth) state-based observation dictionary."""
    # agent proprioception first, then task-specific extras
    return {
        "agent": self._get_obs_agent(),
        "extra": self._get_obs_extra(info),
    }
def _get_obs_agent(self):
    """Observations about the agent's state, delegated to the agent's proprioception
    (by default qpos/qvel plus any controller state)."""
    agent = self.agent
    return agent.get_proprioception()
def _get_obs_extra(self, info: Dict):
    """Task-specific extra observations; the base implementation contributes nothing."""
    return {}
def capture_sensor_data(self):
    """Trigger a (non-blocking) data capture on every sensor in the environment."""
    for current_sensor in self._sensors.values():
        current_sensor.capture()
def get_sensor_images(self) -> Dict[str, Dict[str, torch.Tensor]]:
    """RGB visualizations of what each sensor currently senses. Internally calls
    self._get_obs_sensor_data(), which hides designated objects and updates the render."""
    raw_sensor_data = self._get_obs_sensor_data()
    return self.scene.get_sensor_images(raw_sensor_data)
def get_sensor_params(self) -> Dict[str, Dict[str, torch.Tensor]]:
    """All sensor parameters, keyed by sensor uid."""
    return {name: sensor.get_params() for name, sensor in self._sensors.items()}
def _get_obs_sensor_data(self, apply_texture_transforms: bool = True) -> dict:
    """get only data from sensors. Auto hides any objects that are designated to be hidden

    Args:
        apply_texture_transforms: whether each camera applies its texture transforms to the
            raw shader output before returning it.
    Returns:
        dict mapping sensor uid -> that sensor's observation dict.
    """
    # hide designated objects before updating the render so they do not appear in sensor images
    for obj in self._hidden_objects:
        obj.hide_visual()
    # only sensor render targets are refreshed here; human render cameras are untouched
    self.scene.update_render(update_sensors=True, update_human_render_cameras=False)
    self.capture_sensor_data()
    sensor_obs = dict()
    for name, sensor in self.scene.sensors.items():
        # only Camera sensors are handled here; other sensor types are skipped
        if isinstance(sensor, Camera):
            if self.obs_mode in ["state", "state_dict"]:
                # normally in non visual observation modes we do not render sensor observations. But some users may want to render sensor data for debugging or various algorithms
                sensor_obs[name] = sensor.get_obs(position=False, segmentation=False, apply_texture_transforms=apply_texture_transforms)
            else:
                # request exactly the textures the parsed obs mode asked for
                sensor_obs[name] = sensor.get_obs(
                    rgb=self.obs_mode_struct.visual.rgb,
                    depth=self.obs_mode_struct.visual.depth,
                    position=self.obs_mode_struct.visual.position,
                    segmentation=self.obs_mode_struct.visual.segmentation,
                    normal=self.obs_mode_struct.visual.normal,
                    albedo=self.obs_mode_struct.visual.albedo,
                    apply_texture_transforms=apply_texture_transforms
                )
    # explicitly synchronize and wait for cuda kernels to finish
    # this prevents the GPU from making poor scheduling decisions when other physx code begins to run
    # NOTE(review): this is called unconditionally — confirm it is safe on CPU-only setups
    torch.cuda.synchronize()
    return sensor_obs
def _get_obs_with_sensor_data(self, info: Dict, apply_texture_transforms: bool = True) -> dict:
    """Full observation dict combining agent state, task extras, sensor parameters and sensor data."""
    observation = dict(
        agent=self._get_obs_agent(),
        extra=self._get_obs_extra(info),
        sensor_param=self.get_sensor_params(),
        sensor_data=self._get_obs_sensor_data(apply_texture_transforms),
    )
    return observation
@property
def robot_link_names(self):
    """Link names of the robot, used for segmentation observations; delegated to the agent."""
    agent = self.agent
    return agent.robot_link_names
# -------------------------------------------------------------------------- #
|
| 582 |
+
# Reward mode
|
| 583 |
+
# -------------------------------------------------------------------------- #
|
| 584 |
+
@property
def reward_mode(self):
    """The reward mode used by ``get_reward`` to dispatch reward computation."""
    return self._reward_mode
def get_reward(self, obs: Any, action: torch.Tensor, info: Dict):
    """Compute the reward according to the configured reward mode.

    Dispatches to compute_sparse_reward / compute_dense_reward /
    compute_normalized_dense_reward; "none" yields a zero tensor per env.
    Raises NotImplementedError for an unrecognized mode.
    """
    mode = self._reward_mode
    if mode == "sparse":
        return self.compute_sparse_reward(obs=obs, action=action, info=info)
    if mode == "dense":
        return self.compute_dense_reward(obs=obs, action=action, info=info)
    if mode == "normalized_dense":
        return self.compute_normalized_dense_reward(obs=obs, action=action, info=info)
    if mode == "none":
        return torch.zeros((self.num_envs, ), dtype=torch.float, device=self.device)
    raise NotImplementedError(self._reward_mode)
def compute_sparse_reward(self, obs: Any, action: torch.Tensor, info: Dict):
    """Sparse reward derived from the "success"/"fail" entries of ``info``:
    +1 for success, -1 for failure, 0 otherwise. When neither key is present,
    a zero tensor of shape (num_envs,) is returned."""
    has_success = "success" in info
    has_fail = "fail" in info
    if has_success and has_fail:
        success, fail = info["success"], info["fail"]
        # tensors need an explicit cast before the subtraction
        if isinstance(success, torch.Tensor):
            return success.to(torch.float) - fail.to(torch.float)
        return success - fail
    if has_success:
        return info["success"]
    if has_fail:
        return -info["fail"]
    return torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
def compute_dense_reward(self, obs: Any, action: torch.Tensor, info: Dict):
    """Dense reward. Tasks supporting the "dense" reward mode must override this."""
    raise NotImplementedError()
def compute_normalized_dense_reward(
    self, obs: Any, action: torch.Tensor, info: Dict
):
    """Normalized dense reward. Tasks supporting the "normalized_dense" reward mode must override this."""
    raise NotImplementedError()
+
# -------------------------------------------------------------------------- #
|
| 631 |
+
# Reconfigure
|
| 632 |
+
# -------------------------------------------------------------------------- #
|
| 633 |
+
def _reconfigure(self, options = dict()):
    """Reconfigure the simulation scene instance.
    This function clears the previous scene and creates a new one.

    Note this function is not always called when an environment is reset, and
    should only be used if any agents, assets, sensors, lighting need to change
    to save compute time.

    Tasks like PegInsertionSide and TurnFaucet will call this each time as the peg
    shape changes each time and the faucet model changes each time respectively.

    Args:
        options: the options dict forwarded to the load hooks (_load_agent, _load_scene, ...).
            NOTE(review): the default is a shared mutable dict() — safe only as long as no
            callee mutates it; confirm.
    """

    self._clear()
    # load everything into the scene first before initializing anything
    self._setup_scene()

    self._load_agent(options)

    self._load_scene(options)
    self._load_lighting(options)

    # GPU buffers must be set up before sensors since mounted sensors depend on them
    self.scene._setup(enable_gpu=self.gpu_sim_enabled)
    # for GPU sim, we have to setup sensors after we call setup gpu in order to enable loading mounted sensors as they depend on GPU buffer data
    self._setup_sensors(options)
    if self._viewer is not None:
        self._setup_viewer()
    # reset the countdown until the next automatic reconfiguration
    self._reconfig_counter = self.reconfiguration_freq

    # delete various cached properties and reinitialize
    # TODO (stao): The code is 3 lines because you have to initialize it once somewhere...
    self.segmentation_id_map
    del self.segmentation_id_map
    self.segmentation_id_map
def _after_reconfigure(self, options):
    """Add code here that should run immediately after self._reconfigure is called. The torch RNG context is still active so RNG is still
    seeded here by self._episode_seed. This is useful if you need to run something that only happens after reconfiguration but need the
    GPU initialized so that you can check e.g. collisions, poses etc."""
def _load_scene(self, options: dict):
    """Loads all objects like actors and articulations into the scene. Called by `self._reconfigure`. Given options argument
    is the same options dictionary passed to the self.reset function. The base implementation loads nothing;
    tasks override this to build their scene."""
# TODO (stao): refactor this into sensor API
def _setup_sensors(self, options: dict):
    """Setup sensor configurations and the sensor objects in the scene. Called by `self._reconfigure`.

    Builds configs in three groups (task sensors, agent sensors, human render / viewer
    cameras), applies user overrides, then instantiates the actual sensor objects and
    registers them on the scene.
    """

    # First create all the configurations
    self._sensor_configs = dict()

    # Add task/external sensors
    self._sensor_configs.update(parse_camera_configs(self._default_sensor_configs))

    # Add agent sensors
    self._agent_sensor_configs = dict()
    if self.agent is not None:
        self._agent_sensor_configs = parse_camera_configs(self.agent._sensor_configs)
        self._sensor_configs.update(self._agent_sensor_configs)

    # Add human render camera configs
    self._human_render_camera_configs = parse_camera_configs(
        self._default_human_render_camera_configs
    )

    self._viewer_camera_config = parse_camera_configs(
        self._default_viewer_camera_configs
    )

    # Override camera configurations with user supplied configurations
    if self._custom_sensor_configs is not None:
        update_camera_configs_from_dict(
            self._sensor_configs, self._custom_sensor_configs
        )
    if self._custom_human_render_camera_configs is not None:
        update_camera_configs_from_dict(
            self._human_render_camera_configs,
            self._custom_human_render_camera_configs,
        )
    if self._custom_viewer_camera_configs is not None:
        update_camera_configs_from_dict(
            self._viewer_camera_config,
            self._custom_viewer_camera_configs,
        )
    # unwrap: the viewer uses exactly one camera config keyed "viewer"
    self._viewer_camera_config = self._viewer_camera_config["viewer"]

    # Now we instantiate the actual sensor objects
    self._sensors = dict()

    for uid, sensor_config in self._sensor_configs.items():
        # agent-owned sensors are mounted relative to the robot articulation
        if uid in self._agent_sensor_configs:
            articulation = self.agent.robot
        else:
            articulation = None
        # NOTE(review): sensor_cls stays unbound if a config is neither a
        # StereoDepthCameraConfig nor a CameraConfig — confirm no other config types exist.
        if isinstance(sensor_config, StereoDepthCameraConfig):
            sensor_cls = StereoDepthCamera
        elif isinstance(sensor_config, CameraConfig):
            sensor_cls = Camera
        self._sensors[uid] = sensor_cls(
            sensor_config,
            self.scene,
            articulation=articulation,
        )

    # Cameras for rendering only
    self._human_render_cameras = dict()
    for uid, camera_config in self._human_render_camera_configs.items():
        self._human_render_cameras[uid] = Camera(
            camera_config,
            self.scene,
        )

    self.scene.sensors = self._sensors
    self.scene.human_render_cameras = self._human_render_cameras
def _load_lighting(self, options: dict):
    """Set up default scene lighting. Called by ``self._reconfigure``; override for custom lighting."""
    scene = self.scene
    scene.set_ambient_light([0.3, 0.3, 0.3])
    # key light; casts shadows when enabled on the environment
    scene.add_directional_light(
        [1, 1, -1],
        [1, 1, 1],
        shadow=self.enable_shadow,
        shadow_scale=5,
        shadow_map_size=2048,
    )
    # fill light pointing straight down, without shadows
    scene.add_directional_light([0, 0, -1], [1, 1, 1])
# -------------------------------------------------------------------------- #
|
| 757 |
+
# Reset
|
| 758 |
+
# -------------------------------------------------------------------------- #
|
| 759 |
+
def reset(self, seed: Union[None, int, list[int]] = None, options: Union[None, dict] = None):
    """Reset the ManiSkill environment with given seed(s) and options. Typically seed is either None (for unseeded reset) or an int (seeded reset).
    For GPU parallelized environments you can also pass a list of seeds for each parallel environment to seed each one separately.

    If options["env_idx"] is given, will only reset the selected parallel environments. If
    options["reconfigure"] is True, will call self._reconfigure() which deletes the entire physx scene and reconstructs everything.
    Users building custom tasks generally do not need to override this function.

    Returns the first observation and an info dictionary of the form

    .. highlight:: python
    .. code-block:: python

        {
            "reconfigure": bool # (True if the env reconfigured. False otherwise)
        }

    Note that ManiSkill always holds two RNG states, a main RNG, and an episode RNG. The main RNG is used purely to sample an episode seed which
    helps with reproducibility of episodes and is for internal use only. The episode RNG is used by the environment/task itself to
    e.g. randomize object positions, randomize assets etc. Episode RNG is accessible by using `self._batched_episode_rng` which is numpy based and `torch.rand`
    which can be used to generate random data on the GPU directly and is seeded. Note that it is recommended to use `self._batched_episode_rng`
    if you need to ensure during reconfiguration the same objects are loaded. Reproducibility and seeding when there is GPU and CPU simulation can be tricky and we recommend reading
    the documentation for more recommendations and details on RNG https://maniskill.readthedocs.io/en/latest/user_guide/concepts/rng.html

    Upon environment creation via gym.make, the main RNG is set with fixed seeds of 2022 to 2022 + num_envs - 1 (seed is just 2022 if there is only one environment)
    During each reset call, if seed is None, main RNG is unchanged and an episode seed is sampled from the main RNG to create the episode RNG.
    If seed is not None, main RNG is set to that seed and the episode seed is also set to that seed. This design means the main RNG determines
    the episode RNG deterministically.
    """
    if options is None:
        options = dict()
    # reconfigure when explicitly requested, or on the first reset of a
    # reconfiguration cycle (counter exhausted) when periodic reconfiguration is on
    reconfigure = options.get("reconfigure", False)
    reconfigure = reconfigure or (
        self._reconfig_counter == 0 and self.reconfiguration_freq != 0
    )
    if "env_idx" in options:
        env_idx = options["env_idx"]
        # a partial reset cannot coexist with a full scene rebuild
        if len(env_idx) != self.num_envs and reconfigure:
            raise RuntimeError("Cannot do a partial reset and reconfigure the environment. You must do one or the other.")
    else:
        env_idx = torch.arange(0, self.num_envs, device=self.device)

    self._set_main_rng(seed)

    if reconfigure:
        # seed the episode RNG before rebuilding the scene so asset loading is reproducible
        self._set_episode_rng(seed if seed is not None else self._batched_main_rng.randint(2**31), env_idx)
        # fork torch's RNG so reconfiguration does not perturb the caller's global RNG state
        with torch.random.fork_rng():
            torch.manual_seed(seed=self._episode_seed[0])
            self._reconfigure(options)
            self._after_reconfigure(options)
        # Set the episode rng again after reconfiguration to guarantee seed reproducibility
        self._set_episode_rng(self._episode_seed, env_idx)
    else:
        self._set_episode_rng(seed, env_idx)

    # TODO (stao): Reconfiguration when there is partial reset might not make sense and certainly broken here now.
    # Solution to resolve that would be to ensure tasks that do reconfigure more than once are single-env only / cpu sim only
    # or disable partial reset features explicitly for tasks that have a reconfiguration frequency
    # mask so that scene mutations below only touch the environments being reset
    self.scene._reset_mask = torch.zeros(
        self.num_envs, dtype=torch.bool, device=self.device
    )
    self.scene._reset_mask[env_idx] = True
    self._elapsed_steps[env_idx] = 0

    self._clear_sim_state()
    if self.reconfiguration_freq != 0:
        self._reconfig_counter -= 1

    if self.agent is not None:
        self.agent.reset()

    # seeded resets (or enhanced determinism) run task initialization under a forked,
    # seeded torch RNG so GPU-side randomization is reproducible
    if seed is not None or self._enhanced_determinism:
        with torch.random.fork_rng():
            torch.manual_seed(self._episode_seed[0])
            self._initialize_episode(env_idx, options)
    else:
        self._initialize_episode(env_idx, options)
    # reset the reset mask back to all ones so any internal code in maniskill can continue to manipulate all scenes at once as usual
    self.scene._reset_mask = torch.ones(
        self.num_envs, dtype=bool, device=self.device
    )
    if self.gpu_sim_enabled:
        # ensure all updates to object poses and configurations are applied on GPU after task initialization
        self.scene._gpu_apply_all()
        self.scene.px.gpu_update_articulation_kinematics()
        self.scene._gpu_fetch_all()

    # we reset controllers here because some controllers depend on the agent/articulation qpos/poses
    if self.agent is not None:
        if isinstance(self.agent.controller, dict):
            for controller in self.agent.controller.values():
                controller.reset()
        else:
            self.agent.controller.reset()

    info = self.get_info()
    obs = self.get_obs(info)

    info["reconfigure"] = reconfigure
    return obs, info
|
| 863 |
+
|
| 864 |
+
def _set_main_rng(self, seed):
    """Set the main random generator which is only used to set the seed of the episode RNG to improve reproducibility.

    Note that while _set_main_rng and _set_episode_rng are setting a seed and numpy random state, when using GPU sim
    parallelization it is highly recommended to use torch random functions as they will make things run faster. The use
    of torch random functions when building tasks in ManiSkill are automatically seeded via `torch.random.fork`
    """
    if seed is None:
        # unseeded call: keep the existing main RNG if one was already set
        if self._main_seed is not None:
            return
        # first unseeded call: draw one fresh random seed per parallel environment
        seed = np.random.RandomState().randint(2**31, size=(self.num_envs,))
    if not np.iterable(seed):
        seed = [seed]
    self._main_seed = seed
    self._main_rng = np.random.RandomState(self._main_seed[0])
    # a single seed with multiple envs: derive the remaining per-env seeds
    # deterministically from the first seed
    if len(self._main_seed) == 1 and self.num_envs > 1:
        # NOTE(review): assumes self._main_seed is a list here (list + list concat);
        # the ndarray branch above always produces num_envs seeds so it skips this — verify
        self._main_seed = self._main_seed + np.random.RandomState(self._main_seed[0]).randint(2**31, size=(self.num_envs - 1,)).tolist()
    self._batched_main_rng = BatchedRNG.from_seeds(self._main_seed, backend=self._batched_rng_backend)
|
| 882 |
+
|
| 883 |
+
def _set_episode_rng(self, seed: Union[None, list[int]], env_idx: torch.Tensor):
    """Set the random generator for current episode."""
    # only (re)seed when an explicit seed is given or enhanced determinism is on;
    # otherwise the previous episode RNG state carries over
    if seed is not None or self._enhanced_determinism:
        env_idx = common.to_numpy(env_idx)
        if seed is None:
            # derive episode seeds for the reset envs from the batched main RNG
            self._episode_seed[env_idx] = self._batched_main_rng[env_idx].randint(2**31)
        else:
            if not np.iterable(seed):
                seed = [seed]
            self._episode_seed = common.to_numpy(seed, dtype=np.int64)
            # one seed, many envs: expand deterministically from the first seed
            if len(self._episode_seed) == 1 and self.num_envs > 1:
                self._episode_seed = np.concatenate((self._episode_seed, np.random.RandomState(self._episode_seed[0]).randint(2**31, size=(self.num_envs - 1,))))
        # we keep _episode_rng for backwards compatibility but recommend using _batched_episode_rng for randomization
        if seed is not None or self._batched_episode_rng is None:
            # full reseed: rebuild the entire batched RNG
            self._batched_episode_rng = BatchedRNG.from_seeds(self._episode_seed, backend=self._batched_rng_backend)
        else:
            # partial reset: only replace the RNGs of the environments being reset
            self._batched_episode_rng[env_idx] = BatchedRNG.from_seeds(self._episode_seed[env_idx], backend=self._batched_rng_backend)
        self._episode_rng = self._batched_episode_rng[0]
|
| 901 |
+
|
| 902 |
+
def _initialize_episode(self, env_idx: torch.Tensor, options: dict):
|
| 903 |
+
"""Initialize the episode, e.g., poses of actors and articulations, as well as task relevant data like randomizing
|
| 904 |
+
goal positions
|
| 905 |
+
"""
|
| 906 |
+
|
| 907 |
+
def _clear_sim_state(self):
    """Clear simulation state (velocities) of every dynamic actor and every articulation."""
    for actor in self.scene.actors.values():
        # only dynamic bodies carry velocities that need zeroing
        if actor.px_body_type == "dynamic":
            actor.set_linear_velocity(torch.zeros(3, device=self.device))
            actor.set_angular_velocity(torch.zeros(3, device=self.device))
    for articulation in self.scene.articulations.values():
        articulation.set_qvel(torch.zeros(articulation.max_dof, device=self.device))
        articulation.set_root_linear_velocity(torch.zeros(3, device=self.device))
        articulation.set_root_angular_velocity(torch.zeros(3, device=self.device))
    if self.gpu_sim_enabled:
        # push the zeroed velocities to the GPU buffers, then refresh cached reads
        self.scene._gpu_apply_all()
        self.scene._gpu_fetch_all()
        # TODO (stao): This may be an unnecessary fetch and apply.
|
| 921 |
+
|
| 922 |
+
# -------------------------------------------------------------------------- #
|
| 923 |
+
# Step
|
| 924 |
+
# -------------------------------------------------------------------------- #
|
| 925 |
+
|
| 926 |
+
def step(self, action: Union[None, np.ndarray, torch.Tensor, Dict]):
    """
    Take a step through the environment with an action. Actions are automatically clipped to the action space.

    If ``action`` is None, the environment will proceed forward in time without sending any actions/control signals to the agent

    Returns the standard gymnasium 5-tuple: (obs, reward, terminated, truncated, info).
    Truncation is always all-False here; time-limit truncation is handled by wrappers.
    """
    action = self._step_action(action)
    self._elapsed_steps += 1
    info = self.get_info()
    obs = self.get_obs(info)
    reward = self.get_reward(obs=obs, action=action, info=info)
    # terminated = success OR fail, using whichever flags the task reports;
    # .clone() avoids aliasing the tensors stored in info
    if "success" in info:
        if "fail" in info:
            terminated = torch.logical_or(info["success"], info["fail"])
        else:
            terminated = info["success"].clone()
    else:
        if "fail" in info:
            terminated = info["fail"].clone()
        else:
            terminated = torch.zeros(self.num_envs, dtype=bool, device=self.device)
    return (
        obs,
        reward,
        terminated,
        torch.zeros(self.num_envs, dtype=bool, device=self.device),
        info,
    )
|
| 956 |
+
|
| 957 |
+
def _step_action(
    self, action: Union[None, np.ndarray, torch.Tensor, Dict]
) -> Union[None, torch.Tensor]:
    # Normalize the incoming action (array/tensor/dict/None), hand it to the agent,
    # then run the physics substeps for one control step. Returns the (possibly
    # batched/converted) action tensor, or None when no action was given.
    set_action = False
    action_is_unbatched = False
    if action is None:  # simulation without action
        pass
    elif isinstance(action, np.ndarray) or isinstance(action, torch.Tensor):
        action = common.to_tensor(action, device=self.device)
        # an action matching the single-env action space shape needs batching later
        if action.shape == self._orig_single_action_space.shape:
            action_is_unbatched = True
        set_action = True
    elif isinstance(action, dict):
        if "control_mode" in action:
            # dict form {"control_mode": ..., "action": ...}: switch controller on the fly
            if action["control_mode"] != self.agent.control_mode:
                self.agent.set_control_mode(action["control_mode"])
                self.agent.controller.reset()
            action = common.to_tensor(action["action"], device=self.device)
            if action.shape == self._orig_single_action_space.shape:
                action_is_unbatched = True
        else:
            assert isinstance(
                self.agent, MultiAgent
            ), "Received a dictionary for an action but there are not multiple robots in the environment"
            # assume this is a multi-agent action
            action = common.to_tensor(action, device=self.device)
            # if any per-robot action is unbatched, treat the whole dict as unbatched
            for k, a in action.items():
                if a.shape == self._orig_single_action_space[k].shape:
                    action_is_unbatched = True
                    break
        set_action = True
    else:
        raise TypeError(type(action))

    if set_action:
        if self.num_envs == 1 and action_is_unbatched:
            action = common.batch(action)
        self.agent.set_action(action)
        if self._sim_device.is_cuda():
            # push the controller targets into the GPU articulation buffers
            self.scene.px.gpu_apply_articulation_target_position()
            self.scene.px.gpu_apply_articulation_target_velocity()
    self._before_control_step()
    # run the physics substeps that make up one control step
    for _ in range(self._sim_steps_per_control):
        if self.agent is not None:
            self.agent.before_simulation_step()
        self._before_simulation_step()
        self.scene.step()
        self._after_simulation_step()
    self._after_control_step()
    if self.gpu_sim_enabled:
        # refresh cached CPU-side views of the GPU sim state
        self.scene._gpu_fetch_all()
    return action
|
| 1009 |
+
|
| 1010 |
+
def evaluate(self) -> dict:
    """Report task success/failure for the current state.

    Subclasses return a dictionary with a "success" key and/or a "fail" key,
    optionally including extra cached computations (e.g. whether the robot is
    grasping an object) that observation and reward code can reuse.

    The base implementation reports nothing.
    """
    return {}
|
| 1021 |
+
|
| 1022 |
+
def get_info(self) -> dict:
    """
    Get info about the current environment state: the elapsed step count plus
    whatever `self.evaluate()` reports.
    """
    if self.gpu_sim_enabled:
        # clone so callers cannot mutate the internal step counter tensor
        steps = self._elapsed_steps.clone()
    else:
        steps = self.elapsed_steps
    info = {"elapsed_steps": steps}
    info.update(self.evaluate())
    return info
|
| 1033 |
+
|
| 1034 |
+
def _before_control_step(self):
    """Hook that runs before each action has been taken via env.step(action).
    On GPU simulation this is called before observations are fetched from the GPU buffers.
    The base implementation does nothing; override in tasks as needed."""
|
| 1037 |
+
def _after_control_step(self):
    """Hook that runs after each action has been taken.
    On GPU simulation this is called right before observations are fetched from the GPU buffers.
    The base implementation does nothing; override in tasks as needed."""
|
| 1040 |
+
|
| 1041 |
+
def _before_simulation_step(self):
    """Hook that runs right before each physx_system.step is called. No-op by default."""
|
| 1043 |
+
def _after_simulation_step(self):
    """Hook that runs right after each physx_system.step is called. No-op by default."""
|
| 1045 |
+
|
| 1046 |
+
# -------------------------------------------------------------------------- #
|
| 1047 |
+
# Simulation and other gym interfaces
|
| 1048 |
+
# -------------------------------------------------------------------------- #
|
| 1049 |
+
def _set_scene_config(self):
    """Push the environment's sim configuration into the global physx settings
    (shape, body, scene, and default-material parameters). Called before
    scenes are created in `self._setup_scene`.
    """
    cfg = self.sim_config.scene_config
    physx.set_shape_config(
        contact_offset=cfg.contact_offset,
        rest_offset=cfg.rest_offset,
    )
    physx.set_body_config(
        solver_position_iterations=cfg.solver_position_iterations,
        solver_velocity_iterations=cfg.solver_velocity_iterations,
        sleep_threshold=cfg.sleep_threshold,
    )
    physx.set_scene_config(
        gravity=cfg.gravity,
        bounce_threshold=cfg.bounce_threshold,
        enable_pcm=cfg.enable_pcm,
        enable_tgs=cfg.enable_tgs,
        enable_ccd=cfg.enable_ccd,
        enable_enhanced_determinism=cfg.enable_enhanced_determinism,
        enable_friction_every_iteration=cfg.enable_friction_every_iteration,
        cpu_workers=cfg.cpu_workers,
    )
    physx.set_default_material(**self.sim_config.default_materials_config.dict())
|
| 1054 |
+
|
| 1055 |
+
def _setup_scene(self):
    """Setup the simulation scene instance.
    The function should be called in reset(). Called by `self._reconfigure`"""
    self._set_scene_config()
    if self._sim_device.is_cuda():
        # one shared GPU physx system backs all parallel sub-scenes
        physx_system = physx.PhysxGpuSystem(device=self._sim_device)
        # Create the scenes in a square grid
        sub_scenes = []
        scene_grid_length = int(np.ceil(np.sqrt(self.num_envs)))
        for scene_idx in range(self.num_envs):
            # grid coordinates, roughly centered around the origin
            scene_x, scene_y = (
                scene_idx % scene_grid_length - scene_grid_length // 2,
                scene_idx // scene_grid_length - scene_grid_length // 2,
            )
            scene = sapien.Scene(
                systems=[physx_system, sapien.render.RenderSystem(self._render_device)]
            )
            # offset each sub-scene so parallel environments do not overlap in world space
            physx_system.set_scene_offset(
                scene,
                [
                    scene_x * self.sim_config.spacing,
                    scene_y * self.sim_config.spacing,
                    0,
                ],
            )
            sub_scenes.append(scene)
    else:
        # CPU sim: a single physx system and a single sub-scene
        physx_system = physx.PhysxCpuSystem()
        sub_scenes = [
            sapien.Scene([physx_system, sapien.render.RenderSystem(self._render_device)])
        ]
    # create a "global" scene object that users can work with that is linked with all other scenes created
    self.scene = ManiSkillScene(
        sub_scenes,
        sim_config=self.sim_config,
        device=self.device,
        parallel_in_single_scene=self._parallel_in_single_scene,
        backend=self.backend
    )
    self.scene.px.timestep = 1.0 / self._sim_freq
|
| 1095 |
+
|
| 1096 |
+
def _clear(self):
|
| 1097 |
+
"""Clear the simulation scene instance and other buffers.
|
| 1098 |
+
The function can be called in reset() before a new scene is created.
|
| 1099 |
+
Called by `self._reconfigure` and when the environment is closed/deleted
|
| 1100 |
+
"""
|
| 1101 |
+
self._close_viewer()
|
| 1102 |
+
self.agent = None
|
| 1103 |
+
self._sensors = dict()
|
| 1104 |
+
self._human_render_cameras = dict()
|
| 1105 |
+
self.scene = None
|
| 1106 |
+
self._hidden_objects = []
|
| 1107 |
+
gc.collect() # force gc to collect which releases most GPU memory
|
| 1108 |
+
|
| 1109 |
+
def close(self):
    """Close the environment, releasing the viewer, scene, and associated memory."""
    self._clear()
|
| 1111 |
+
|
| 1112 |
+
def _close_viewer(self):
|
| 1113 |
+
if self._viewer is None:
|
| 1114 |
+
return
|
| 1115 |
+
self._viewer.close()
|
| 1116 |
+
self._viewer = None
|
| 1117 |
+
|
| 1118 |
+
@cached_property
def segmentation_id_map(self):
    """
    Returns a dictionary mapping every per-scene segmentation ID to the
    corresponding Actor or Link object.
    """
    # only the first managed object of each Actor/Link is inspected;
    # presumably IDs match across parallel sub-scenes — verify if relied upon
    id_map = {}
    for actor in self.scene.actors.values():
        id_map[actor._objs[0].per_scene_id] = actor
    for articulation in self.scene.articulations.values():
        for link in articulation.links:
            id_map[link._objs[0].entity.per_scene_id] = link
    return id_map
|
| 1130 |
+
|
| 1131 |
+
def add_to_state_dict_registry(self, object: Union[Actor, Articulation]):
    """Register an actor/articulation with the scene so it is included in `get_state_dict` results."""
    self.scene.add_to_state_dict_registry(object)
|
| 1133 |
+
def remove_from_state_dict_registry(self, object: Union[Actor, Articulation]):
    """Unregister an actor/articulation from the scene so it is excluded from `get_state_dict` results."""
    self.scene.remove_from_state_dict_registry(object)
|
| 1135 |
+
|
| 1136 |
+
def get_state_dict(self):
    """
    Return the environment state as a dictionary. Override to include
    task-specific information (e.g., the goal).
    """
    sim_state = self.scene.get_sim_state()
    return sim_state
|
| 1141 |
+
|
| 1142 |
+
def get_state(self):
    """
    Get environment state as a flat vector, which is just an ordered flattened version of the state_dict.

    Users should not override this function
    """
    return common.flatten_state_dict(self.get_state_dict(), use_torch=True)
|
| 1149 |
+
|
| 1150 |
+
def set_state_dict(self, state: Dict, env_idx: torch.Tensor = None):
    """
    Set environment state with a state dictionary. Override to include task information (e.g., goal)

    Note that it is recommended to keep around state dictionaries as opposed to state vectors. With state vectors we assume
    the order of data in the vector is the same exact order that would be returned by flattening the state dictionary you get from
    `env.get_state_dict()` or the result of `env.get_state()`
    """
    self.scene.set_sim_state(state, env_idx)
    if self.gpu_sim_enabled:
        # push the new state into the GPU buffers, update kinematics, then refresh cached reads
        self.scene._gpu_apply_all()
        self.scene.px.gpu_update_articulation_kinematics()
        self.scene._gpu_fetch_all()
|
| 1163 |
+
|
| 1164 |
+
def set_state(self, state: Array, env_idx: torch.Tensor = None):
    """
    Set environment state from a flat state vector. Internally this
    reconstructs the state dictionary (using the layout recorded in
    `self._init_raw_state`) and calls `env.set_state_dict`.

    Users should not override this function
    """
    KINEMATIC_DIM = 13  # [pos, quat, lin_vel, ang_vel]
    actor_states = {}
    articulation_states = {}
    offset = 0
    # actors occupy a fixed-size kinematic slice each, in registration order
    for actor_id in self._init_raw_state["actors"]:
        actor_states[actor_id] = state[:, offset : offset + KINEMATIC_DIM]
        offset += KINEMATIC_DIM
    # articulation slice widths come from the recorded initial raw state
    for art_id, art_state in self._init_raw_state["articulations"].items():
        width = art_state.shape[-1]
        articulation_states[art_id] = state[:, offset : offset + width]
        offset += width
    self.set_state_dict(
        {"actors": actor_states, "articulations": articulation_states}, env_idx
    )
|
| 1183 |
+
|
| 1184 |
+
# -------------------------------------------------------------------------- #
|
| 1185 |
+
# Visualization
|
| 1186 |
+
# -------------------------------------------------------------------------- #
|
| 1187 |
+
@property
def viewer(self):
    """The interactive viewer instance, or None if one has not been created yet."""
    return self._viewer
|
| 1190 |
+
|
| 1191 |
+
def _setup_viewer(self):
    """Setup the interactive viewer.

    The function should be called after a new scene is configured.
    In subclasses, this function can be overridden to set viewer cameras.

    Called by `self._reconfigure`
    """
    # the viewer only displays the first parallel sub-scene
    self._viewer.set_scene(self.scene.sub_scenes[0])
    control_window: sapien.utils.viewer.control_window.ControlWindow = (
        sapien_utils.get_obj_by_type(
            self._viewer.plugins, sapien.utils.viewer.control_window.ControlWindow
        )
    )
    # declutter the default GUI
    control_window.show_joint_axes = False
    control_window.show_camera_linesets = False
    # start the viewer at the task's default human render camera pose if one exists
    if "render_camera" in self._human_render_cameras:
        self._viewer.set_camera_pose(
            self._human_render_cameras["render_camera"].camera.global_pose[0].sp
        )
|
| 1211 |
+
|
| 1212 |
+
def render_human(self):
    """render the environment by opening a GUI viewer. This also returns the viewer object. Any objects registered in the _hidden_objects list will be shown"""
    # temporarily reveal hidden objects (e.g. goal markers) for the human view
    for obj in self._hidden_objects:
        obj.show_visual()
    # lazily create the viewer on first use
    if self._viewer is None:
        self._viewer = create_viewer(self._viewer_camera_config)
        self._setup_viewer()
    if self.gpu_sim_enabled and self.scene._gpu_sim_initialized:
        # the viewer renders CPU-side poses, so sync them from the GPU first
        self.scene.px.sync_poses_gpu_to_cpu()
    self._viewer.render()
    # re-hide the objects so sensor observations are unaffected
    for obj in self._hidden_objects:
        obj.hide_visual()
    return self._viewer
|
| 1225 |
+
|
| 1226 |
+
def render_rgb_array(self, camera_name: str = None):
    """Returns an RGB array / image of size (num_envs, H, W, 3) of the current state of the environment.
    This is captured by any of the registered human render cameras. If a camera_name is given, only data from that camera is returned.
    Otherwise all camera data is captured and returned as a single batched image. Any objects registered in the _hidden_objects list will be
    shown during rendering and re-hidden before returning.

    Returns None if no human render camera produced an image.
    """
    for obj in self._hidden_objects:
        obj.show_visual()
    self.scene.update_render(update_sensors=False, update_human_render_cameras=True)
    render_images = self.scene.get_human_render_camera_images(camera_name)
    images = list(render_images.values())
    # BUGFIX: re-hide objects before ANY return path. Previously the early
    # returns for zero or one image skipped the hide loop, leaving hidden
    # objects permanently visible (and polluting subsequent sensor renders).
    for obj in self._hidden_objects:
        obj.hide_visual()
    if len(images) == 0:
        return None
    if len(images) == 1:
        return images[0]
    return tile_images(images)
|
| 1244 |
+
|
| 1245 |
+
def render_sensors(self):
    """
    Render every sensor the agent can see and tile the results into a single
    human-readable image. Objects registered in the _hidden_objects list will
    not be shown.
    """
    sensor_images = self.get_sensor_images()
    tiles = [
        img
        for camera_images in sensor_images.values()
        for img in camera_images.values()
    ]
    return tile_images(tiles)
|
| 1256 |
+
|
| 1257 |
+
def render_all(self):
    """Render all human render cameras and sensors together as one tiled image."""
    for obj in self._hidden_objects:
        obj.show_visual()
    self.scene.update_render(update_sensors=True, update_human_render_cameras=True)
    render_images = self.scene.get_human_render_camera_images()
    # get_sensor_images updates the render and hides the objects itself
    sensor_images = self.get_sensor_images()
    tiles = list(render_images.values())
    for camera_images in sensor_images.values():
        tiles.extend(camera_images.values())
    return tile_images(tiles)
|
| 1272 |
+
|
| 1273 |
+
def render(self):
    """
    Dispatch rendering according to ``self.render_mode``.

    "human" opens/refreshes a GUI viewer; "rgb_array" returns a (typically
    higher quality) image suitable for saving videos; "sensors" returns all
    visual observations the agent can see; "all" combines "rgb_array" and
    "sensors".
    """
    mode = self.render_mode
    if mode is None:
        raise RuntimeError("render_mode is not set.")
    if mode == "human":
        return self.render_human()
    if mode == "rgb_array":
        return self.render_rgb_array()
    if mode == "sensors":
        return self.render_sensors()
    if mode == "all":
        return self.render_all()
    raise NotImplementedError(f"Unsupported render mode {self.render_mode}.")
|
| 1297 |
+
|
| 1298 |
+
# TODO (stao): re implement later
|
| 1299 |
+
# ---------------------------------------------------------------------------- #
|
| 1300 |
+
# Advanced
|
| 1301 |
+
# ---------------------------------------------------------------------------- #
|
| 1302 |
+
|
| 1303 |
+
# def gen_scene_pcd(self, num_points: int = int(1e5)) -> np.ndarray:
|
| 1304 |
+
# """Generate scene point cloud for motion planning, excluding the robot"""
|
| 1305 |
+
# meshes = []
|
| 1306 |
+
# articulations = self.scene.get_all_articulations()
|
| 1307 |
+
# if self.agent is not None:
|
| 1308 |
+
# articulations.pop(articulations.index(self.agent.robot))
|
| 1309 |
+
# for articulation in articulations:
|
| 1310 |
+
# articulation_mesh = merge_meshes(get_articulation_meshes(articulation))
|
| 1311 |
+
# if articulation_mesh:
|
| 1312 |
+
# meshes.append(articulation_mesh)
|
| 1313 |
+
|
| 1314 |
+
# for actor in self.scene.get_all_actors():
|
| 1315 |
+
# actor_mesh = merge_meshes(get_component_meshes(actor))
|
| 1316 |
+
# if actor_mesh:
|
| 1317 |
+
# meshes.append(
|
| 1318 |
+
# actor_mesh.apply_transform(
|
| 1319 |
+
# actor.get_pose().to_transformation_matrix()
|
| 1320 |
+
# )
|
| 1321 |
+
# )
|
| 1322 |
+
|
| 1323 |
+
# scene_mesh = merge_meshes(meshes)
|
| 1324 |
+
# scene_pcd = scene_mesh.sample(num_points)
|
| 1325 |
+
# return scene_pcd
|
| 1326 |
+
|
| 1327 |
+
|
| 1328 |
+
# Printing metrics/info
|
| 1329 |
+
def print_sim_details(self):
    """Debug tool to call to simply print a bunch of details about the running environment, including the task ID, number of environments, sim backend, etc."""
    # summarize each registered camera sensor as "RGBD(WxH)"
    sensor_settings_str = []
    for uid, cam in self._sensors.items():
        if isinstance(cam, Camera):
            config = cam.config
            sensor_settings_str.append(f"RGBD({config.width}x{config.height})")
    sensor_settings_str = ", ".join(sensor_settings_str)
    sim_backend = self.backend.sim_backend
    print(
        "# -------------------------------------------------------------------------- #"
    )
    print(
        f"Task ID: {self.spec.id}, {self.num_envs} parallel environments, sim_backend={sim_backend}"
    )
    print(
        f"obs_mode={self.obs_mode}, control_mode={self.control_mode}"
    )
    print(
        f"render_mode={self.render_mode}, sensor_details={sensor_settings_str}"
    )
    print(
        f"sim_freq={self.sim_freq}, control_freq={self.control_freq}"
    )
    print(f"observation space: {self.observation_space}")
    print(f"(single) action space: {self.single_action_space}")
    print(
        "# -------------------------------------------------------------------------- #"
    )
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/scene.py
ADDED
|
@@ -0,0 +1,1163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
from functools import cached_property
|
| 3 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import sapien
|
| 7 |
+
import sapien.physx as physx
|
| 8 |
+
import sapien.render
|
| 9 |
+
import torch
|
| 10 |
+
from sapien.render import RenderCameraComponent
|
| 11 |
+
|
| 12 |
+
from mani_skill.envs.utils.system.backend import BackendInfo
|
| 13 |
+
from mani_skill.render import SAPIEN_RENDER_SYSTEM
|
| 14 |
+
from mani_skill.sensors.base_sensor import BaseSensor
|
| 15 |
+
from mani_skill.sensors.camera import Camera
|
| 16 |
+
from mani_skill.utils import common, sapien_utils
|
| 17 |
+
from mani_skill.utils.structs.actor import Actor
|
| 18 |
+
from mani_skill.utils.structs.articulation import Articulation
|
| 19 |
+
from mani_skill.utils.structs.drive import Drive
|
| 20 |
+
from mani_skill.utils.structs.link import Link
|
| 21 |
+
from mani_skill.utils.structs.pose import Pose
|
| 22 |
+
from mani_skill.utils.structs.render_camera import RenderCamera
|
| 23 |
+
from mani_skill.utils.structs.types import Array, Device, SimConfig
|
| 24 |
+
|
| 25 |
+
# try and determine which render system is used by the installed sapien package
|
| 26 |
+
if SAPIEN_RENDER_SYSTEM == "3.1":
|
| 27 |
+
from sapien.wrapper.scene import get_camera_shader_pack
|
| 28 |
+
|
| 29 |
+
GlobalShaderPack = None
|
| 30 |
+
sapien.render.RenderCameraGroup = "oldtype" # type: ignore
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@dataclass
class StateDictRegistry:
    """Registry of the Actor/Articulation structs whose states make up the environment state dict.

    Maps actor/articulation names to their struct references; only the objects
    registered here are serialized into / restored from environment state.
    """

    actors: Dict[str, Actor]
    articulations: Dict[str, Articulation]
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class ManiSkillScene:
|
| 40 |
+
"""
|
| 41 |
+
Class that manages a list of sub-scenes (sapien.Scene). In CPU simulation there should only be one sub-scene.
|
| 42 |
+
In GPU simulation, there can be many sub-scenes, and this wrapper ensures that use calls to many of the original sapien.Scene API
|
| 43 |
+
are applied to all sub-scenes. This includes calls to change object poses, velocities, drive targets etc.
|
| 44 |
+
|
| 45 |
+
This wrapper also helps manage GPU states if GPU simulation is used
|
| 46 |
+
"""
|
| 47 |
+
|
| 48 |
+
def __init__(
    self,
    sub_scenes: Optional[List[sapien.Scene]] = None,
    sim_config: Optional[SimConfig] = None,
    debug_mode: bool = True,
    device: Device = None,
    parallel_in_single_scene: bool = False,
    backend: BackendInfo = None,
):
    """Create a wrapper over one (CPU) or many (GPU) sapien sub-scenes.

    Args:
        sub_scenes: list of sapien.Scene objects to manage. Defaults to a single new scene.
        sim_config: simulation configuration. Defaults to a fresh ``SimConfig()`` per
            instance (avoids sharing one mutable default object across all scenes).
        debug_mode: enables extra debug behavior.
        device: torch device used for batched tensors such as the reset mask.
        parallel_in_single_scene: whether all parallel envs are rendered in one scene (GUI demo mode).
        backend: references the backend info object stored in the BaseEnv class.
    """
    if sub_scenes is None:
        sub_scenes = [sapien.Scene()]
    # Previously `sim_config` had a shared default instance (`SimConfig()` in the
    # signature); create it per-call instead so instances never alias one config.
    if sim_config is None:
        sim_config = SimConfig()
    self.sub_scenes = sub_scenes
    self.px: Union[physx.PhysxCpuSystem, physx.PhysxGpuSystem] = self.sub_scenes[
        0
    ].physx_system
    assert all(
        isinstance(s.physx_system, type(self.px)) for s in self.sub_scenes
    ), "all sub-scenes must use the same simulation backend"
    self.gpu_sim_enabled = isinstance(self.px, physx.PhysxGpuSystem)
    """whether the sub scenes are using the GPU or CPU backend"""
    self.sim_config = sim_config
    self._gpu_sim_initialized = False
    self.debug_mode = debug_mode
    self.device = device
    self.backend = backend  # references the backend object stored in BaseEnv class

    self.render_system_group: sapien.render.RenderSystemGroup = None
    self.camera_groups: Dict[str, sapien.render.RenderCameraGroup] = dict()

    self.actors: Dict[str, Actor] = dict()
    self.articulations: Dict[str, Articulation] = dict()

    self.actor_views: Dict[str, Actor] = dict()
    """views of actors in any sub-scenes created by using Actor.merge and queryable as if it were a single Actor"""
    self.articulation_views: Dict[str, Articulation] = dict()
    """views of articulations in any sub-scenes created by using Articulation.merge and queryable as if it were a single Articulation"""

    self.sensors: Dict[str, BaseSensor] = dict()
    self.human_render_cameras: Dict[str, Camera] = dict()
    self._sensors_initialized = False
    self._human_render_cameras_initialized = False

    self._reset_mask = torch.ones(len(sub_scenes), dtype=bool, device=self.device)
    """Used internally by various objects like Actor, Link, and Controllers to auto mask out sub-scenes so they do not get modified during
    partial env resets"""

    self._needs_fetch = False
    """Used internally to raise some errors ahead of time of when there may be undefined behaviors"""

    self.pairwise_contact_queries: Dict[
        str, physx.PhysxGpuContactPairImpulseQuery
    ] = dict()
    """dictionary mapping pairwise contact query keys to GPU contact queries. Used in GPU simulation only to cache queries as
    query creation will pause any GPU sim computation"""
    self._pairwise_contact_query_unique_hashes: Dict[str, int] = dict()
    """maps keys in self.pairwise_contact_queries to unique hashes dependent on the actual objects involved in the query.
    This is used to determine automatically when to rebuild contact queries as keys for self.pairwise_contact_queries are kept
    non-unique between episode resets in order to be easily rebuilt and deallocate old queries. This essentially acts as a way
    to invalidate the cached queries."""

    self.parallel_in_single_scene: bool = parallel_in_single_scene
    """Whether rendering all parallel scenes in the viewer/gui is enabled"""

    self.state_dict_registry: StateDictRegistry = StateDictRegistry(
        actors=dict(), articulations=dict()
    )
    """state dict registry that map actor/articulation names to Actor/Articulation struct references. Only these structs are used for the environment state"""
|
| 117 |
+
|
| 118 |
+
# -------------------------------------------------------------------------- #
|
| 119 |
+
# Functions from sapien.Scene
|
| 120 |
+
# -------------------------------------------------------------------------- #
|
| 121 |
+
@property
def timestep(self):
    """The current simulation timestep"""
    return self.px.timestep

@timestep.setter
def timestep(self, timestep):
    # Set on the shared physx system, so this affects all sub-scenes at once.
    self.px.timestep = timestep
|
| 129 |
+
|
| 130 |
+
def set_timestep(self, timestep):
    """Setter-style alias for the ``timestep`` property."""
    self.timestep = timestep

def get_timestep(self):
    """Getter-style alias for the ``timestep`` property."""
    return self.timestep
|
| 137 |
+
|
| 138 |
+
def create_actor_builder(self):
    """Creates an ActorBuilder object that can be used to build actors in this scene"""
    from ..utils.building.actor_builder import ActorBuilder

    builder = ActorBuilder()
    return builder.set_scene(self)
|
| 143 |
+
|
| 144 |
+
def create_articulation_builder(self):
    """Creates an ArticulationBuilder object that can be used to build articulations in this scene"""
    from ..utils.building.articulation_builder import ArticulationBuilder

    builder = ArticulationBuilder()
    return builder.set_scene(self)
|
| 149 |
+
|
| 150 |
+
def create_urdf_loader(self):
    """Creates a URDFLoader object that can be used to load URDF files into this scene"""
    from ..utils.building.urdf_loader import URDFLoader

    urdf_loader = URDFLoader()
    urdf_loader.set_scene(self)
    return urdf_loader
|
| 157 |
+
|
| 158 |
+
def create_mjcf_loader(self):
    """Creates a MJCFLoader object that can be used to load MJCF files into this scene"""
    from ..utils.building.mjcf_loader import MJCFLoader

    mjcf_loader = MJCFLoader()
    mjcf_loader.set_scene(self)
    return mjcf_loader
|
| 165 |
+
|
| 166 |
+
# def create_physical_material(
|
| 167 |
+
# self, static_friction: float, dynamic_friction: float, restitution: float
|
| 168 |
+
# ):
|
| 169 |
+
# return physx.PhysxMaterial(static_friction, dynamic_friction, restitution)
|
| 170 |
+
|
| 171 |
+
def remove_actor(self, actor: Actor):
    """Removes an actor from the scene. Only works in CPU simulation."""
    # Guard clause: GPU sim does not support removing objects after creation.
    if self.gpu_sim_enabled:
        raise NotImplementedError(
            "Cannot remove actors after creating them in GPU sim at the moment"
        )
    self.sub_scenes[0].remove_entity(actor._objs[0].entity)
    self.actors.pop(actor.name)
|
| 180 |
+
|
| 181 |
+
def remove_articulation(self, articulation: Articulation):
    """Removes an articulation from the scene. Only works in CPU simulation."""
    # Guard clause: GPU sim does not support removing objects after creation.
    if self.gpu_sim_enabled:
        raise NotImplementedError(
            "Cannot remove articulations after creating them in GPU sim at the moment"
        )
    # Remove every link's entity from the (single) CPU sub-scene.
    for link in articulation._objs[0].links:
        self.sub_scenes[0].remove_entity(link.entity)
    self.articulations.pop(articulation.name)
|
| 192 |
+
|
| 193 |
+
def add_camera(
    self,
    name,
    pose,
    width,
    height,
    near,
    far,
    fovy: Union[float, List, None] = None,
    intrinsic: Union[Array, None] = None,
    mount: Union[Actor, Link, None] = None,
) -> RenderCamera:
    """Add's a (mounted) camera to the scene"""
    # Dispatch to the implementation matching the installed sapien render system.
    if SAPIEN_RENDER_SYSTEM == "3.1":
        impl = self._sapien_31_add_camera
    else:
        impl = self._sapien_add_camera
    return impl(name, pose, width, height, near, far, fovy, intrinsic, mount)
|
| 214 |
+
|
| 215 |
+
def _sapien_add_camera(
    self,
    name,
    pose,
    width,
    height,
    near,
    far,
    fovy: Union[float, List, None] = None,
    intrinsic: Union[Array, None] = None,
    mount: Union[Actor, Link, None] = None,
) -> RenderCamera:
    """internal helper function to add (mounted) cameras

    Creates one RenderCameraComponent per sub-scene, configures its projection
    from either ``fovy`` or ``intrinsic`` (scalar/first entry shared, or one
    value per sub-scene), mounts it on ``mount`` if given (otherwise on a new
    entity per scene), and wraps all of them into a single batched RenderCamera.
    """
    cameras = []
    pose = Pose.create(pose)
    # TODO (stao): support scene idxs property for cameras in the future
    # move intrinsic to np and batch intrinsic if it is not batched
    if intrinsic is not None:
        intrinsic = common.to_numpy(intrinsic)
        if len(intrinsic.shape) == 2:
            intrinsic = intrinsic[None, :]
            if len(self.sub_scenes) > 1:
                # repeat the intrinsic along batch dim
                intrinsic = intrinsic.repeat(len(self.sub_scenes), 0)
        assert len(intrinsic) == len(
            self.sub_scenes
        ), "intrinsic matrix batch dim not equal to the number of sub-scenes"
    for i, scene in enumerate(self.sub_scenes):
        # Create camera component
        camera = RenderCameraComponent(width, height)
        if fovy is not None:
            # fovy may be a shared scalar or a per-sub-scene sequence
            if isinstance(fovy, float) or isinstance(fovy, int):
                camera.set_fovy(fovy, compute_x=True)
            else:
                camera.set_fovy(fovy[i], compute_x=True)
        elif intrinsic is not None:
            camera.set_focal_lengths(intrinsic[i, 0, 0], intrinsic[i, 1, 1])
            camera.set_principal_point(intrinsic[i, 0, 2], intrinsic[i, 1, 2])
        # near/far may likewise be scalars or per-sub-scene sequences
        if isinstance(near, float) or isinstance(near, int):
            camera.near = near
        else:
            camera.near = near[i]
        if isinstance(far, float) or isinstance(far, int):
            camera.far = far
        else:
            camera.far = far[i]

        # mount camera to actor/link
        if mount is not None:
            if self.gpu_sim_enabled:
                # In GPU sim the camera pose follows the mount via its GPU pose
                # buffer index rather than the scene graph.
                if isinstance(mount, Actor):
                    camera.set_gpu_pose_batch_index(
                        mount._objs[i]
                        .find_component_by_type(physx.PhysxRigidBodyComponent)
                        .gpu_pose_index
                    )
                elif isinstance(mount, Link):
                    camera.set_gpu_pose_batch_index(mount._objs[i].gpu_pose_index)
                else:
                    raise ValueError(
                        f"Tried to mount camera on object of type {mount.__class__}"
                    )
            if isinstance(mount, Link):
                mount._objs[i].entity.add_component(camera)
            else:
                mount._objs[i].add_component(camera)
        else:
            # No mount: attach the camera to a fresh entity in this sub-scene.
            camera_mount = sapien.Entity()
            camera_mount.add_component(camera)
            scene.add_entity(camera_mount)
            camera_mount.name = f"scene-{i}_{name}"
        # pose may be a single shared pose or batched per sub-scene
        if len(pose) == 1:
            camera.local_pose = pose.sp
        else:
            camera.local_pose = pose[i].sp
        camera.name = f"scene-{i}_{name}"
        cameras.append(camera)
    return RenderCamera.create(cameras, self, mount=mount)
|
| 293 |
+
|
| 294 |
+
def _sapien_31_add_camera(
    self,
    name,
    pose,
    width,
    height,
    near,
    far,
    fovy: Union[float, List, None] = None,
    intrinsic: Union[Array, None] = None,
    mount: Union[Actor, Link, None] = None,
) -> RenderCamera:
    """internal helper function to add (mounted) cameras

    SAPIEN 3.1 variant of :meth:`_sapien_add_camera`: camera components take an
    explicit shader pack and no GPU pose-batch index wiring is needed here.
    """
    cameras = []
    pose = Pose.create(pose)
    # TODO (stao): support scene idxs property for cameras in the future
    # move intrinsic to np and batch intrinsic if it is not batched
    if intrinsic is not None:
        intrinsic = common.to_numpy(intrinsic)
        if len(intrinsic.shape) == 2:
            intrinsic = intrinsic[None, :]
            if len(self.sub_scenes) > 1:
                # repeat the intrinsic along batch dim
                intrinsic = intrinsic.repeat(len(self.sub_scenes), 0)
        assert len(intrinsic) == len(
            self.sub_scenes
        ), "intrinsic matrix batch dim not equal to the number of sub-scenes"

    for i, scene in enumerate(self.sub_scenes):
        # Create camera component
        camera = RenderCameraComponent(
            width, height, GlobalShaderPack or get_camera_shader_pack()
        )
        if fovy is not None:
            # fovy may be a shared scalar or a per-sub-scene sequence
            if isinstance(fovy, (float, int)):
                camera.set_fovy(fovy, compute_x=True)
            else:
                camera.set_fovy(fovy[i], compute_x=True)
        elif intrinsic is not None:
            camera.set_focal_lengths(intrinsic[i, 0, 0], intrinsic[i, 1, 1])
            camera.set_principal_point(intrinsic[i, 0, 2], intrinsic[i, 1, 2])
        # near/far may likewise be scalars or per-sub-scene sequences
        if isinstance(near, (float, int)):
            camera.near = near
        else:
            camera.near = near[i]
        if isinstance(far, (float, int)):
            camera.far = far
        else:
            camera.far = far[i]

        # mount camera to actor/link
        if mount is not None:
            if isinstance(mount, Link):
                mount._objs[i].entity.add_component(camera)
            else:
                mount._objs[i].add_component(camera)
        else:
            # No mount: attach the camera to a fresh entity in this sub-scene.
            camera_mount = sapien.Entity()
            camera_mount.set_pose(sapien.Pose([0, 0, 0]))
            camera_mount.add_component(camera)
            camera_mount.name = f"scene-{i}_{name}"
            scene.add_entity(camera_mount)
        # pose may be a single shared pose or batched per sub-scene
        if len(pose) == 1:
            camera.local_pose = pose.sp
        else:
            camera.local_pose = pose[i].sp
        camera.name = f"scene-{i}_{name}"
        cameras.append(camera)
        scene.update_render()
    return RenderCamera.create(cameras, self, mount=mount)
|
| 364 |
+
|
| 365 |
+
# def remove_camera(self, camera):
|
| 366 |
+
# self.remove_entity(camera.entity)
|
| 367 |
+
|
| 368 |
+
# def get_cameras(self):
|
| 369 |
+
# return self.render_system.cameras
|
| 370 |
+
|
| 371 |
+
# def get_mounted_cameras(self):
|
| 372 |
+
# return self.get_cameras()
|
| 373 |
+
|
| 374 |
+
def step(self):
    """Step the shared physx system forward by one simulation timestep."""
    self.px.step()
|
| 376 |
+
|
| 377 |
+
def update_render(
    self, update_sensors: bool = True, update_human_render_cameras: bool = True
):
    """
    Updates the renderer based on the current simulation state. Note that on the first call if a sensor/human render camera is required to be updated,
    GPU memory will be allocated for the sensor/human render camera respectively.

    Arguments:
        update_sensors (bool): Whether to update the sensors.
        update_human_render_cameras (bool): Whether to update the human render cameras.
    """
    # Choose the implementation matching the installed sapien render system.
    if SAPIEN_RENDER_SYSTEM == "3.1":
        impl = self._sapien_31_update_render
    else:
        impl = self._sapien_update_render
    impl(
        update_sensors=update_sensors,
        update_human_render_cameras=update_human_render_cameras,
    )
|
| 398 |
+
|
| 399 |
+
def _sapien_update_render(
    self, update_sensors: bool = True, update_human_render_cameras: bool = True
):
    """Sync simulation state to the renderer (default sapien render system).

    Lazily sets up the GPU render system group and sensor/human-camera GPU
    resources the first time they are needed, then updates the renderer.
    """
    # note that this design is such that no GPU memory is allocated for memory unless requested for, which can occur
    # after the e.g. physx GPU simulation is initialized.
    if self.gpu_sim_enabled:
        if not self.parallel_in_single_scene:
            # Lazy one-time setup; order matters: render group first, then sensors.
            if self.render_system_group is None:
                self._setup_gpu_rendering()
            if not self._sensors_initialized and update_sensors:
                self._gpu_setup_sensors(self.sensors)
                self._sensors_initialized = True
            if (
                not self._human_render_cameras_initialized
                and update_human_render_cameras
            ):
                self._gpu_setup_sensors(self.human_render_cameras)
                self._human_render_cameras_initialized = True
            self.render_system_group.update_render()
        else:
            # GUI demo mode: pull poses back to CPU and render the single scene.
            self.px.sync_poses_gpu_to_cpu()
            self.sub_scenes[0].update_render()
    else:
        self.sub_scenes[0].update_render()
|
| 423 |
+
|
| 424 |
+
def _sapien_31_update_render(
    self, update_sensors: bool = True, update_human_render_cameras: bool = True
):
    """Sync simulation state to the renderer (SAPIEN 3.1 render system).

    Lazily initializes the GPU sync manager and sensor resources on first use,
    then synchronizes render state via the GpuSyncManager.
    """
    if self.gpu_sim_enabled:
        if self.render_system_group is None:
            # TODO (stao): for new render system support the parallel in single scene rendering option
            # Each sub-scene must be rendered once before GPU rendering setup.
            for scene in self.sub_scenes:
                scene.update_render()
            self._setup_gpu_rendering()
        if not self._sensors_initialized and update_sensors:
            self._gpu_setup_sensors(self.sensors)
            self._sensors_initialized = True
        if (
            not self._human_render_cameras_initialized
            and update_human_render_cameras
        ):
            self._gpu_setup_sensors(self.human_render_cameras)
            self._human_render_cameras_initialized = True

        # In the 3.1 system render_system_group holds a GpuSyncManager.
        manager: sapien.render.GpuSyncManager = self.render_system_group
        manager.sync()
    else:
        self.sub_scenes[0].update_render()
|
| 447 |
+
|
| 448 |
+
def get_contacts(self):
    """Return all contacts reported by the underlying physx system."""
    # TODO (stao): deprecate this API
    return self.px.get_contacts()
|
| 451 |
+
|
| 452 |
+
def get_all_actors(self):
    """
    Returns list of all sapien.Entity objects that have rigid dynamic and static components across all sub scenes
    """
    components = self.px.rigid_dynamic_components + self.px.rigid_static_components
    entities = []
    for component in components:
        entities.append(component.entity)
    return entities
|
| 460 |
+
|
| 461 |
+
def get_all_articulations(self):
    """
    Returns list of all physx articulation objects across all sub scenes
    """
    articulations = []
    for link_component in self.px.articulation_link_components:
        if link_component.is_root:
            articulations.append(link_component.articulation)
    return articulations
|
| 468 |
+
|
| 469 |
+
def create_drive(
    self,
    body0: Union[Actor, Link],
    pose0: Union[sapien.Pose, Pose],
    body1: Union[Actor, Link],
    pose1: Union[sapien.Pose, Pose],
):
    """Create a Drive constraint between ``body0`` and ``body1`` across sub-scenes.

    ``pose0``/``pose1`` are the drive anchor poses relative to each body. The
    drive is created over ``body0``'s scene indices, so body0 and body1 are
    expected to exist in the same sub-scenes.
    """
    # body0 and body1 should be in parallel.
    return Drive.create_from_actors_or_links(
        self, body0, pose0, body1, pose1, body0._scene_idxs
    )
|
| 480 |
+
|
| 481 |
+
# def create_connection(
|
| 482 |
+
# self,
|
| 483 |
+
# body0: Optional[Union[sapien.Entity, physx.PhysxRigidBaseComponent]],
|
| 484 |
+
# pose0: sapien.Pose,
|
| 485 |
+
# body1: Union[sapien.Entity, physx.PhysxRigidBaseComponent],
|
| 486 |
+
# pose1: sapien.Pose,
|
| 487 |
+
# ):
|
| 488 |
+
# if body0 is None:
|
| 489 |
+
# c0 = None
|
| 490 |
+
# elif isinstance(body0, sapien.Entity):
|
| 491 |
+
# c0 = next(
|
| 492 |
+
# c
|
| 493 |
+
# for c in body0.components
|
| 494 |
+
# if isinstance(c, physx.PhysxRigidBaseComponent)
|
| 495 |
+
# )
|
| 496 |
+
# else:
|
| 497 |
+
# c0 = body0
|
| 498 |
+
|
| 499 |
+
# assert body1 is not None
|
| 500 |
+
# if isinstance(body1, sapien.Entity):
|
| 501 |
+
# e1 = body1
|
| 502 |
+
# c1 = next(
|
| 503 |
+
# c
|
| 504 |
+
# for c in body1.components
|
| 505 |
+
# if isinstance(c, physx.PhysxRigidBaseComponent)
|
| 506 |
+
# )
|
| 507 |
+
# else:
|
| 508 |
+
# e1 = body1.entity
|
| 509 |
+
# c1 = body1
|
| 510 |
+
|
| 511 |
+
# connection = physx.PhysxDistanceJointComponent(c1)
|
| 512 |
+
# connection.parent = c0
|
| 513 |
+
# connection.pose_in_child = pose1
|
| 514 |
+
# connection.pose_in_parent = pose0
|
| 515 |
+
# e1.add_component(connection)
|
| 516 |
+
# connection.set_limit(0, 0)
|
| 517 |
+
# return connection
|
| 518 |
+
|
| 519 |
+
# def create_gear(
|
| 520 |
+
# self,
|
| 521 |
+
# body0: Optional[Union[sapien.Entity, physx.PhysxRigidBaseComponent]],
|
| 522 |
+
# pose0: sapien.Pose,
|
| 523 |
+
# body1: Union[sapien.Entity, physx.PhysxRigidBaseComponent],
|
| 524 |
+
# pose1: sapien.Pose,
|
| 525 |
+
# ):
|
| 526 |
+
# if body0 is None:
|
| 527 |
+
# c0 = None
|
| 528 |
+
# elif isinstance(body0, sapien.Entity):
|
| 529 |
+
# c0 = next(
|
| 530 |
+
# c
|
| 531 |
+
# for c in body0.components
|
| 532 |
+
# if isinstance(c, physx.PhysxRigidBaseComponent)
|
| 533 |
+
# )
|
| 534 |
+
# else:
|
| 535 |
+
# c0 = body0
|
| 536 |
+
|
| 537 |
+
# assert body1 is not None
|
| 538 |
+
# if isinstance(body1, sapien.Entity):
|
| 539 |
+
# e1 = body1
|
| 540 |
+
# c1 = next(
|
| 541 |
+
# c
|
| 542 |
+
# for c in body1.components
|
| 543 |
+
# if isinstance(c, physx.PhysxRigidBaseComponent)
|
| 544 |
+
# )
|
| 545 |
+
# else:
|
| 546 |
+
# e1 = body1.entity
|
| 547 |
+
# c1 = body1
|
| 548 |
+
|
| 549 |
+
# gear = physx.PhysxGearComponent(c1)
|
| 550 |
+
# gear.parent = c0
|
| 551 |
+
# gear.pose_in_child = pose1
|
| 552 |
+
# gear.pose_in_parent = pose0
|
| 553 |
+
# e1.add_component(gear)
|
| 554 |
+
# return gear
|
| 555 |
+
|
| 556 |
+
# @property
|
| 557 |
+
# def render_id_to_visual_name(self):
|
| 558 |
+
# # TODO
|
| 559 |
+
# return
|
| 560 |
+
|
| 561 |
+
@property
def ambient_light(self):
    """Ambient light color, read from the first sub-scene (the setter keeps all sub-scenes in sync)."""
    return self.sub_scenes[0].ambient_light

@ambient_light.setter
def ambient_light(self, color):
    # Broadcast the color to every sub-scene's render system.
    for scene in self.sub_scenes:
        scene.render_system.ambient_light = color
|
| 569 |
+
|
| 570 |
+
def set_ambient_light(self, color):
    """Setter-style alias for the ``ambient_light`` property (applies to all sub-scenes)."""
    self.ambient_light = color
|
| 572 |
+
|
| 573 |
+
def add_point_light(
    self,
    position,
    color,
    shadow=False,
    shadow_near=0.1,
    shadow_far=10.0,
    shadow_map_size=2048,
    scene_idxs: Optional[List[int]] = None,
):
    """Add a point light to the selected sub-scenes (all of them by default).

    In ``parallel_in_single_scene`` mode every light is placed in sub-scene 0,
    offset by that scene's spatial offset.

    Returns:
        the light component created for the *last* scene index only.
        NOTE(review): earlier lights are not returned or tracked here — confirm
        callers do not need handles to all created lights.
    """
    if scene_idxs is None:
        scene_idxs = list(range(len(self.sub_scenes)))
    for scene_idx in scene_idxs:
        if self.parallel_in_single_scene:
            scene = self.sub_scenes[0]
        else:
            scene = self.sub_scenes[scene_idx]
        entity = sapien.Entity()
        entity.name = "point_light"
        light = sapien.render.RenderPointLightComponent()
        entity.add_component(light)
        light.color = color
        light.shadow = shadow
        light.shadow_near = shadow_near
        light.shadow_far = shadow_far
        light.shadow_map_size = shadow_map_size
        if self.parallel_in_single_scene:
            # shift the light into this env's region of the single shared scene
            light.pose = sapien.Pose(position + self.scene_offsets_np[scene_idx])
        else:
            light.pose = sapien.Pose(position)

        scene.add_entity(entity)
    return light
|
| 606 |
+
|
| 607 |
+
def add_directional_light(
    self,
    direction,
    color,
    shadow=False,
    position=[0, 0, 0],
    shadow_scale=10.0,
    shadow_near=-10.0,
    shadow_far=10.0,
    shadow_map_size=2048,
    scene_idxs: Optional[List[int]] = None,
):
    """Add a directional (sun-like) light to the selected sub-scenes.

    Args:
        direction: xyz direction the light shines along.
        color: light color.
        shadow: whether the light casts shadows.
        position: anchor position used for the shadow frustum.
            NOTE(review): mutable default argument — shared across calls; it is
            only read here, but it must never be mutated.
        shadow_scale: half size of the orthographic shadow volume.
        shadow_near: near plane of the shadow volume.
        shadow_far: far plane of the shadow volume.
        shadow_map_size: resolution of the shadow map.
        scene_idxs: indices of sub-scenes to add the light to; all when None.
    """
    if scene_idxs is None:
        scene_idxs = list(range(len(self.sub_scenes)))
    for scene_idx in scene_idxs:
        if self.parallel_in_single_scene:
            scene = self.sub_scenes[0]
        else:
            scene = self.sub_scenes[scene_idx]
        entity = sapien.Entity()
        entity.name = "directional_light"
        light = sapien.render.RenderDirectionalLightComponent()
        entity.add_component(light)
        light.color = color
        light.shadow = shadow
        light.shadow_near = shadow_near
        light.shadow_far = shadow_far
        light.shadow_half_size = shadow_scale
        light.shadow_map_size = shadow_map_size
        if self.parallel_in_single_scene:
            light_position = position + self.scene_offsets_np[scene_idx]
        else:
            light_position = position
        # orient the light's local +x axis along the requested direction
        light.pose = sapien.Pose(
            light_position, sapien.math.shortest_rotation([1, 0, 0], direction)
        )
        scene.add_entity(entity)
        if self.parallel_in_single_scene:
            # for directional lights adding multiple does not make much sense
            # and for parallel gui rendering setup accurate lighting does not matter as it is only
            # for demo purposes
            break
    return
|
| 650 |
+
|
| 651 |
+
def add_spot_light(
    self,
    position,
    direction,
    inner_fov: float,
    outer_fov: float,
    color,
    shadow=False,
    shadow_near=0.1,
    shadow_far=10.0,
    shadow_map_size=2048,
    scene_idxs: Optional[List[int]] = None,
):
    """Add a spot light to the selected sub-scenes.

    Args:
        position: xyz position of the light in the world frame.
        direction: xyz direction the cone points along.
        inner_fov: angle of the fully lit inner cone.
        outer_fov: angle at which the light falls off to zero.
        color: light color.
        shadow: whether the light casts shadows.
        shadow_near: near plane of the shadow volume.
        shadow_far: far plane of the shadow volume.
        shadow_map_size: resolution of the shadow map.
        scene_idxs: indices of sub-scenes to add the light to; all when None.
    """
    if scene_idxs is None:
        scene_idxs = list(range(len(self.sub_scenes)))
    for scene_idx in scene_idxs:
        # single-scene parallel rendering packs all lights into sub-scene 0
        if self.parallel_in_single_scene:
            scene = self.sub_scenes[0]
        else:
            scene = self.sub_scenes[scene_idx]
        entity = sapien.Entity()
        entity.name = "spot_light"
        light = sapien.render.RenderSpotLightComponent()
        entity.add_component(light)
        light.color = color
        light.shadow = shadow
        light.shadow_near = shadow_near
        light.shadow_far = shadow_far
        light.shadow_map_size = shadow_map_size
        light.inner_fov = inner_fov
        light.outer_fov = outer_fov
        if self.parallel_in_single_scene:
            light_position = position + self.scene_offsets_np[scene_idx]
        else:
            light_position = position
        # orient the light's local +x axis along the requested direction
        light.pose = sapien.Pose(
            light_position, sapien.math.shortest_rotation([1, 0, 0], direction)
        )
        scene.add_entity(entity)
    return
|
| 691 |
+
|
| 692 |
+
def add_area_light_for_ray_tracing(
    self,
    pose: sapien.Pose,
    color,
    half_width: float,
    half_height: float,
    scene_idxs=None,
):
    """Add a rectangular (parallelogram) area light, used by ray-tracing shaders.

    Args:
        pose: pose of the light rectangle in the world frame.
        color: light color.
        half_width: half extent of the rectangle along one axis.
        half_height: half extent of the rectangle along the other axis.
        scene_idxs: indices of sub-scenes to add the light to; all when None.
    """
    # NOTE(review): unlike the other add_*_light helpers this one applies no
    # parallel_in_single_scene pose offset — confirm that is intended
    lighting_scenes = (
        self.sub_scenes
        if scene_idxs is None
        else [self.sub_scenes[i] for i in scene_idxs]
    )
    for scene in lighting_scenes:
        entity = sapien.Entity()
        light = sapien.render.RenderParallelogramLightComponent()
        entity.add_component(light)
        light.set_shape(half_width, half_height)
        light.color = color
        light.pose = pose
        scene.add_entity(entity)
    return
|
| 714 |
+
|
| 715 |
+
# def remove_light(self, light):
|
| 716 |
+
# self.remove_entity(light.entity)
|
| 717 |
+
|
| 718 |
+
# def set_environment_map(self, cubemap: str):
|
| 719 |
+
# if isinstance(cubemap, str):
|
| 720 |
+
# self.render_system.cubemap = sapien.render.RenderCubemap(cubemap)
|
| 721 |
+
# else:
|
| 722 |
+
# self.render_system.cubemap = cubemap
|
| 723 |
+
|
| 724 |
+
# def set_environment_map_from_files(
|
| 725 |
+
# self, px: str, nx: str, py: str, ny: str, pz: str, nz: str
|
| 726 |
+
# ):
|
| 727 |
+
# self.render_system.cubemap = sapien.render.RenderCubemap(px, nx, py, ny, pz, nz)
|
| 728 |
+
|
| 729 |
+
# ---------------------------------------------------------------------------- #
|
| 730 |
+
# Additional useful properties / functions
|
| 731 |
+
# ---------------------------------------------------------------------------- #
|
| 732 |
+
@property
def num_envs(self):
    """Number of parallel environments (one sub-scene per environment)."""
    scene_count = len(self.sub_scenes)
    return scene_count
|
| 735 |
+
|
| 736 |
+
def get_pairwise_contact_impulses(
    self, obj1: Union[Actor, Link], obj2: Union[Actor, Link]
):
    """
    Get the impulse vectors between two actors/links. Returns impulse vector of shape (N, 3)
    where N is the number of environments and 3 is the dimension of the impulse vector itself,
    representing x, y, and z direction of impulse.

    Note that dividing the impulse value by self.px.timestep yields the pairwise contact force in Newtons. The equivalent API for that
    is self.get_pairwise_contact_force(obj1, obj2). It is generally recommended to use the force values since they are independent of the
    timestep (dt = 1 / sim_freq) of the simulation.

    Args:
        obj1: Actor | Link
        obj2: Actor | Link
    """
    # TODO (stao): Is there any optimization improvement when putting all queries all together and fetched together
    # vs multiple smaller queries? If so, might be worth exposing a helpful API for that instead of having user
    # write this code below themselves.
    if self.gpu_sim_enabled:
        # cache key is by name; the hash of the actual python objects detects
        # when a same-named pair now refers to different managed objects
        query_hash = hash((obj1, obj2))
        query_key = obj1.name + obj2.name

        # we rebuild the potentially expensive contact query if it has not existed previously
        # or if it has, the managed objects are a different set
        rebuild_query = (query_key not in self.pairwise_contact_queries) or (
            query_key in self._pairwise_contact_query_unique_hashes
            and self._pairwise_contact_query_unique_hashes[query_key] != query_hash
        )
        if rebuild_query:
            body_pairs = list(zip(obj1._bodies, obj2._bodies))
            self.pairwise_contact_queries[
                query_key
            ] = self.px.gpu_create_contact_pair_impulse_query(body_pairs)
            self._pairwise_contact_query_unique_hashes[query_key] = query_hash

        query = self.pairwise_contact_queries[query_key]
        self.px.gpu_query_contact_pair_impulses(query)
        # query.cuda_impulses is shape (num_unique_pairs * num_envs, 3)
        pairwise_contact_impulses = query.cuda_impulses.torch().clone()
        return pairwise_contact_impulses
    else:
        # CPU path: single environment, so wrap the (3,) impulse into (1, 3)
        contacts = self.px.get_contacts()
        pairwise_contact_impulses = sapien_utils.get_pairwise_contact_impulse(
            contacts, obj1._bodies[0].entity, obj2._bodies[0].entity
        )
        return common.to_tensor(pairwise_contact_impulses)[None, :]
|
| 783 |
+
|
| 784 |
+
def get_pairwise_contact_forces(
    self, obj1: Union[Actor, Link], obj2: Union[Actor, Link]
):
    """
    Get the force vectors between two actors/links. Returns a force vector of
    shape (N, 3), one x/y/z force row per parallel environment.

    Forces are derived from the pairwise contact impulses by dividing by the
    simulation timestep, which makes them independent of the sim frequency.

    Args:
        obj1: Actor | Link
        obj2: Actor | Link
    """
    impulses = self.get_pairwise_contact_impulses(obj1, obj2)
    return impulses / self.px.timestep
|
| 797 |
+
|
| 798 |
+
@cached_property
def scene_offsets(self):
    """torch tensor of shape (num_envs, 3) representing the offset of each scene in the world frame"""
    # cached: sub-scene offsets are fixed once the sub-scenes are created
    return torch.tensor(
        np.array(
            [self.px.get_scene_offset(sub_scene) for sub_scene in self.sub_scenes]
        ),
        device=self.device,
    )
|
| 807 |
+
|
| 808 |
+
@cached_property
def scene_offsets_np(self):
    """numpy array of shape (num_envs, 3) representing the offset of each scene in the world frame"""
    offsets = [self.px.get_scene_offset(sub_scene) for sub_scene in self.sub_scenes]
    return np.array(offsets)
|
| 814 |
+
|
| 815 |
+
# -------------------------------------------------------------------------- #
|
| 816 |
+
# Simulation state (required for MPC)
|
| 817 |
+
# -------------------------------------------------------------------------- #
|
| 818 |
+
|
| 819 |
+
def add_to_state_dict_registry(self, object: Union[Actor, Articulation]):
    """Register an actor or articulation so its state is included in
    ``get_sim_state`` / ``set_sim_state``.

    Raises:
        AssertionError: if an object with the same name is already registered.
        ValueError: if ``object`` is neither an Actor nor an Articulation.
    """
    # NOTE(review): parameter name shadows the builtin `object`; kept as-is
    # since renaming would break keyword callers
    if isinstance(object, Actor):
        assert (
            object.name not in self.state_dict_registry.actors
        ), f"Object {object.name} already in state dict registry"
        self.state_dict_registry.actors[object.name] = object
    elif isinstance(object, Articulation):
        assert (
            object.name not in self.state_dict_registry.articulations
        ), f"Object {object.name} already in state dict registry"
        self.state_dict_registry.articulations[object.name] = object
    else:
        raise ValueError(f"Expected Actor or Articulation, got {object}")
|
| 832 |
+
|
| 833 |
+
def remove_from_state_dict_registry(self, object: Union[Actor, Articulation]):
    """Unregister an actor or articulation from the simulation state registry.

    Raises:
        AssertionError: if the object is not currently registered.
        ValueError: if ``object`` is neither an Actor nor an Articulation.
    """
    if isinstance(object, Actor):
        assert (
            object.name in self.state_dict_registry.actors
        ), f"Object {object.name} not in state dict registry"
        del self.state_dict_registry.actors[object.name]
    elif isinstance(object, Articulation):
        assert (
            object.name in self.state_dict_registry.articulations
        ), f"Object {object.name} not in state dict registry"
        del self.state_dict_registry.articulations[object.name]
    else:
        raise ValueError(f"Expected Actor or Articulation, got {object}")
|
| 846 |
+
|
| 847 |
+
def get_sim_state(self) -> Dict[str, Dict[str, torch.Tensor]]:
    """Get simulation state. Returns a dictionary with two nested dictionaries "actors" and "articulations".
    In the nested dictionaries they map the actor/articulation name to a vector of shape (N, D) for N parallel
    environments and D dimensions of padded state per environment. Empty sub-dictionaries are omitted.

    Note that static actor data are not included. It is expected that an environment reconstructs itself in a deterministic manner such that
    the same static actors always have the same states"""
    # fix: the original annotation said -> torch.Tensor, but the function
    # returns a (possibly empty) nested dict, as the docstring describes
    state_dict = dict()
    state_dict["actors"] = dict()
    state_dict["articulations"] = dict()
    for actor in self.state_dict_registry.actors.values():
        # static actors never move, so they carry no recordable state
        if actor.px_body_type == "static":
            continue
        state_dict["actors"][actor.name] = actor.get_state().clone()
    for articulation in self.state_dict_registry.articulations.values():
        state_dict["articulations"][
            articulation.name
        ] = articulation.get_state().clone()
    if len(state_dict["actors"]) == 0:
        del state_dict["actors"]
    if len(state_dict["articulations"]) == 0:
        del state_dict["articulations"]
    return state_dict
|
| 870 |
+
|
| 871 |
+
def set_sim_state(self, state: Dict, env_idx: torch.Tensor = None):
    """Restore a simulation state previously produced by ``get_sim_state``.

    Args:
        state: dict optionally containing "actors" and "articulations" sub-dicts
            mapping names to (N, D) state tensors; 1-D states are promoted to (1, D).
        env_idx: optional indices of the environments to write; all other
            environments are masked out for the duration of this call.
    """
    if env_idx is not None:
        prev_reset_mask = self._reset_mask.clone()
        # safe guard against setting the wrong states
        self._reset_mask[:] = False
        self._reset_mask[env_idx] = True

    if "actors" in state:
        for actor_id, actor_state in state["actors"].items():
            if len(actor_state.shape) == 1:
                actor_state = actor_state[None, :]
            # do not pass in env_idx to avoid redundant reset mask changes
            self.state_dict_registry.actors[actor_id].set_state(actor_state, None)
    if "articulations" in state:
        for art_id, art_state in state["articulations"].items():
            if len(art_state.shape) == 1:
                art_state = art_state[None, :]
            self.state_dict_registry.articulations[art_id].set_state(
                art_state, None
            )
    if env_idx is not None:
        # restore whatever reset mask was active before this call
        self._reset_mask = prev_reset_mask
|
| 893 |
+
|
| 894 |
+
# ---------------------------------------------------------------------------- #
|
| 895 |
+
# GPU Simulation Management
|
| 896 |
+
# ---------------------------------------------------------------------------- #
|
| 897 |
+
def _setup(self, enable_gpu: bool):
    """
    Start the CPU/GPU simulation and allocate all buffers and initialize objects
    """
    if enable_gpu:
        if SAPIEN_RENDER_SYSTEM == "3.1":
            # NOTE(review): the 3.1 render system appears to need a render
            # update before gpu_init — confirm against sapien 3.1 docs
            for scene in self.sub_scenes:
                scene.update_render()
        self.px.gpu_init()
    self.non_static_actors: List[Actor] = []
    # find non static actors, and set data indices that are now available after gpu_init was called
    for actor in self.actors.values():
        if actor.px_body_type == "static":
            continue
        self.non_static_actors.append(actor)
        if enable_gpu:
            actor._body_data_index  # only need to access this attribute to populate it

    for articulation in self.articulations.values():
        # accessing these lazy index properties populates them post-gpu_init
        articulation._data_index
        for link in articulation.links:
            link._body_data_index
    # place every dynamic object at its recorded initial pose
    for actor in self.non_static_actors:
        actor.set_pose(actor.initial_pose)
    for articulation in self.articulations.values():
        articulation.set_pose(articulation.initial_pose)

    if enable_gpu:
        # rigid body data layout: [:, :7] pose, [:, 7:] velocities
        self.px.cuda_rigid_body_data.torch()[:, 7:] = torch.zeros_like(
            self.px.cuda_rigid_body_data.torch()[:, 7:]
        )  # zero out all velocities
        self.px.cuda_articulation_qvel.torch()[:, :] = torch.zeros_like(
            self.px.cuda_articulation_qvel.torch()
        )  # zero out all q velocities

        # push the zeroed buffers and initial poses to the GPU simulation
        self.px.gpu_apply_rigid_dynamic_data()
        self.px.gpu_apply_articulation_root_pose()
        self.px.gpu_apply_articulation_root_velocity()
        self.px.gpu_apply_articulation_qvel()

        self._gpu_sim_initialized = True
        self.px.gpu_update_articulation_kinematics()
        self._gpu_fetch_all()
|
| 940 |
+
|
| 941 |
+
def _gpu_apply_all(self):
    """
    Calls gpu_apply to update all body data, qpos, qvel, qf, and root poses
    """
    # apply and fetch must strictly alternate; double-apply is undefined behavior
    assert (
        not self._needs_fetch
    ), "Once _gpu_apply_all is called, you must call _gpu_fetch_all before calling _gpu_apply_all again\
        as otherwise there is undefined behavior that is likely impossible to debug"
    self.px.gpu_apply_rigid_dynamic_data()
    self.px.gpu_apply_articulation_qpos()
    self.px.gpu_apply_articulation_qvel()
    self.px.gpu_apply_articulation_qf()
    self.px.gpu_apply_articulation_root_pose()
    self.px.gpu_apply_articulation_root_velocity()
    self.px.gpu_apply_articulation_target_position()
    self.px.gpu_apply_articulation_target_velocity()
    self._needs_fetch = True
|
| 958 |
+
|
| 959 |
+
def _gpu_fetch_all(self):
    """
    Queries simulation for all relevant GPU data. Note that this has some overhead.
    Should only be called at most once per simulation step as this automatically queries all data for all
    objects built in the scene.
    """
    # skip fetches that would be no-ops to avoid useless GPU round trips
    if len(self.non_static_actors) > 0:
        self.px.gpu_fetch_rigid_dynamic_data()

    if len(self.articulations) > 0:
        self.px.gpu_fetch_articulation_link_pose()
        self.px.gpu_fetch_articulation_link_velocity()
        self.px.gpu_fetch_articulation_qpos()
        self.px.gpu_fetch_articulation_qvel()
        self.px.gpu_fetch_articulation_qacc()
        self.px.gpu_fetch_articulation_target_qpos()
        self.px.gpu_fetch_articulation_target_qvel()

    # pairs with _gpu_apply_all: applying is legal again after a fetch
    self._needs_fetch = False
|
| 978 |
+
|
| 979 |
+
# ---------------------------------------------------------------------------- #
|
| 980 |
+
# CPU/GPU sim Rendering Code
|
| 981 |
+
# ---------------------------------------------------------------------------- #
|
| 982 |
+
def _get_all_render_bodies(
    self,
) -> List[Tuple[sapien.render.RenderBodyComponent, int]]:
    """Collect (render body component, GPU pose buffer index) pairs for every
    non-static actor entity and every articulation link across all managed
    objects. The render body component may be None for entities that have
    physics but no visuals (callers must handle that)."""
    all_render_bodies = []
    for actor in self.actors.values():
        # static actors are excluded; only dynamic bodies have GPU pose slots
        if actor.px_body_type == "static":
            continue
        all_render_bodies += [
            (
                entity.find_component_by_type(sapien.render.RenderBodyComponent),
                entity.find_component_by_type(
                    physx.PhysxRigidDynamicComponent
                ).gpu_pose_index,
            )
            for entity in actor._objs
        ]
    for articulation in self.articulations.values():
        all_render_bodies += [
            (
                px_link.entity.find_component_by_type(
                    sapien.render.RenderBodyComponent
                ),
                px_link.gpu_pose_index,
            )
            for link in articulation.links
            for px_link in link._objs
        ]
    return all_render_bodies
|
| 1010 |
+
|
| 1011 |
+
def _setup_gpu_rendering(self):
    """Dispatch GPU-render setup to the routine matching the installed SAPIEN render system."""
    if SAPIEN_RENDER_SYSTEM == "3.1":
        setup = self._sapien_31_setup_gpu_rendering
    else:
        setup = self._sapien_setup_gpu_rendering
    setup()
|
| 1016 |
+
|
| 1017 |
+
def _sapien_setup_gpu_rendering(self):
    """
    Prepares the scene for GPU parallelized rendering to enable taking e.g. RGB images
    """
    # associate each render shape with its rigid body's slot in the GPU pose buffer
    for rb, gpu_pose_index in self._get_all_render_bodies():
        if rb is not None:
            for s in rb.render_shapes:
                s.set_gpu_pose_batch_index(gpu_pose_index)
    # group all sub-scene render systems so they render as one batch
    self.render_system_group = sapien.render.RenderSystemGroup(
        [s.render_system for s in self.sub_scenes]
    )
    # render poses are sourced directly from the physx GPU rigid body buffer
    self.render_system_group.set_cuda_poses(self.px.cuda_rigid_body_data)
|
| 1029 |
+
|
| 1030 |
+
def _sapien_31_setup_gpu_rendering(self):
    """
    Prepares the scene for GPU parallelized rendering to enable taking e.g. RGB images
    (SAPIEN 3.1 render system variant).
    """
    px: physx.PhysxGpuSystem = self.px

    # map every render shape to the GPU pose-buffer index of its rigid body
    # (fix: removed a dead `scene_id` counter that was incremented but never read)
    shape_pose_indices = []
    shapes = []
    for scene in self.sub_scenes:
        for body in scene.render_system.render_bodies:
            b = body.entity.find_component_by_type(
                sapien.physx.PhysxRigidBodyComponent
            )
            if b is None:
                # render-only entities have no physics pose to track
                continue
            for s in body.render_shapes:
                shape_pose_indices.append(b.gpu_pose_index)
                shapes.append(s)

    # cameras mounted on rigid bodies follow those bodies' GPU poses too
    cam_pose_indices = []
    cams = []
    for cameras in self.sensors.values():
        assert isinstance(cameras, Camera), f"Expected Camera, got {cameras}"
        for c in cameras.camera._render_cameras:
            b = c.entity.find_component_by_type(
                sapien.physx.PhysxRigidBodyComponent
            )
            if b is None:
                continue
            cam_pose_indices.append(b.gpu_pose_index)
            cams.append(c)

    # the sync manager copies physx GPU poses into the renderer each frame
    sync_manager = sapien.render.GpuSyncManager()
    sync_manager.set_cuda_poses(px.cuda_rigid_body_data)
    sync_manager.set_render_shapes(shape_pose_indices, shapes)
    sync_manager.set_cameras(cam_pose_indices, cams)

    self.render_system_group = sync_manager
|
| 1071 |
+
|
| 1072 |
+
def _gpu_setup_sensors(self, sensors: Dict[str, BaseSensor]):
    """Dispatch GPU sensor setup to the routine matching the installed SAPIEN render system."""
    handler = (
        self._sapien_31_gpu_setup_sensors
        if SAPIEN_RENDER_SYSTEM == "3.1"
        else self._sapien_gpu_setup_sensors
    )
    handler(sensors)
|
| 1077 |
+
|
| 1078 |
+
def _sapien_gpu_setup_sensors(self, sensors: Dict[str, BaseSensor]):
    """Create a GPU camera group per Camera sensor so all parallel environments
    render in one batched call; only Camera sensors are supported on the GPU."""
    for name, sensor in sensors.items():
        if isinstance(sensor, Camera):
            try:
                camera_group = self.render_system_group.create_camera_group(
                    sensor.camera._render_cameras,
                    list(sensor.config.shader_config.texture_names.keys()),
                )
            except RuntimeError as e:
                # creation failures are almost always GPU memory pressure;
                # re-raise with actionable advice while chaining the cause
                raise RuntimeError(
                    "Unable to create GPU parallelized camera group. "
                    "If the error is about being unable to create a buffer, you are likely using too many Cameras. "
                    "Either use less cameras (via less parallel envs) and/or reduce the size of the cameras. "
                    "Another common cause is using a memory intensive shader, you can try using the 'minimal' shader "
                    "which optimizes for GPU memory but disables some advanced functionalities. "
                    "Another option is to avoid rendering with the rgb_array mode / using the human render cameras as "
                    "they can be more memory intensive as they typically have higher resolutions for the purposes of visualization."
                ) from e
            sensor.camera.camera_group = camera_group
            self.camera_groups[name] = camera_group
        else:
            raise NotImplementedError(
                f"This sensor {sensor} of type {sensor.__class__} has not been implemented yet on the GPU"
            )
|
| 1102 |
+
|
| 1103 |
+
def _sapien_31_gpu_setup_sensors(self, sensors: dict[str, BaseSensor]):
    """Create a batched RenderManager per Camera sensor for the SAPIEN 3.1
    render system, mirroring ``_sapien_gpu_setup_sensors``."""
    for name, sensor in sensors.items():
        if isinstance(sensor, Camera):
            batch_renderer = sapien.render.RenderManager(
                sapien.render.get_shader_pack(
                    sensor.config.shader_config.shader_pack
                )
            )
            batch_renderer.set_size(sensor.config.width, sensor.config.height)
            batch_renderer.set_cameras(sensor.camera._render_cameras)
            sensor.camera.camera_group = self.camera_groups[name] = batch_renderer
        else:
            # fix: error message previously read "has not bget_picture_cuda
            # implemented yet" — garbled text, corrected to match the wording
            # used by the non-3.1 setup path
            raise NotImplementedError(
                f"This sensor {sensor} of type {sensor.__class__} has not been implemented yet on the GPU"
            )
|
| 1118 |
+
|
| 1119 |
+
def get_sensor_images(
    self, obs: Dict[str, Any]
) -> Dict[str, Dict[str, torch.Tensor]]:
    """Get raw sensor data as images for visualization purposes."""
    return {
        name: sensor.get_images(obs[name])
        for name, sensor in self.sensors.items()
    }
|
| 1127 |
+
|
| 1128 |
+
def get_human_render_camera_images(
    self, camera_name: str = None
) -> Dict[str, torch.Tensor]:
    """Render and return RGB images from the human render cameras.

    Args:
        camera_name: if given, only the camera with this name is rendered.
            NOTE(review): the filter is ignored in the GPU
            parallel_in_single_scene branch, which renders every camera —
            confirm that is intended.

    Returns:
        dict mapping camera name to its RGB image tensor.
    """
    image_data = dict()
    if self.gpu_sim_enabled:
        if self.parallel_in_single_scene:
            # all envs share one sub-scene: render via the single raw camera
            for name, camera in self.human_render_cameras.items():
                camera.camera._render_cameras[0].take_picture()
                rgb = camera.get_obs(
                    rgb=True, depth=False, segmentation=False, position=False
                )["rgb"]
                image_data[name] = rgb
        else:
            for name, camera in self.human_render_cameras.items():
                if camera_name is not None and name != camera_name:
                    continue
                assert camera.config.shader_config.shader_pack not in [
                    "rt",
                    "rt-fast",
                    "rt-med",
                ], "ray tracing shaders do not work with parallel rendering"
                camera.capture()
                rgb = camera.get_obs(
                    rgb=True, depth=False, segmentation=False, position=False
                )["rgb"]
                image_data[name] = rgb
    else:
        # CPU simulation: capture each requested camera directly
        for name, camera in self.human_render_cameras.items():
            if camera_name is not None and name != camera_name:
                continue
            camera.capture()
            rgb = camera.get_obs(
                rgb=True, depth=False, segmentation=False, position=False
            )["rgb"]
            image_data[name] = rgb
    return image_data
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/__init__.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .control import *
|
| 2 |
+
from .dexterity import *
|
| 3 |
+
from .digital_twins import *
|
| 4 |
+
from .drawing import *
|
| 5 |
+
from .empty_env import EmptyEnv
|
| 6 |
+
from .fmb import *
|
| 7 |
+
from .humanoid import *
|
| 8 |
+
from .mobile_manipulation import *
|
| 9 |
+
from .quadruped import *
|
| 10 |
+
from .rotate_cube import RotateCubeEnv
|
| 11 |
+
from .tabletop import *
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/dexterity/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# isort: off
|
| 2 |
+
from .rotate_valve import RotateValveEnv
|
| 3 |
+
from .rotate_single_object_in_hand import RotateSingleObjectInHand
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/dexterity/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (346 Bytes). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/dexterity/__pycache__/rotate_single_object_in_hand.cpython-310.pyc
ADDED
|
Binary file (11.4 kB). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/dexterity/__pycache__/rotate_valve.cpython-310.pyc
ADDED
|
Binary file (10 kB). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/dexterity/rotate_single_object_in_hand.py
ADDED
|
@@ -0,0 +1,373 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Dict, List, Union
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import sapien
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
|
| 8 |
+
from mani_skill import ASSET_DIR
|
| 9 |
+
from mani_skill.agents.robots import AllegroHandRightTouch
|
| 10 |
+
from mani_skill.envs.sapien_env import BaseEnv
|
| 11 |
+
from mani_skill.sensors.camera import CameraConfig
|
| 12 |
+
from mani_skill.utils import common, sapien_utils
|
| 13 |
+
from mani_skill.utils.building import actors
|
| 14 |
+
from mani_skill.utils.building.actors import build_cube
|
| 15 |
+
from mani_skill.utils.geometry.rotation_conversions import quaternion_apply
|
| 16 |
+
from mani_skill.utils.io_utils import load_json
|
| 17 |
+
from mani_skill.utils.registration import register_env
|
| 18 |
+
from mani_skill.utils.scene_builder.table import TableSceneBuilder
|
| 19 |
+
from mani_skill.utils.structs.actor import Actor
|
| 20 |
+
from mani_skill.utils.structs.pose import Pose, vectorize_pose
|
| 21 |
+
from mani_skill.utils.structs.types import Array, GPUMemoryConfig, SimConfig
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class RotateSingleObjectInHand(BaseEnv):
|
| 25 |
+
agent: Union[AllegroHandRightTouch]
|
| 26 |
+
_clearance = 0.003
|
| 27 |
+
hand_init_height = 0.25
|
| 28 |
+
|
| 29 |
+
def __init__(
    self,
    *args,
    robot_init_qpos_noise=0.02,
    obj_init_pos_noise=0.02,
    difficulty_level: int = -1,
    num_envs=1,
    reconfiguration_freq=None,
    **kwargs,
):
    """In-hand object rotation task with the Allegro right hand (touch variant).

    Args:
        robot_init_qpos_noise: magnitude of noise applied to the hand's initial qpos.
        obj_init_pos_noise: magnitude of noise applied to the object's initial position.
        difficulty_level: integer in [0, 3]; must be provided explicitly — the
            default of -1 deliberately fails validation below.
        num_envs: number of parallel environments.
        reconfiguration_freq: how often to rebuild the scene; when None and the
            difficulty level samples objects (>= 2), it defaults to 1 for a
            single env and 0 for many envs.
    """
    self.robot_init_qpos_noise = robot_init_qpos_noise
    self.obj_init_pos_noise = obj_init_pos_noise
    # filled in during scene loading with per-env object rest heights
    self.obj_heights: torch.Tensor = torch.Tensor()
    if (
        not isinstance(difficulty_level, int)
        or difficulty_level >= 4
        or difficulty_level < 0
    ):
        raise ValueError(
            f"Difficulty level must be a int within 0-3, but get {difficulty_level}"
        )
    self.difficulty_level = difficulty_level
    if self.difficulty_level >= 2:
        # NOTE(review): levels >= 2 presumably randomize object geometry, so a
        # single env reconfigures every episode — confirm against _load_scene
        if reconfiguration_freq is None:
            if num_envs == 1:
                reconfiguration_freq = 1
            else:
                reconfiguration_freq = 0
    super().__init__(
        *args,
        robot_uids="allegro_hand_right_touch",
        num_envs=num_envs,
        reconfiguration_freq=reconfiguration_freq,
        **kwargs,
    )

    # reward bookkeeping buffers, allocated on the env's compute device
    with torch.device(self.device):
        self.prev_unit_vector = torch.zeros((self.num_envs, 3))
        self.cum_rotation_angle = torch.zeros((self.num_envs,))
|
| 68 |
+
|
| 69 |
+
@property
def _default_sim_config(self):
    """Simulation config; GPU contact buffers are scaled with the env count."""
    return SimConfig(
        gpu_memory_config=GPUMemoryConfig(
            # both contact buffers grow with num_envs (dexterous manipulation
            # produces many simultaneous contacts per env)
            max_rigid_contact_count=self.num_envs * max(1024, self.num_envs) * 8,
            max_rigid_patch_count=self.num_envs * max(1024, self.num_envs) * 2,
            found_lost_pairs_capacity=2**26,
        )
    )
|
| 78 |
+
|
| 79 |
+
@property
|
| 80 |
+
def _default_sensor_configs(self):
|
| 81 |
+
pose = sapien_utils.look_at(
|
| 82 |
+
eye=[0.15, 0, 0.45], target=[-0.1, 0, self.hand_init_height]
|
| 83 |
+
)
|
| 84 |
+
return [CameraConfig("base_camera", pose, 128, 128, np.pi / 2, 0.01, 100)]
|
| 85 |
+
|
| 86 |
+
@property
|
| 87 |
+
def _default_human_render_camera_configs(self):
|
| 88 |
+
pose = sapien_utils.look_at([0.2, 0.4, 0.6], [0.0, 0.0, 0.3])
|
| 89 |
+
return CameraConfig("render_camera", pose, 512, 512, 1, 0.01, 100)
|
| 90 |
+
|
| 91 |
+
def _load_scene(self, options: dict):
|
| 92 |
+
self.table_scene = TableSceneBuilder(
|
| 93 |
+
env=self, robot_init_qpos_noise=self.robot_init_qpos_noise
|
| 94 |
+
)
|
| 95 |
+
self.table_scene.build()
|
| 96 |
+
|
| 97 |
+
obj_heights = []
|
| 98 |
+
if self.difficulty_level == 0:
|
| 99 |
+
self.obj = build_cube(
|
| 100 |
+
self.scene,
|
| 101 |
+
half_size=0.04,
|
| 102 |
+
color=np.array([255, 255, 255, 255]) / 255,
|
| 103 |
+
name="cube",
|
| 104 |
+
body_type="dynamic",
|
| 105 |
+
)
|
| 106 |
+
obj_heights.append(0.03)
|
| 107 |
+
elif self.difficulty_level == 1:
|
| 108 |
+
half_sizes = (self._batched_episode_rng.randn() * 0.1 + 1) * 0.04
|
| 109 |
+
self._objs: List[Actor] = []
|
| 110 |
+
for i, half_size in enumerate(half_sizes):
|
| 111 |
+
builder = self.scene.create_actor_builder()
|
| 112 |
+
builder.add_box_collision(
|
| 113 |
+
half_size=[half_size] * 3,
|
| 114 |
+
)
|
| 115 |
+
builder.add_box_visual(
|
| 116 |
+
half_size=[half_size] * 3,
|
| 117 |
+
material=sapien.render.RenderMaterial(
|
| 118 |
+
base_color=np.array([255, 255, 255, 255]) / 255,
|
| 119 |
+
),
|
| 120 |
+
)
|
| 121 |
+
builder.set_scene_idxs([i])
|
| 122 |
+
self._objs.append(builder.build(name=f"cube-{i}"))
|
| 123 |
+
obj_heights.append(half_size)
|
| 124 |
+
self.obj = Actor.merge(self._objs, name="cube")
|
| 125 |
+
elif self.difficulty_level >= 2:
|
| 126 |
+
all_model_ids = np.array(
|
| 127 |
+
list(
|
| 128 |
+
load_json(
|
| 129 |
+
ASSET_DIR / "assets/mani_skill2_ycb/info_pick_v0.json"
|
| 130 |
+
).keys()
|
| 131 |
+
)
|
| 132 |
+
)
|
| 133 |
+
model_ids = self._batched_episode_rng.choice(all_model_ids)
|
| 134 |
+
self._objs: List[Actor] = []
|
| 135 |
+
for i, model_id in enumerate(model_ids):
|
| 136 |
+
builder = actors.get_actor_builder(self.scene, id=f"ycb:{model_id}")
|
| 137 |
+
builder.set_scene_idxs([i])
|
| 138 |
+
self._objs.append(builder.build(name=f"{model_id}-{i}"))
|
| 139 |
+
self.obj = Actor.merge(self._objs, name="ycb_object")
|
| 140 |
+
else:
|
| 141 |
+
raise ValueError(
|
| 142 |
+
f"Difficulty level must be an int within 0-4, but get {self.difficulty_level}"
|
| 143 |
+
)
|
| 144 |
+
|
| 145 |
+
if self.difficulty_level < 2:
|
| 146 |
+
# for levels 0 and 1 we already know object heights. For other levels we need to compute them
|
| 147 |
+
self.obj_heights = common.to_tensor(obj_heights, device=self.device)
|
| 148 |
+
|
| 149 |
+
def _after_reconfigure(self, options: dict):
|
| 150 |
+
if self.difficulty_level >= 2:
|
| 151 |
+
self.obj_heights = []
|
| 152 |
+
for obj in self._objs:
|
| 153 |
+
collision_mesh = obj.get_first_collision_mesh()
|
| 154 |
+
# this value is used to set object pose so the bottom is at z=0
|
| 155 |
+
self.obj_heights.append(-collision_mesh.bounding_box.bounds[0, 2])
|
| 156 |
+
self.obj_heights = common.to_tensor(self.obj_heights, device=self.device)
|
| 157 |
+
|
| 158 |
+
def _initialize_episode(self, env_idx: torch.Tensor, options: dict):
|
| 159 |
+
self._initialize_actors(env_idx)
|
| 160 |
+
self._initialize_agent(env_idx)
|
| 161 |
+
|
| 162 |
+
def _initialize_actors(self, env_idx: torch.Tensor):
|
| 163 |
+
with torch.device(self.device):
|
| 164 |
+
b = len(env_idx)
|
| 165 |
+
# Initialize object pose
|
| 166 |
+
self.table_scene.initialize(env_idx)
|
| 167 |
+
pose = self.obj.pose
|
| 168 |
+
new_pos = torch.randn((b, 3)) * self.obj_init_pos_noise
|
| 169 |
+
# hand_init_height is robot hand position while the 0.03 is a margin to ensure
|
| 170 |
+
new_pos[:, 2] = (
|
| 171 |
+
torch.abs(new_pos[:, 2]) + self.hand_init_height + self.obj_heights
|
| 172 |
+
)
|
| 173 |
+
pose.raw_pose[:, 0:3] = new_pos
|
| 174 |
+
pose.raw_pose[:, 3:7] = torch.tensor([[1, 0, 0, 0]])
|
| 175 |
+
self.obj.set_pose(pose)
|
| 176 |
+
|
| 177 |
+
# Initialize object axis
|
| 178 |
+
if self.difficulty_level <= 2:
|
| 179 |
+
axis = torch.ones((b,), dtype=torch.long) * 2
|
| 180 |
+
else:
|
| 181 |
+
axis = torch.randint(0, 3, (b,), dtype=torch.long)
|
| 182 |
+
self.rot_dir = F.one_hot(axis, num_classes=3)
|
| 183 |
+
|
| 184 |
+
# Sample a unit vector on the tangent plane of rotating axis
|
| 185 |
+
vector_axis = (axis + 1) % 3
|
| 186 |
+
vector = F.one_hot(vector_axis, num_classes=3)
|
| 187 |
+
|
| 188 |
+
# Initialize task related cache
|
| 189 |
+
self.unit_vector = vector
|
| 190 |
+
self.prev_unit_vector = vector.clone()
|
| 191 |
+
self.success_threshold = torch.pi * 4
|
| 192 |
+
self.cum_rotation_angle = torch.zeros((b,))
|
| 193 |
+
|
| 194 |
+
# Controller parameters
|
| 195 |
+
stiffness = torch.tensor(self.agent.controller.config.stiffness)
|
| 196 |
+
damping = torch.tensor(self.agent.controller.config.damping)
|
| 197 |
+
force_limit = torch.tensor(self.agent.controller.config.force_limit)
|
| 198 |
+
self.controller_param = (
|
| 199 |
+
stiffness.expand(b, self.agent.robot.dof[0]),
|
| 200 |
+
damping.expand(b, self.agent.robot.dof[0]),
|
| 201 |
+
force_limit.expand(b, self.agent.robot.dof[0]),
|
| 202 |
+
)
|
| 203 |
+
|
| 204 |
+
def _initialize_agent(self, env_idx: torch.Tensor):
|
| 205 |
+
with torch.device(self.device):
|
| 206 |
+
b = len(env_idx)
|
| 207 |
+
dof = self.agent.robot.dof
|
| 208 |
+
if isinstance(dof, torch.Tensor):
|
| 209 |
+
dof = dof[0]
|
| 210 |
+
init_qpos = torch.zeros((b, dof))
|
| 211 |
+
self.agent.reset(init_qpos)
|
| 212 |
+
self.agent.robot.set_pose(
|
| 213 |
+
Pose.create_from_pq(
|
| 214 |
+
torch.tensor([0.0, 0, self.hand_init_height]),
|
| 215 |
+
torch.tensor([-0.707, 0, 0.707, 0]),
|
| 216 |
+
)
|
| 217 |
+
)
|
| 218 |
+
|
| 219 |
+
def _get_obs_extra(self, info: Dict):
|
| 220 |
+
with torch.device(self.device):
|
| 221 |
+
obs = dict(rotate_dir=self.rot_dir)
|
| 222 |
+
if self.obs_mode_struct.use_state:
|
| 223 |
+
obs.update(
|
| 224 |
+
obj_pose=vectorize_pose(self.obj.pose),
|
| 225 |
+
obj_tip_vec=info["obj_tip_vec"].view(self.num_envs, 12),
|
| 226 |
+
)
|
| 227 |
+
return obs
|
| 228 |
+
|
| 229 |
+
def evaluate(self, **kwargs) -> dict:
|
| 230 |
+
with torch.device(self.device):
|
| 231 |
+
# 1. rotation angle
|
| 232 |
+
obj_pose = self.obj.pose
|
| 233 |
+
new_unit_vector = quaternion_apply(obj_pose.q, self.unit_vector)
|
| 234 |
+
new_unit_vector -= (
|
| 235 |
+
torch.sum(new_unit_vector * self.rot_dir, dim=-1, keepdim=True)
|
| 236 |
+
* self.rot_dir
|
| 237 |
+
)
|
| 238 |
+
new_unit_vector = new_unit_vector / torch.linalg.norm(
|
| 239 |
+
new_unit_vector, dim=-1, keepdim=True
|
| 240 |
+
)
|
| 241 |
+
angle = torch.acos(
|
| 242 |
+
torch.clip(
|
| 243 |
+
torch.sum(new_unit_vector * self.prev_unit_vector, dim=-1), 0, 1
|
| 244 |
+
)
|
| 245 |
+
)
|
| 246 |
+
# We do not expect the rotation angle for a single step to be so large
|
| 247 |
+
angle = torch.clip(angle, -torch.pi / 20, torch.pi / 20)
|
| 248 |
+
self.prev_unit_vector = new_unit_vector
|
| 249 |
+
|
| 250 |
+
# 2. object velocity
|
| 251 |
+
obj_vel = torch.linalg.norm(self.obj.get_linear_velocity(), dim=-1)
|
| 252 |
+
|
| 253 |
+
# 3. object falling
|
| 254 |
+
obj_fall = (obj_pose.p[:, 2] < self.hand_init_height - 0.05).to(torch.bool)
|
| 255 |
+
|
| 256 |
+
# 4. finger object distance
|
| 257 |
+
tip_poses = [vectorize_pose(link.pose) for link in self.agent.tip_links]
|
| 258 |
+
tip_poses = torch.stack(tip_poses, dim=1) # (b, 4, 7)
|
| 259 |
+
obj_tip_vec = tip_poses[..., :3] - obj_pose.p[:, None, :] # (b, 4, 3)
|
| 260 |
+
obj_tip_dist = torch.linalg.norm(obj_tip_vec, dim=-1) # (b, 4)
|
| 261 |
+
|
| 262 |
+
# 5. cum rotation angle
|
| 263 |
+
self.cum_rotation_angle += angle
|
| 264 |
+
success = self.cum_rotation_angle > self.success_threshold
|
| 265 |
+
|
| 266 |
+
# 6. controller effort
|
| 267 |
+
qpos_target = self.agent.controller._target_qpos
|
| 268 |
+
qpos_error = qpos_target - self.agent.robot.qpos
|
| 269 |
+
qvel = self.agent.robot.qvel
|
| 270 |
+
qf = qpos_error * self.controller_param[0] - qvel * self.controller_param[1]
|
| 271 |
+
qf = torch.clip(qf, -self.controller_param[2], self.controller_param[2])
|
| 272 |
+
power = torch.sum(qf * qvel, dim=-1)
|
| 273 |
+
|
| 274 |
+
return dict(
|
| 275 |
+
rotation_angle=angle,
|
| 276 |
+
obj_vel=obj_vel,
|
| 277 |
+
obj_fall=obj_fall,
|
| 278 |
+
obj_tip_vec=obj_tip_vec,
|
| 279 |
+
obj_tip_dist=obj_tip_dist,
|
| 280 |
+
success=success,
|
| 281 |
+
qf=qf,
|
| 282 |
+
power=power,
|
| 283 |
+
fail=obj_fall,
|
| 284 |
+
)
|
| 285 |
+
|
| 286 |
+
def compute_dense_reward(self, obs: Any, action: Array, info: Dict):
|
| 287 |
+
# 1. rotation reward
|
| 288 |
+
angle = info["rotation_angle"]
|
| 289 |
+
reward = 20 * angle
|
| 290 |
+
|
| 291 |
+
# 2. velocity penalty
|
| 292 |
+
obj_vel = info["obj_vel"]
|
| 293 |
+
reward += -0.1 * obj_vel
|
| 294 |
+
|
| 295 |
+
# 3. falling penalty
|
| 296 |
+
obj_fall = info["obj_fall"]
|
| 297 |
+
reward += -50.0 * obj_fall
|
| 298 |
+
|
| 299 |
+
# 4. effort penalty
|
| 300 |
+
power = torch.abs(info["power"])
|
| 301 |
+
reward += -0.0003 * power
|
| 302 |
+
|
| 303 |
+
# 5. torque penalty
|
| 304 |
+
qf = info["qf"]
|
| 305 |
+
qf_norm = torch.linalg.norm(qf, dim=-1)
|
| 306 |
+
reward += -0.0003 * qf_norm
|
| 307 |
+
|
| 308 |
+
# 6. finger object distance reward
|
| 309 |
+
obj_tip_dist = info["obj_tip_dist"]
|
| 310 |
+
distance_rew = 0.1 / (0.02 + 4 * obj_tip_dist)
|
| 311 |
+
reward += torch.mean(torch.clip(distance_rew, 0, 1), dim=-1)
|
| 312 |
+
|
| 313 |
+
return reward
|
| 314 |
+
|
| 315 |
+
def compute_normalized_dense_reward(self, obs: Any, action: Array, info: Dict):
|
| 316 |
+
# this should be equal to compute_dense_reward / max possible reward
|
| 317 |
+
return self.compute_dense_reward(obs=obs, action=action, info=info) / 4.0
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
@register_env("RotateSingleObjectInHandLevel0-v1", max_episode_steps=300)
class RotateSingleObjectInHandLevel0(RotateSingleObjectInHand):
    """Level-0 variant: rotate a fixed-size cube about the z axis."""

    def __init__(self, *args, **kwargs):
        level_settings = dict(
            robot_init_qpos_noise=0.02,
            obj_init_pos_noise=0.02,
            difficulty_level=0,
        )
        super().__init__(*args, **level_settings, **kwargs)
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
@register_env("RotateSingleObjectInHandLevel1-v1", max_episode_steps=300)
class RotateSingleObjectInHandLevel1(RotateSingleObjectInHand):
    """Level-1 variant: rotate a cube with per-env randomized size."""

    def __init__(self, *args, **kwargs):
        level_settings = dict(
            robot_init_qpos_noise=0.02,
            obj_init_pos_noise=0.02,
            difficulty_level=1,
        )
        super().__init__(*args, **level_settings, **kwargs)
|
| 342 |
+
|
| 343 |
+
|
| 344 |
+
@register_env(
    "RotateSingleObjectInHandLevel2-v1",
    max_episode_steps=300,
    asset_download_ids=["ycb"],
)
class RotateSingleObjectInHandLevel2(RotateSingleObjectInHand):
    """Level-2 variant: rotate a random YCB object about the z axis."""

    def __init__(self, *args, **kwargs):
        level_settings = dict(
            robot_init_qpos_noise=0.02,
            obj_init_pos_noise=0.02,
            difficulty_level=2,
        )
        super().__init__(*args, **level_settings, **kwargs)
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
@register_env(
    "RotateSingleObjectInHandLevel3-v1",
    max_episode_steps=300,
    asset_download_ids=["ycb"],
)
class RotateSingleObjectInHandLevel3(RotateSingleObjectInHand):
    """Level-3 variant: rotate a random YCB object about a random axis."""

    def __init__(self, *args, **kwargs):
        level_settings = dict(
            robot_init_qpos_noise=0.02,
            obj_init_pos_noise=0.02,
            difficulty_level=3,
        )
        super().__init__(*args, **level_settings, **kwargs)
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/dexterity/rotate_valve.py
ADDED
|
@@ -0,0 +1,312 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Dict, List, Union
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
from mani_skill import logger
|
| 7 |
+
from mani_skill.agents.robots import DClaw
|
| 8 |
+
from mani_skill.envs.sapien_env import BaseEnv
|
| 9 |
+
from mani_skill.sensors.camera import CameraConfig
|
| 10 |
+
from mani_skill.utils import sapien_utils
|
| 11 |
+
from mani_skill.utils.building.articulations import build_robel_valve
|
| 12 |
+
from mani_skill.utils.geometry.rotation_conversions import axis_angle_to_quaternion
|
| 13 |
+
from mani_skill.utils.registration import register_env
|
| 14 |
+
from mani_skill.utils.scene_builder.table import TableSceneBuilder
|
| 15 |
+
from mani_skill.utils.structs.articulation import Articulation
|
| 16 |
+
from mani_skill.utils.structs.link import Link
|
| 17 |
+
from mani_skill.utils.structs.pose import Pose, vectorize_pose
|
| 18 |
+
from mani_skill.utils.structs.types import Array
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class RotateValveEnv(BaseEnv):
    """Rotate a ROBEL-style valve with a DClaw hand until it turns far enough.

    Difficulty levels:
      0 - fixed tri-valve; success after a quarter turn
      1 - 3-, 4- and 5-headed evenly spaced valves cycled across envs
      2 - randomized head counts (3-5) and head angles
      3 - like 2, plus randomized valve/capsule scales
      4 - like 3, plus a random rotation direction; success needs a full turn
    """

    # Agent type; instantiated by BaseEnv from robot_uids.
    agent: Union[DClaw]
    # Small clearance constant (meters); not referenced elsewhere in this class.
    _clearance = 0.003

    def __init__(
        self,
        *args,
        robot_init_qpos_noise=0.02,
        valve_init_pos_noise=0.02,
        difficulty_level: int = -1,
        **kwargs,
    ):
        # Std of the gaussian noise applied to initial joint positions.
        self.robot_init_qpos_noise = robot_init_qpos_noise
        # NOTE(review): valve_init_pos_noise is stored but never read here;
        # the valve position is drawn uniformly in _initialize_actors.
        self.valve_init_pos_noise = valve_init_pos_noise

        if (
            not isinstance(difficulty_level, int)
            or difficulty_level >= 5
            or difficulty_level < 0
        ):
            raise ValueError(
                f"Difficulty level must be a int within 0-4, but get {difficulty_level}"
            )
        self.difficulty_level = difficulty_level

        # Task information:
        # level 0 (simplest) succeeds after a quarter turn,
        # level 4 (hardest) requires one full turn,
        # intermediate levels 1-3 require half a turn.
        if self.difficulty_level == 0:
            self.success_threshold = torch.pi / 2
        elif self.difficulty_level == 4:
            self.success_threshold = torch.pi * 2
        else:
            self.success_threshold = torch.pi * 1

        # Radial offset (m) subtracted from the capsule length when computing
        # the desired fingertip-to-valve-axis distance in the dense reward.
        self.capsule_offset = 0.01

        super().__init__(*args, robot_uids="dclaw", **kwargs)

    @property
    def _default_sensor_configs(self):
        # Single observation camera looking down at the valve.
        pose = sapien_utils.look_at(eye=[0.3, 0, 0.3], target=[-0.1, 0, 0.05])
        return [CameraConfig("base_camera", pose, 128, 128, np.pi / 2, 0.01, 100)]

    @property
    def _default_human_render_camera_configs(self):
        # Higher-resolution camera used for human-facing render videos.
        pose = sapien_utils.look_at([0.2, 0.4, 0.4], [0.0, 0.0, 0.1])
        return CameraConfig("render_camera", pose, 512, 512, 1, 0.01, 100)

    def _load_scene(self, options: dict):
        """Build the table scene, then the per-env valve articulations."""
        self.table_scene = TableSceneBuilder(
            env=self, robot_init_qpos_noise=self.robot_init_qpos_noise
        )
        self.table_scene.build()
        self._load_articulations()

    def _load_articulations(self):
        """Create one valve per env; head layout depends on the difficulty."""
        # Robel valve
        if self.difficulty_level == 0:
            # Only tri-valve
            valve_angles_list = [(0, np.pi / 3 * 2, np.pi / 3 * 4)] * self.num_envs
        elif self.difficulty_level == 1:
            # Cycle evenly spaced 3-, 4- and 5-headed valves across envs.
            base_angles = [
                np.arange(0, np.pi * 2, np.pi * 2 / 3),
                np.arange(0, np.pi * 2, np.pi / 2),
                np.arange(0, np.pi * 2, np.pi * 2 / 5),
            ]
            valve_angles_list = (
                base_angles * int(self.num_envs // 3)
                + base_angles[: int(self.num_envs % 3)]
            )
        elif self.difficulty_level == 2:
            # Random head count (3-5) with randomized, well-separated angles.
            num_valve_head = self._batched_episode_rng.randint(3, 6)
            valve_angles_list = [
                sample_valve_angles(num_head, self._batched_episode_rng[i])
                for i, num_head in enumerate(num_valve_head)
            ]
        elif self.difficulty_level >= 3:
            # Same head sampling as level 2; scales are randomized below.
            num_valve_head = self._batched_episode_rng.randint(3, 6)
            valve_angles_list = [
                sample_valve_angles(num_head, self._batched_episode_rng[i])
                for i, num_head in enumerate(num_valve_head)
            ]
        else:
            # Unreachable: __init__ already validates difficulty_level.
            raise ValueError(
                f"Difficulty level must be a int within 0-4, but get {self.difficulty_level}"
            )

        valves: List[Articulation] = []
        capsule_lens = []
        valve_links = []
        for i, valve_angles in enumerate(valve_angles_list):
            # Each valve exists only in its own parallel sub-scene.
            scene_idxs = [i]
            if self.difficulty_level < 3:
                valve, capsule_len = build_robel_valve(
                    self.scene,
                    valve_angles=valve_angles,
                    scene_idxs=scene_idxs,
                    name=f"valve_station_{i}",
                )
            else:
                # Levels >= 3 also randomize valve radius and capsule radius
                # with ~N(1, 0.1) scale factors.
                scales = self._batched_episode_rng[i].randn(2) * 0.1 + 1
                valve, capsule_len = build_robel_valve(
                    self.scene,
                    valve_angles=valve_angles,
                    scene_idxs=scene_idxs,
                    name=f"valve_station_{i}",
                    radius_scale=scales[0],
                    capsule_radius_scale=scales[1],
                )
            valves.append(valve)
            valve_links.append(valve.links_map["valve"])
            capsule_lens.append(capsule_len)
        # Merge per-env articulations/links into batched views.
        self.valve = Articulation.merge(valves, "valve_station")
        self.capsule_lens = torch.from_numpy(np.array(capsule_lens)).to(self.device)
        self.valve_link = Link.merge(valve_links, name="valve")

    def _initialize_episode(self, env_idx: torch.Tensor, options: dict):
        """Reset the valve first, then the claw."""
        self._initialize_actors(env_idx)
        self._initialize_agent(env_idx)

    def _initialize_actors(self, env_idx: torch.Tensor):
        """Randomize valve pose/joint angle and the target rotation direction."""
        with torch.device(self.device):
            b = len(env_idx)
            self.table_scene.initialize(env_idx)

            # Initialize task related information: levels 0-3 always rotate
            # in the positive direction; level 4 picks +1 or -1 per env.
            if self.difficulty_level <= 3:
                self.rotate_direction = torch.ones(b)
            else:
                self.rotate_direction = 1 - torch.randint(0, 2, (b,)) * 2

            # Initialize the valve with a small planar offset and random yaw.
            xyz = torch.zeros((b, 3))
            xyz[:, :2].uniform_(-0.02, 0.02)
            axis_angle = torch.zeros((b, 3))
            axis_angle[:, 2].uniform_(torch.pi / 6, torch.pi * 5 / 6)
            pose = Pose.create_from_pq(xyz, axis_angle_to_quaternion(axis_angle))
            self.valve.set_pose(pose)

            # Random initial joint angle in [-pi, pi); rotation is measured
            # relative to it in evaluate().
            qpos = torch.rand((b, 1)) * torch.pi * 2 - torch.pi
            self.valve.set_qpos(qpos)
            # NOTE(review): rest_qpos / rotate_direction are rebuilt with
            # batch size b = len(env_idx); this only lines up with the full
            # batch used by evaluate() on full resets.
            self.rest_qpos = qpos

    def _initialize_agent(self, env_idx: torch.Tensor):
        """Reset the DClaw above the valve with small joint noise."""
        with torch.device(self.device):
            b = len(env_idx)
            dof = self.agent.robot.dof
            # dof is per-env on GPU sim; all envs share one robot model.
            if isinstance(dof, torch.Tensor):
                dof = dof[0]

            init_qpos = torch.zeros((b, dof))
            # set root joint qpos to avoid robot-object collision after reset
            init_qpos[:, self.agent.root_joint_indices] = torch.tensor(
                [0.7, -0.7, -0.7]
            )
            init_qpos += torch.randn((b, dof)) * self.robot_init_qpos_noise
            self.agent.reset(init_qpos)
            # Mount the claw upside-down 0.28 m above the table.
            self.agent.robot.set_pose(
                Pose.create_from_pq(
                    torch.tensor([0.0, 0, 0.28]), torch.tensor([0, 0, -1, 0])
                )
            )

    def _get_obs_extra(self, info: Dict):
        """Expose valve joint state; sin/cos encode the angle continuously."""
        with torch.device(self.device):
            valve_qpos = self.valve.qpos
            valve_qvel = self.valve.qvel
            obs = dict(
                rotate_dir=self.rotate_direction.to(torch.float32),
                valve_qpos=valve_qpos,
                valve_qvel=valve_qvel,
                valve_x=torch.cos(valve_qpos[:, 0]),
                valve_y=torch.sin(valve_qpos[:, 0]),
            )
            if self.obs_mode_struct.use_state:
                obs.update(
                    valve_pose=vectorize_pose(self.valve.pose),
                )
            return obs

    def evaluate(self, **kwargs) -> dict:
        """Success when the signed rotation since reset exceeds the threshold."""
        valve_rotation = (self.valve.qpos - self.rest_qpos)[:, 0]
        success = valve_rotation * self.rotate_direction > self.success_threshold
        return dict(success=success, valve_rotation=valve_rotation)

    def compute_dense_reward(self, obs: Any, action: Array, info: Dict):
        """Dense reward: fingertip placement + directed velocity + progress."""
        rotation = info["valve_rotation"]
        qvel = self.valve.qvel

        # Distance between fingertips and the circle traced by the valve
        # tips: penalize deviation from the desired radial distance.
        tip_poses = self.agent.tip_poses  # (b, 3, 7)
        tip_pos = tip_poses[:, :, :2]  # (b, 3, 2)
        valve_pos = self.valve_link.pose.p[:, :2]  # (b, 2)
        valve_tip_dist = torch.linalg.norm(tip_pos - valve_pos[:, None, :], dim=-1)
        desired_valve_tip_dist = self.capsule_lens[:, None] - self.capsule_offset
        error = torch.norm(valve_tip_dist - desired_valve_tip_dist, dim=-1)
        reward = 1 - torch.tanh(error * 10)

        # Reward angular velocity in the target direction (saturates at 4).
        directed_velocity = qvel[:, 0] * self.rotate_direction
        reward += torch.tanh(5 * directed_velocity) * 4

        # Bonus proportional to total signed progress, capped at +/-1.
        motion_reward = torch.clip(rotation / torch.pi / 2, -1, 1)
        reward += motion_reward

        return reward

    def compute_normalized_dense_reward(self, obs: Any, action: Array, info: Dict):
        # this should be equal to compute_dense_reward / max possible reward
        return self.compute_dense_reward(obs=obs, action=action, info=info) / 6.0
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def sample_valve_angles(
    num_head: int,
    random_state: np.random.RandomState,
    min_angle_diff=np.pi / 6,
    num_max_attempts=500,
):
    """Sample sorted valve-head angles in [0, 2*pi) via rejection sampling.

    Draws ``num_head`` uniform angles and accepts the draw once every
    consecutive gap — including the gap from the last head up to 2*pi —
    is at least ``min_angle_diff``. After ``num_max_attempts`` failed
    draws, falls back to evenly spaced angles.

    Args:
        num_head: Number of valve heads to place.
        random_state: RNG used for the draws (deterministic per episode).
        min_angle_diff: Minimum allowed angular gap in radians.
        num_max_attempts: Rejection-sampling budget before falling back.

    Returns:
        np.ndarray of shape (num_head,) with sorted angles in radians.
    """
    for _ in range(num_max_attempts):
        angles = np.sort(random_state.uniform(0, np.pi * 2, (num_head,)))

        # Append 2*pi so the gap after the last head is also checked.
        # NOTE: this is conservative — the true wrap-around gap to the
        # first head is 2*pi - angles[-1] + angles[0].
        diff = np.append(angles[1:], np.pi * 2) - angles
        if np.min(diff) >= min_angle_diff:
            return angles

    # Fix: Logger.warn is a deprecated alias of Logger.warning.
    logger.warning(
        f"sample_valve_angles reach max attempts {num_max_attempts}. Will use the default valve angles."
    )
    return np.arange(0, np.pi * 2, np.pi * 2 / num_head)
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
@register_env("RotateValveLevel0-v1", max_episode_steps=80)
class RotateValveEnvLevel0(RotateValveEnv):
    """Level-0 variant: fixed tri-valve, quarter-turn success threshold."""

    def __init__(self, *args, **kwargs):
        level_settings = dict(
            robot_init_qpos_noise=0.02,
            valve_init_pos_noise=0.02,
            difficulty_level=0,
        )
        super().__init__(*args, **level_settings, **kwargs)
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
@register_env("RotateValveLevel1-v1", max_episode_steps=150)
class RotateValveEnvLevel1(RotateValveEnv):
    """Level-1 variant: evenly spaced 3/4/5-headed valves cycled across envs."""

    def __init__(self, *args, **kwargs):
        level_settings = dict(
            robot_init_qpos_noise=0.02,
            valve_init_pos_noise=0.02,
            difficulty_level=1,
        )
        super().__init__(*args, **level_settings, **kwargs)
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
@register_env("RotateValveLevel2-v1", max_episode_steps=150)
class RotateValveEnvLevel2(RotateValveEnv):
    """Level-2 variant: randomized head counts and head angles."""

    def __init__(self, *args, **kwargs):
        level_settings = dict(
            robot_init_qpos_noise=0.02,
            valve_init_pos_noise=0.02,
            difficulty_level=2,
        )
        super().__init__(*args, **level_settings, **kwargs)
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
@register_env("RotateValveLevel3-v1", max_episode_steps=150)
class RotateValveEnvLevel3(RotateValveEnv):
    """Level-3 variant: randomized head layout plus valve/capsule scales."""

    def __init__(self, *args, **kwargs):
        level_settings = dict(
            robot_init_qpos_noise=0.02,
            valve_init_pos_noise=0.02,
            difficulty_level=3,
        )
        super().__init__(*args, **level_settings, **kwargs)
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
@register_env("RotateValveLevel4-v1", max_episode_steps=300)
class RotateValveEnvLevel4(RotateValveEnv):
    """Level-4 variant: random rotation direction, full-turn success threshold."""

    def __init__(self, *args, **kwargs):
        level_settings = dict(
            robot_init_qpos_noise=0.02,
            valve_init_pos_noise=0.02,
            difficulty_level=4,
        )
        super().__init__(*args, **level_settings, **kwargs)
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/empty_env.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import sapien
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
from mani_skill.agents.robots.fetch.fetch import Fetch
|
| 8 |
+
from mani_skill.agents.robots.panda.panda import Panda
|
| 9 |
+
from mani_skill.envs.sapien_env import BaseEnv
|
| 10 |
+
from mani_skill.sensors.camera import CameraConfig
|
| 11 |
+
from mani_skill.utils import sapien_utils
|
| 12 |
+
from mani_skill.utils.building.ground import build_ground
|
| 13 |
+
from mani_skill.utils.registration import register_env
|
| 14 |
+
from mani_skill.utils.structs.types import GPUMemoryConfig, SimConfig
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@register_env("Empty-v1", max_episode_steps=200000)
class EmptyEnv(BaseEnv):
    """Dummy environment for showcasing robots in an empty scene.

    There is no task: rewards are disabled, evaluation returns nothing, and
    the scene contains only a ground plane.
    """

    # Fix: this descriptive string previously appeared AFTER
    # SUPPORTED_REWARD_MODES as a no-op bare string expression, so
    # EmptyEnv.__doc__ was None; it is now the real class docstring.

    # No reward signal is defined for this showcase environment.
    SUPPORTED_REWARD_MODES = ["none"]

    def __init__(self, *args, robot_uids="panda", **kwargs):
        super().__init__(*args, robot_uids=robot_uids, **kwargs)

    @property
    def _default_sensor_configs(self):
        pose = sapien_utils.look_at([1.25, -1.25, 1.5], [0.0, 0.0, 0.2])
        return [CameraConfig("base_camera", pose, 128, 128, np.pi / 2, 0.01, 100)]

    @property
    def _default_human_render_camera_configs(self):
        # High-resolution camera for showcase render videos.
        pose = sapien_utils.look_at([1.25, -1.25, 1.5], [0.0, 0.0, 0.2])
        return CameraConfig("render_camera", pose, 2048, 2048, 1, 0.01, 100)

    def _load_agent(self, options: dict):
        # Place the robot at the world origin.
        super()._load_agent(options, sapien.Pose())

    def _load_scene(self, options: dict):
        self.ground = build_ground(self.scene)
        # NOTE(review): presumably disables ground collisions against
        # links flagged with group-2 bit 30 — confirm against the robot
        # collision-group conventions.
        self.ground.set_collision_group_bit(group=2, bit_idx=30, bit=1)

    def _initialize_episode(self, env_idx: torch.Tensor, options: dict):
        # Nothing to randomize in an empty scene.
        pass

    def evaluate(self):
        # No success/failure criteria.
        return {}

    def _get_obs_extra(self, info: Dict):
        # No extra observations beyond the default state.
        return dict()
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/fmb/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (255 Bytes). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/fmb/__pycache__/fmb.cpython-310.pyc
ADDED
|
Binary file (6.6 kB). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/humanoid/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .humanoid_pick_place import *
|
| 2 |
+
from .humanoid_stand import HumanoidStandEnv
|
| 3 |
+
from .transport_box import TransportBoxEnv
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/humanoid/humanoid_stand.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Dict, Union
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import sapien
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
from mani_skill.agents.robots import UnitreeG1Simplified, UnitreeH1Simplified
|
| 8 |
+
from mani_skill.envs.sapien_env import BaseEnv
|
| 9 |
+
from mani_skill.sensors.camera import CameraConfig
|
| 10 |
+
from mani_skill.utils import common, sapien_utils
|
| 11 |
+
from mani_skill.utils.building.ground import build_ground
|
| 12 |
+
from mani_skill.utils.registration import register_env
|
| 13 |
+
from mani_skill.utils.structs.types import GPUMemoryConfig, SimConfig
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class HumanoidStandEnv(BaseEnv):
    """Balance task: a simplified humanoid must remain standing on flat ground.

    Sparse reward is 1 while the agent reports standing; an episode fails
    as soon as it is not standing. Embodiment-specific subclasses supply
    the robot and any pose initialization.
    """

    SUPPORTED_REWARD_MODES = ["sparse", "none"]

    def __init__(
        self,
        *args,
        robot_uids="unitree_h1_simplified",
        robot_init_qpos_noise=0.02,
        **kwargs
    ):
        # Std of the noise applied to initial joint positions; stored for
        # use by embodiment-specific subclasses.
        self.robot_init_qpos_noise = robot_init_qpos_noise
        super().__init__(*args, robot_uids=robot_uids, **kwargs)

    @property
    def _default_sensor_configs(self):
        # No observation cameras for this task.
        return []

    @property
    def _default_human_render_camera_configs(self):
        # Camera looking at the humanoid from a raised three-quarter view.
        pose = sapien_utils.look_at([1.0, 1.0, 2.5], [0.0, 0.0, 0.75])
        return CameraConfig("render_camera", pose, 512, 512, 1, 0.01, 100)

    def _load_scene(self, options: dict):
        # Only a flat ground plane; the robot itself is loaded by BaseEnv.
        build_ground(self.scene)

    def _initialize_episode(self, env_idx: torch.Tensor, options: dict):
        # Episode initialization is left to embodiment-specific subclasses.
        pass

    def evaluate(self):
        is_standing = self.agent.is_standing()
        # NOTE(review): the result of is_fallen() is discarded — confirm
        # whether this call has required side effects or is dead code.
        self.agent.is_fallen()
        return {"is_standing": is_standing, "fail": ~is_standing}

    def _get_obs_extra(self, info: Dict):
        # No extra observations beyond the default state.
        return dict()

    def compute_sparse_reward(self, obs: Any, action: torch.Tensor, info: Dict):
        # Reward is 1 while standing, 0 otherwise.
        return info["is_standing"]

    # def compute_dense_reward(self, obs: Any, action: torch.Tensor, info: Dict):
    #     return torch.zeros(self.num_envs, device=self.device)

    # def compute_normalized_dense_reward(
    #     self, obs: Any, action: torch.Tensor, info: Dict
    # ):
    #     max_reward = 1.0
    #     return self.compute_dense_reward(obs=obs, action=action, info=info) / max_reward
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
# Different robot embodiments require different configurations for optimal running and nicer render videos, we define those specifics below
|
| 66 |
+
@register_env("UnitreeH1Stand-v1", max_episode_steps=1000)
class UnitreeH1StandEnv(HumanoidStandEnv):
    """Standing task specialized for the simplified Unitree H1 humanoid."""

    SUPPORTED_ROBOTS = ["unitree_h1_simplified"]
    agent: Union[UnitreeH1Simplified]

    def __init__(self, *args, robot_uids="unitree_h1_simplified", **kwargs):
        super().__init__(*args, robot_uids=robot_uids, **kwargs)

    @property
    def _default_sim_config(self):
        # Enlarged GPU contact buffers: the humanoid produces many contacts.
        mem_cfg = GPUMemoryConfig(
            max_rigid_contact_count=2**22, max_rigid_patch_count=2**21
        )
        return SimConfig(gpu_memory_config=mem_cfg)

    @property
    def _default_human_render_camera_configs(self):
        cam_pose = sapien_utils.look_at([1.0, 1.0, 2.5], [0.0, 0.0, 0.75])
        return CameraConfig("render_camera", cam_pose, 512, 512, 1, 0.01, 100)

    def _initialize_episode(self, env_idx: torch.Tensor, options: dict):
        with torch.device(self.device):
            num_resets = len(env_idx)
            # Perturb the "standing" keyframe with small gaussian joint noise
            # so each episode starts slightly differently.
            keyframe = self.agent.keyframes["standing"]
            jitter = 0.05 * torch.randn(
                size=(num_resets, self.agent.robot.dof[0]), dtype=torch.float
            )
            qpos = jitter + common.to_tensor(keyframe.qpos, device=self.device)
            self.agent.robot.set_qpos(qpos)
            # Root height — presumably chosen so the H1's feet touch the
            # ground plane; TODO confirm against the robot model.
            self.agent.robot.set_pose(sapien.Pose(p=[0, 0, 0.975]))
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
@register_env("UnitreeG1Stand-v1", max_episode_steps=1000)
class UnitreeG1StandEnv(HumanoidStandEnv):
    """Standing task specialized for the simplified (legs-only) Unitree G1."""

    SUPPORTED_ROBOTS = ["unitree_g1_simplified_legs"]
    agent: Union[UnitreeG1Simplified]

    def __init__(self, *args, robot_uids="unitree_g1_simplified_legs", **kwargs):
        super().__init__(*args, robot_uids=robot_uids, **kwargs)

    @property
    def _default_sim_config(self):
        # Enlarged GPU contact buffers: the humanoid generates many contacts.
        memory_cfg = GPUMemoryConfig(
            max_rigid_contact_count=2**22, max_rigid_patch_count=2**21
        )
        return SimConfig(gpu_memory_config=memory_cfg)

    @property
    def _default_human_render_camera_configs(self):
        # Slightly lower camera than the H1 variant — the G1 is a shorter robot.
        cam_pose = sapien_utils.look_at([1.0, 1.0, 2.0], [0.0, 0.0, 0.75])
        return CameraConfig("render_camera", cam_pose, 512, 512, 1, 0.01, 100)

    def _initialize_episode(self, env_idx: torch.Tensor, options: dict):
        with torch.device(self.device):
            num_resets = len(env_idx)
            # Perturb the "standing" keyframe with small gaussian joint noise.
            keyframe = self.agent.keyframes["standing"]
            jitter = 0.05 * torch.randn(
                size=(num_resets, self.agent.robot.dof[0]), dtype=torch.float
            )
            qpos = jitter + common.to_tensor(keyframe.qpos, device=self.device)
            self.agent.robot.set_qpos(qpos)
            # Root height — presumably chosen so the G1's feet touch the
            # ground plane; TODO confirm against the robot model.
            self.agent.robot.set_pose(sapien.Pose(p=[0, 0, 0.755]))
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/humanoid/transport_box.py
ADDED
|
@@ -0,0 +1,311 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import os
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
from typing import Any, Dict
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
import sapien
|
| 8 |
+
import torch
|
| 9 |
+
from transforms3d.euler import euler2quat
|
| 10 |
+
|
| 11 |
+
from mani_skill.agents.robots.unitree_g1.g1_upper_body import (
|
| 12 |
+
UnitreeG1UpperBodyWithHeadCamera,
|
| 13 |
+
)
|
| 14 |
+
from mani_skill.envs.sapien_env import BaseEnv
|
| 15 |
+
from mani_skill.envs.utils import randomization
|
| 16 |
+
from mani_skill.sensors.camera import CameraConfig
|
| 17 |
+
from mani_skill.utils import common, sapien_utils
|
| 18 |
+
from mani_skill.utils.building import ground
|
| 19 |
+
from mani_skill.utils.registration import register_env
|
| 20 |
+
from mani_skill.utils.structs.pose import Pose
|
| 21 |
+
from mani_skill.utils.structs.types import GPUMemoryConfig, SceneConfig, SimConfig
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@register_env("UnitreeG1TransportBox-v1", max_episode_steps=100)
class TransportBoxEnv(BaseEnv):
    """
    **Task Description:**
    A G1 humanoid robot must find a box on a table and transport it to the other table and place it there.

    **Randomizations:**
    - the box's xy position is randomized in the region [-0.05, -0.05] x [0.2, 0.05]
    - the box's z-axis rotation is randomized to a random angle in [0, np.pi/6]

    **Success Conditions:**
    - the box is resting on top of the other table
    """

    _sample_video_link = "https://github.com/haosulab/ManiSkill/raw/main/figures/environment_demos/UnitreeG1TransportBox-v1_rt.mp4"
    SUPPORTED_ROBOTS = ["unitree_g1_simplified_upper_body_with_head_camera"]
    agent: UnitreeG1UpperBodyWithHeadCamera

    def __init__(self, *args, **kwargs):
        # Start from the G1 "standing" keyframe, then lower the base so the
        # robot stands at table height.
        self.init_robot_pose = copy.deepcopy(
            UnitreeG1UpperBodyWithHeadCamera.keyframes["standing"].pose
        )
        self.init_robot_pose.p = [-0.1, 0, 0.755]
        self.init_robot_qpos = UnitreeG1UpperBodyWithHeadCamera.keyframes[
            "standing"
        ].qpos.copy()
        # Joints 3/4 are overridden from the keyframe — presumably arm joints
        # so the robot starts with arms positioned for grasping; TODO confirm.
        self.init_robot_qpos[4] = -1.25
        self.init_robot_qpos[3] = 1.25
        super().__init__(
            *args,
            robot_uids="unitree_g1_simplified_upper_body_with_head_camera",
            **kwargs
        )

    @property
    def _default_sim_config(self):
        # Enlarged GPU contact buffers plus a larger contact offset for more
        # robust box/hand contact generation.
        return SimConfig(
            gpu_memory_config=GPUMemoryConfig(
                max_rigid_contact_count=2**22, max_rigid_patch_count=2**21
            ),
            scene_config=SceneConfig(contact_offset=0.02),
        )

    @property
    def _default_sensor_configs(self):
        pose = sapien_utils.look_at([1.0, 0.0, 1.6], [0, 0.0, 0.65])
        return [
            CameraConfig("base_camera", pose=pose, width=128, height=128, fov=np.pi / 3)
        ]

    @property
    def _default_human_render_camera_configs(self):
        pose = sapien_utils.look_at([1.0, 0.0, 1.6], [0, 0.0, 0.65])
        return CameraConfig(
            "render_camera", pose=pose, width=512, height=512, fov=np.pi / 3
        )

    def _load_agent(self, options: dict):
        super()._load_agent(options, sapien.Pose(p=[0, 0, 1]))

    def _load_scene(self, options: dict):
        """Build the ground, two static tables, and the dynamic cardboard box."""
        self.ground = ground.build_ground(self.scene, mipmap_levels=7)
        # build two tables
        model_dir = Path(
            os.path.join(
                os.path.dirname(__file__), "../../../utils/scene_builder/table/assets"
            )
        )
        table_model_file = str(model_dir / "table.glb")
        scale = 1.2
        table_pose = sapien.Pose(q=euler2quat(0, 0, np.pi / 2))
        # Fix: the original attached the identical table visual twice per
        # builder (same file, scale, and pose); one visual per table suffices.
        builder = self.scene.create_actor_builder()
        builder.add_visual_from_file(
            filename=table_model_file, scale=[scale] * 3, pose=table_pose
        )
        builder.add_box_collision(
            pose=sapien.Pose(p=[0, 0, 0.630612274 / 2]),
            half_size=(1.658057143 / 2, 0.829028571 / 2, 0.630612274 / 2),
        )
        builder.initial_pose = sapien.Pose(p=[0, 0.66, 0])
        self.table_1 = builder.build_static(name="table-1")

        builder = self.scene.create_actor_builder()
        builder.add_visual_from_file(
            filename=table_model_file, scale=[scale] * 3, pose=table_pose
        )
        builder.add_box_collision(
            pose=sapien.Pose(p=[0, 0, 0.630612274 / 2]),
            half_size=(1.658057143 / 2, 0.829028571 / 2, 0.630612274 / 2),
        )
        builder.initial_pose = sapien.Pose(p=[0, -0.66, 0])
        self.table_2 = builder.build_static(name="table-2")

        # The box: a simple box collision with a textured cardboard visual.
        builder = self.scene.create_actor_builder()
        builder.add_box_collision(half_size=(0.18, 0.12, 0.12), density=200)
        visual_file = os.path.join(
            os.path.dirname(__file__), "assets/cardboard_box/textured.obj"
        )
        builder.add_visual_from_file(
            filename=visual_file,
            scale=[0.12] * 3,
            pose=sapien.Pose(q=euler2quat(0, 0, np.pi / 2)),
        )
        builder.initial_pose = sapien.Pose(p=[-0.1, -0.37, 0.7508])
        self.box = builder.build(name="box")

    def _initialize_episode(self, env_idx: torch.Tensor, options: dict):
        with torch.device(self.device):
            b = len(env_idx)
            self.agent.robot.set_qpos(self.init_robot_qpos)
            self.agent.robot.set_pose(self.init_robot_pose)
            # Randomize the box's xy position around (-0.1, -0.37) on table 2
            # and give it a small random z-axis rotation.
            xyz = torch.zeros((b, 3))
            xyz[:, 2] = 0.7508
            xyz[:, 0] = randomization.uniform(-0.05, 0.2, size=(b,))
            xyz[:, 1] = randomization.uniform(-0.05, 0.05, size=(b,))
            xyz[:, :2] += torch.tensor([-0.1, -0.37])
            quat = randomization.random_quaternions(
                n=b, device=self.device, lock_x=True, lock_y=True, bounds=(0, np.pi / 6)
            )
            self.box.set_pose(Pose.create_from_pq(xyz, quat))

    def evaluate(self):
        """Compute per-env success and intermediate-stage flags.

        A "grasp" is approximated from contact forces between the box and the
        finger/palm links of each hand rather than from a grasp classifier.
        """
        l_contact_forces = (
            (
                self.scene.get_pairwise_contact_forces(
                    self.agent.robot.links_map["left_five_link"], self.box
                )
                + self.scene.get_pairwise_contact_forces(
                    self.agent.robot.links_map["left_three_link"], self.box
                )
                + self.scene.get_pairwise_contact_forces(
                    self.agent.robot.links_map["left_palm_link"], self.box
                )
            )
            .abs()
            .sum(dim=1)
        )
        r_contact_forces = (
            (
                self.scene.get_pairwise_contact_forces(
                    self.agent.robot.links_map["right_five_link"], self.box
                )
                + self.scene.get_pairwise_contact_forces(
                    self.agent.robot.links_map["right_three_link"], self.box
                )
                + self.scene.get_pairwise_contact_forces(
                    self.agent.robot.links_map["right_palm_link"], self.box
                )
            )
            .abs()
            .sum(dim=1)
        )
        left_hand_hit_box = l_contact_forces > 10
        right_hand_hit_box = r_contact_forces > 10
        # is grasping the box if both hands contact the box and the tcp of the
        # hands are below the grasp points on the box.
        box_grasped = (
            left_hand_hit_box
            & right_hand_hit_box
            & (
                self.agent.right_tcp.pose.p[:, 2]
                < self.box_right_grasp_point.p[:, 2] + 0.04
            )
            & (
                self.agent.left_tcp.pose.p[:, 2]
                < self.box_left_grasp_point.p[:, 2] + 0.04
            )
        )

        # simply requires box to be resting somewhere on the correct table:
        # z within the resting band, xy within the table-1 footprint.
        box_at_correct_table_z = (0.751 > self.box.pose.p[:, 2]) & (
            self.box.pose.p[:, 2] > 0.750
        )
        box_at_correct_table_xy = (
            (0.78 > self.box.pose.p[:, 0])
            & (self.box.pose.p[:, 0] > -0.78)
            & (1.0 > self.box.pose.p[:, 1])
            & (self.box.pose.p[:, 1] > 0.3)
        )
        box_at_correct_table = box_at_correct_table_z & box_at_correct_table_xy

        facing_table_with_box = (-1.7 < self.agent.robot.qpos[:, 0]) & (
            self.agent.robot.qpos[:, 0] < -1.4
        )  # in this range the robot is probably facing the box on the left table.
        return {
            # success requires the box placed AND released (not still grasped)
            "success": ~box_grasped & box_at_correct_table,
            "left_hand_hit_box": l_contact_forces > 0,
            "right_hand_hit_box": r_contact_forces > 0,
            "box_grasped": box_grasped,
            "box_at_correct_table_xy": box_at_correct_table_xy,
            "facing_table_with_box": facing_table_with_box,
        }

    def _get_obs_extra(self, info: Dict):
        """Extra observations: TCP poses always; box pose/deltas in state modes."""
        obs = dict(
            right_tcp_pose=self.agent.right_tcp.pose.raw_pose,
            left_tcp_pose=self.agent.left_tcp.pose.raw_pose,
        )
        if "state" in self.obs_mode:
            obs.update(
                box_pose=self.box.pose.raw_pose,
                right_tcp_to_box_pos=self.box.pose.p - self.agent.right_tcp.pose.p,
                left_tcp_to_box_pos=self.box.pose.p - self.agent.left_tcp.pose.p,
            )
        return obs

    @property
    def box_right_grasp_point(self):
        # Grasp target on the box's right side, expressed in the world frame.
        return self.box.pose * Pose.create_from_pq(
            torch.tensor([-0.165, 0.07, 0.05], device=self.device)
        )

    @property
    def box_left_grasp_point(self):
        # Grasp target on the box's left side, expressed in the world frame.
        return self.box.pose * Pose.create_from_pq(
            torch.tensor([0.165, 0.07, 0.05], device=self.device)
        )

    def compute_dense_reward(self, obs: Any, action: torch.Tensor, info: Dict):
        """Staged dense reward; each later stage overwrites the earlier ones.

        Stages (gated by the flags computed in :meth:`evaluate`):
        1. turn to face the box, 2. grasp it, 3. carry it over the target
        table, 4. release it there. Success yields the max reward of 5.
        """
        # Stage 1, move to face the box on the table. Succeeds if facing_table_with_box
        reward = 1 - torch.tanh((self.agent.robot.qpos[:, 0] + 1.4).abs())

        # Stage 2, grasp the box stably. Succeeds if box_grasped
        # encourage arms to go down essentially and for tcps to be close to the edge of the box
        stage_2_reward = (
            1
            + (1 - torch.tanh((self.agent.robot.qpos[:, 3]).abs())) / 4
            + (1 - torch.tanh((self.agent.robot.qpos[:, 4]).abs())) / 4
            + (
                1
                - torch.tanh(
                    3
                    * torch.linalg.norm(
                        self.agent.right_tcp.pose.p - self.box_right_grasp_point.p,
                        dim=1,
                    )
                )
            )
            / 4
            + (
                1
                - torch.tanh(
                    3
                    * torch.linalg.norm(
                        self.agent.left_tcp.pose.p - self.box_left_grasp_point.p, dim=1
                    )
                )
            )
            / 4
        )
        reward[info["facing_table_with_box"]] = stage_2_reward[
            info["facing_table_with_box"]
        ]
        # Stage 3 transport box to above the other table, Succeeds if box_at_correct_table_xy
        stage_3_reward = (
            2 + 1 - torch.tanh((self.agent.robot.qpos[:, 0] - 1.4).abs() / 5)
        )
        reward[info["box_grasped"]] = stage_3_reward[info["box_grasped"]]
        # Stage 4 let go of the box. Succeeds if success (~box_grasped & box_at_correct_table)
        stage_4_reward = (
            3
            + (1 - torch.tanh((self.agent.robot.qpos[:, 3] - 1.25).abs())) / 2
            + (1 - torch.tanh((self.agent.robot.qpos[:, 4] + 1.25).abs())) / 2
        )
        reward[info["box_at_correct_table_xy"]] = stage_4_reward[
            info["box_at_correct_table_xy"]
        ]
        reward[info["success"]] = 5
        return reward

    def compute_normalized_dense_reward(
        self, obs: Any, action: torch.Tensor, info: Dict
    ):
        # Normalize by the maximum achievable dense reward (5, on success).
        return self.compute_dense_reward(obs, action, info) / 5
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/mobile_manipulation/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (346 Bytes). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/mobile_manipulation/open_cabinet_drawer.py
ADDED
|
@@ -0,0 +1,366 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Dict, List, Optional, Union
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import sapien
|
| 5 |
+
import sapien.physx as physx
|
| 6 |
+
import torch
|
| 7 |
+
import trimesh
|
| 8 |
+
|
| 9 |
+
from mani_skill import PACKAGE_ASSET_DIR
|
| 10 |
+
from mani_skill.agents.robots import Fetch
|
| 11 |
+
from mani_skill.envs.sapien_env import BaseEnv
|
| 12 |
+
from mani_skill.envs.utils import randomization
|
| 13 |
+
from mani_skill.sensors.camera import CameraConfig
|
| 14 |
+
from mani_skill.utils import common, sapien_utils
|
| 15 |
+
from mani_skill.utils.building import actors, articulations
|
| 16 |
+
from mani_skill.utils.building.ground import build_ground
|
| 17 |
+
from mani_skill.utils.geometry.geometry import transform_points
|
| 18 |
+
from mani_skill.utils.io_utils import load_json
|
| 19 |
+
from mani_skill.utils.registration import register_env
|
| 20 |
+
from mani_skill.utils.structs import Articulation, Link, Pose
|
| 21 |
+
from mani_skill.utils.structs.types import GPUMemoryConfig, SimConfig
|
| 22 |
+
|
| 23 |
+
CABINET_COLLISION_BIT = 29
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# TODO (stao): we need to cut the meshes of all the cabinets in this dataset for gpu sim, there may be some wierd physics
|
| 27 |
+
# that may happen although it seems okay for state based RL
|
| 28 |
+
@register_env(
|
| 29 |
+
"OpenCabinetDrawer-v1",
|
| 30 |
+
asset_download_ids=["partnet_mobility_cabinet"],
|
| 31 |
+
max_episode_steps=100,
|
| 32 |
+
)
|
| 33 |
+
class OpenCabinetDrawerEnv(BaseEnv):
|
| 34 |
+
"""
|
| 35 |
+
**Task Description:**
|
| 36 |
+
Use the Fetch mobile manipulation robot to move towards a target cabinet and open the target drawer out.
|
| 37 |
+
|
| 38 |
+
**Randomizations:**
|
| 39 |
+
- Robot is randomly initialized 1.6 to 1.8 meters away from the cabinet and positioned to face it
|
| 40 |
+
- Robot's base orientation is randomized by -9 to 9 degrees
|
| 41 |
+
- The cabinet selected to manipulate is randomly sampled from all PartnetMobility cabinets that have drawers
|
| 42 |
+
- The drawer to open is randomly sampled from all drawers available to open
|
| 43 |
+
|
| 44 |
+
**Success Conditions:**
|
| 45 |
+
- The drawer is open at least 90% of the way, and the angular/linear velocities of the drawer link are small
|
| 46 |
+
|
| 47 |
+
**Goal Specification:**
|
| 48 |
+
- 3D goal position centered at the center of mass of the handle mesh on the drawer to open (also visualized in human renders with a sphere).
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
_sample_video_link = "https://github.com/haosulab/ManiSkill/raw/main/figures/environment_demos/OpenCabinetDrawer-v1_rt.mp4"
|
| 52 |
+
|
| 53 |
+
SUPPORTED_ROBOTS = ["fetch"]
|
| 54 |
+
agent: Union[Fetch]
|
| 55 |
+
handle_types = ["prismatic"]
|
| 56 |
+
TRAIN_JSON = (
|
| 57 |
+
PACKAGE_ASSET_DIR / "partnet_mobility/meta/info_cabinet_drawer_train.json"
|
| 58 |
+
)
|
| 59 |
+
|
| 60 |
+
min_open_frac = 0.75
|
| 61 |
+
|
| 62 |
+
def __init__(
|
| 63 |
+
self,
|
| 64 |
+
*args,
|
| 65 |
+
robot_uids="fetch",
|
| 66 |
+
robot_init_qpos_noise=0.02,
|
| 67 |
+
reconfiguration_freq=None,
|
| 68 |
+
num_envs=1,
|
| 69 |
+
**kwargs,
|
| 70 |
+
):
|
| 71 |
+
self.robot_init_qpos_noise = robot_init_qpos_noise
|
| 72 |
+
train_data = load_json(self.TRAIN_JSON)
|
| 73 |
+
self.all_model_ids = np.array(list(train_data.keys()))
|
| 74 |
+
# self.all_model_ids = np.array(["1004", "1004"])
|
| 75 |
+
if reconfiguration_freq is None:
|
| 76 |
+
# if not user set, we pick a number
|
| 77 |
+
if num_envs == 1:
|
| 78 |
+
reconfiguration_freq = 1
|
| 79 |
+
else:
|
| 80 |
+
reconfiguration_freq = 0
|
| 81 |
+
super().__init__(
|
| 82 |
+
*args,
|
| 83 |
+
robot_uids=robot_uids,
|
| 84 |
+
reconfiguration_freq=reconfiguration_freq,
|
| 85 |
+
num_envs=num_envs,
|
| 86 |
+
**kwargs,
|
| 87 |
+
)
|
| 88 |
+
|
| 89 |
+
@property
|
| 90 |
+
def _default_sim_config(self):
|
| 91 |
+
return SimConfig(
|
| 92 |
+
spacing=5,
|
| 93 |
+
gpu_memory_config=GPUMemoryConfig(
|
| 94 |
+
max_rigid_contact_count=2**21, max_rigid_patch_count=2**19
|
| 95 |
+
),
|
| 96 |
+
)
|
| 97 |
+
|
| 98 |
+
@property
|
| 99 |
+
def _default_sensor_configs(self):
|
| 100 |
+
return []
|
| 101 |
+
|
| 102 |
+
@property
|
| 103 |
+
def _default_human_render_camera_configs(self):
|
| 104 |
+
pose = sapien_utils.look_at(eye=[-1.8, -1.3, 1.8], target=[-0.3, 0.5, 0])
|
| 105 |
+
return CameraConfig(
|
| 106 |
+
"render_camera", pose=pose, width=512, height=512, fov=1, near=0.01, far=100
|
| 107 |
+
)
|
| 108 |
+
|
| 109 |
+
def _load_agent(self, options: dict):
|
| 110 |
+
super()._load_agent(options, sapien.Pose(p=[1, 0, 0]))
|
| 111 |
+
|
| 112 |
+
def _load_scene(self, options: dict):
|
| 113 |
+
self.ground = build_ground(self.scene)
|
| 114 |
+
# temporarily turn off the logging as there will be big red warnings
|
| 115 |
+
# about the cabinets having oblong meshes which we ignore for now.
|
| 116 |
+
sapien.set_log_level("off")
|
| 117 |
+
self._load_cabinets(self.handle_types)
|
| 118 |
+
sapien.set_log_level("warn")
|
| 119 |
+
from mani_skill.agents.robots.fetch import FETCH_WHEELS_COLLISION_BIT
|
| 120 |
+
|
| 121 |
+
self.ground.set_collision_group_bit(
|
| 122 |
+
group=2, bit_idx=FETCH_WHEELS_COLLISION_BIT, bit=1
|
| 123 |
+
)
|
| 124 |
+
self.ground.set_collision_group_bit(
|
| 125 |
+
group=2, bit_idx=CABINET_COLLISION_BIT, bit=1
|
| 126 |
+
)
|
| 127 |
+
|
| 128 |
+
def _load_cabinets(self, joint_types: List[str]):
|
| 129 |
+
# we sample random cabinet model_ids with numpy as numpy is always deterministic based on seed, regardless of
|
| 130 |
+
# GPU/CPU simulation backends. This is useful for replaying demonstrations.
|
| 131 |
+
model_ids = self._batched_episode_rng.choice(self.all_model_ids)
|
| 132 |
+
link_ids = self._batched_episode_rng.randint(0, 2**31)
|
| 133 |
+
|
| 134 |
+
self._cabinets = []
|
| 135 |
+
handle_links: List[List[Link]] = []
|
| 136 |
+
handle_links_meshes: List[List[trimesh.Trimesh]] = []
|
| 137 |
+
for i, model_id in enumerate(model_ids):
|
| 138 |
+
# partnet-mobility is a dataset source and the ids are the ones we sampled
|
| 139 |
+
# we provide tools to easily create the articulation builder like so by querying
|
| 140 |
+
# the dataset source and unique ID
|
| 141 |
+
cabinet_builder = articulations.get_articulation_builder(
|
| 142 |
+
self.scene, f"partnet-mobility:{model_id}"
|
| 143 |
+
)
|
| 144 |
+
cabinet_builder.set_scene_idxs(scene_idxs=[i])
|
| 145 |
+
cabinet_builder.initial_pose = sapien.Pose(p=[0, 0, 0], q=[1, 0, 0, 0])
|
| 146 |
+
cabinet = cabinet_builder.build(name=f"{model_id}-{i}")
|
| 147 |
+
self.remove_from_state_dict_registry(cabinet)
|
| 148 |
+
# this disables self collisions by setting the group 2 bit at CABINET_COLLISION_BIT all the same
|
| 149 |
+
# that bit is also used to disable collision with the ground plane
|
| 150 |
+
for link in cabinet.links:
|
| 151 |
+
link.set_collision_group_bit(
|
| 152 |
+
group=2, bit_idx=CABINET_COLLISION_BIT, bit=1
|
| 153 |
+
)
|
| 154 |
+
self._cabinets.append(cabinet)
|
| 155 |
+
handle_links.append([])
|
| 156 |
+
handle_links_meshes.append([])
|
| 157 |
+
|
| 158 |
+
# TODO (stao): At the moment code for selecting semantic parts of articulations
|
| 159 |
+
# is not very simple. Will be improved in the future as we add in features that
|
| 160 |
+
# support part and mesh-wise annotations in a standard querable format
|
| 161 |
+
for link, joint in zip(cabinet.links, cabinet.joints):
|
| 162 |
+
if joint.type[0] in joint_types:
|
| 163 |
+
handle_links[-1].append(link)
|
| 164 |
+
# save the first mesh in the link object that correspond with a handle
|
| 165 |
+
handle_links_meshes[-1].append(
|
| 166 |
+
link.generate_mesh(
|
| 167 |
+
filter=lambda _, render_shape: "handle"
|
| 168 |
+
in render_shape.name,
|
| 169 |
+
mesh_name="handle",
|
| 170 |
+
)[0]
|
| 171 |
+
)
|
| 172 |
+
|
| 173 |
+
# we can merge different articulations/links with different degrees of freedoms into a single view/object
|
| 174 |
+
# allowing you to manage all of them under one object and retrieve data like qpos, pose, etc. all together
|
| 175 |
+
# and with high performance. Note that some properties such as qpos and qlimits are now padded.
|
| 176 |
+
self.cabinet = Articulation.merge(self._cabinets, name="cabinet")
|
| 177 |
+
self.add_to_state_dict_registry(self.cabinet)
|
| 178 |
+
self.handle_link = Link.merge(
|
| 179 |
+
[links[link_ids[i] % len(links)] for i, links in enumerate(handle_links)],
|
| 180 |
+
name="handle_link",
|
| 181 |
+
)
|
| 182 |
+
# store the position of the handle mesh itself relative to the link it is apart of
|
| 183 |
+
self.handle_link_pos = common.to_tensor(
|
| 184 |
+
np.array(
|
| 185 |
+
[
|
| 186 |
+
meshes[link_ids[i] % len(meshes)].bounding_box.center_mass
|
| 187 |
+
for i, meshes in enumerate(handle_links_meshes)
|
| 188 |
+
]
|
| 189 |
+
),
|
| 190 |
+
device=self.device,
|
| 191 |
+
)
|
| 192 |
+
|
| 193 |
+
self.handle_link_goal = actors.build_sphere(
|
| 194 |
+
self.scene,
|
| 195 |
+
radius=0.02,
|
| 196 |
+
color=[0, 1, 0, 1],
|
| 197 |
+
name="handle_link_goal",
|
| 198 |
+
body_type="kinematic",
|
| 199 |
+
add_collision=False,
|
| 200 |
+
initial_pose=sapien.Pose(p=[0, 0, 0], q=[1, 0, 0, 0]),
|
| 201 |
+
)
|
| 202 |
+
|
| 203 |
+
    def _after_reconfigure(self, options):
        """Cache per-cabinet spawn heights and the per-env target joint position.

        Runs once after reconfiguration; collision meshes are only retrievable
        once the GPU sim is initialized (after ``self._load_scene()``), which is
        why this work cannot live in ``_load_scene`` itself.
        """
        # To spawn cabinets in the right place, we need to change their z position such that
        # the bottom of the cabinet sits at z=0 (the floor). Luckily the partnet mobility dataset is made such that
        # the negative of the lower z-bound of the collision mesh bounding box is the right value

        # this code is in _after_reconfigure since retrieving collision meshes requires the GPU to be initialized
        # which occurs after the initial reconfigure call (after self._load_scene() is called)
        self.cabinet_zs = []
        for cabinet in self._cabinets:
            collision_mesh = cabinet.get_first_collision_mesh()
            # bounds[0, 2] is the lower z-bound; negate it so the cabinet bottom lands at z=0
            self.cabinet_zs.append(-collision_mesh.bounding_box.bounds[0, 2])
        self.cabinet_zs = common.to_tensor(self.cabinet_zs, device=self.device)

        # get the qmin qmax values of the joint corresponding to the selected links
        target_qlimits = self.handle_link.joint.limits  # [b, 1, 2]
        qmin, qmax = target_qlimits[..., 0], target_qlimits[..., 1]
        # target is a fixed fraction of the joint's travel; reaching it counts as "open enough"
        self.target_qpos = qmin + (qmax - qmin) * self.min_open_frac
|
| 220 |
+
|
| 221 |
+
def handle_link_positions(self, env_idx: Optional[torch.Tensor] = None):
|
| 222 |
+
if env_idx is None:
|
| 223 |
+
return transform_points(
|
| 224 |
+
self.handle_link.pose.to_transformation_matrix().clone(),
|
| 225 |
+
common.to_tensor(self.handle_link_pos, device=self.device),
|
| 226 |
+
)
|
| 227 |
+
return transform_points(
|
| 228 |
+
self.handle_link.pose[env_idx].to_transformation_matrix().clone(),
|
| 229 |
+
common.to_tensor(self.handle_link_pos[env_idx], device=self.device),
|
| 230 |
+
)
|
| 231 |
+
|
| 232 |
+
    def _initialize_episode(self, env_idx: torch.Tensor, options: dict):
        """Reset cabinet pose/joints and robot configuration for the envs in ``env_idx``."""

        with torch.device(self.device):
            b = len(env_idx)
            # place each cabinet so its bottom touches the floor (z from _after_reconfigure)
            xy = torch.zeros((b, 3))
            xy[:, 2] = self.cabinet_zs[env_idx]
            self.cabinet.set_pose(Pose.create_from_pq(p=xy))

            # initialize robot
            if self.robot_uids == "fetch":
                # base rest configuration for the Fetch robot (arm folded, gripper open)
                qpos = torch.tensor(
                    [
                        0,
                        0,
                        0,
                        0,
                        0,
                        0,
                        0,
                        -np.pi / 4,
                        0,
                        np.pi / 4,
                        0,
                        np.pi / 3,
                        0,
                        0.015,
                        0.015,
                    ]
                )
                qpos = qpos.repeat(b).reshape(b, -1)
                # spawn the base on a ring roughly behind the cabinet, facing it
                dist = randomization.uniform(1.6, 1.8, size=(b,))
                theta = randomization.uniform(0.9 * torch.pi, 1.1 * torch.pi, size=(b,))
                xy = torch.zeros((b, 2))
                xy[:, 0] += torch.cos(theta) * dist
                xy[:, 1] += torch.sin(theta) * dist
                qpos[:, :2] = xy
                # small random heading noise on top of "face the origin"
                noise_ori = randomization.uniform(
                    -0.05 * torch.pi, 0.05 * torch.pi, size=(b,)
                )
                ori = (theta - torch.pi) + noise_ori
                qpos[:, 2] = ori
                self.agent.robot.set_qpos(qpos)
                self.agent.robot.set_pose(sapien.Pose())
            # close all the cabinets. We know beforehand that lower qlimit means "closed" for these assets.
            qlimits = self.cabinet.get_qlimits()  # [b, self.cabinet.max_dof, 2]
            self.cabinet.set_qpos(qlimits[env_idx, :, 0])
            self.cabinet.set_qvel(self.cabinet.qpos[env_idx] * 0)

            # NOTE (stao): This is a temporary work around for the issue where the cabinet drawers/doors might open
            # themselves on the first step. It's unclear why this happens on GPU sim only atm.
            # moreover despite setting qpos/qvel to 0, the cabinets might still move on their own a little bit.
            # this may be due to oblong meshes.
            if self.gpu_sim_enabled:
                # flush state to GPU, refresh link poses, settle one physics step, read back
                self.scene._gpu_apply_all()
                self.scene.px.gpu_update_articulation_kinematics()
                self.scene.px.step()
                self.scene._gpu_fetch_all()

            # visual goal marker tracks the (now settled) handle position
            self.handle_link_goal.set_pose(
                Pose.create_from_pq(p=self.handle_link_positions(env_idx))
            )
|
| 293 |
+
|
| 294 |
+
    def _after_control_step(self):
        """Keep the handle goal marker glued to the (moving) handle link each control step."""
        # after each control step, we update the goal position of the handle link
        # for GPU sim we need to update the kinematics data to get latest pose information for up to date link poses
        # and fetch it, followed by an apply call to ensure the GPU sim is up to date
        if self.gpu_sim_enabled:
            self.scene.px.gpu_update_articulation_kinematics()
            self.scene._gpu_fetch_all()
        self.handle_link_goal.set_pose(
            Pose.create_from_pq(p=self.handle_link_positions())
        )
        if self.gpu_sim_enabled:
            # push the marker's new pose back to the GPU buffers
            self.scene._gpu_apply_all()
|
| 306 |
+
|
| 307 |
+
def evaluate(self):
|
| 308 |
+
# even though self.handle_link is a different link across different articulations
|
| 309 |
+
# we can still fetch a joint that represents the parent joint of all those links
|
| 310 |
+
# and easily get the qpos value.
|
| 311 |
+
open_enough = self.handle_link.joint.qpos >= self.target_qpos
|
| 312 |
+
handle_link_pos = self.handle_link_positions()
|
| 313 |
+
|
| 314 |
+
link_is_static = (
|
| 315 |
+
torch.linalg.norm(self.handle_link.angular_velocity, axis=1) <= 1
|
| 316 |
+
) & (torch.linalg.norm(self.handle_link.linear_velocity, axis=1) <= 0.1)
|
| 317 |
+
return {
|
| 318 |
+
"success": open_enough & link_is_static,
|
| 319 |
+
"handle_link_pos": handle_link_pos,
|
| 320 |
+
"open_enough": open_enough,
|
| 321 |
+
}
|
| 322 |
+
|
| 323 |
+
def _get_obs_extra(self, info: Dict):
|
| 324 |
+
obs = dict(
|
| 325 |
+
tcp_pose=self.agent.tcp.pose.raw_pose,
|
| 326 |
+
)
|
| 327 |
+
|
| 328 |
+
if "state" in self.obs_mode:
|
| 329 |
+
obs.update(
|
| 330 |
+
tcp_to_handle_pos=info["handle_link_pos"] - self.agent.tcp.pose.p,
|
| 331 |
+
target_link_qpos=self.handle_link.joint.qpos,
|
| 332 |
+
target_handle_pos=info["handle_link_pos"],
|
| 333 |
+
)
|
| 334 |
+
return obs
|
| 335 |
+
|
| 336 |
+
    def compute_dense_reward(self, obs: Any, action: torch.Tensor, info: Dict):
        """Staged dense reward: reach the handle, then open the joint, then succeed.

        Stages (max total 5.0):
        - reaching: tanh-shaped distance from TCP to the handle (0..1), replaced
          by a flat 2 once the joint has started opening;
        - opening: proportional to joint progress toward target (0..2), replaced
          by 3 once "open_enough";
        - success overrides everything with 5.0.
        """
        tcp_to_handle_dist = torch.linalg.norm(
            self.agent.tcp.pose.p - info["handle_link_pos"], axis=1
        )
        reaching_reward = 1 - torch.tanh(5 * tcp_to_handle_dist)
        # fraction of the target opening still remaining (1 = fully closed, 0 = at target)
        amount_to_open_left = torch.div(
            self.target_qpos - self.handle_link.joint.qpos, self.target_qpos
        )
        open_reward = 2 * (1 - amount_to_open_left)
        reaching_reward[
            amount_to_open_left < 0.999
        ] = 2  # if joint opens even a tiny bit, we don't need reach reward anymore
        open_reward[info["open_enough"]] = 3  # give max reward here
        reward = reaching_reward + open_reward
        reward[info["success"]] = 5.0
        return reward
|
| 353 |
+
|
| 354 |
+
def compute_normalized_dense_reward(
|
| 355 |
+
self, obs: Any, action: torch.Tensor, info: Dict
|
| 356 |
+
):
|
| 357 |
+
max_reward = 5.0
|
| 358 |
+
return self.compute_dense_reward(obs=obs, action=action, info=info) / max_reward
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
@register_env("OpenCabinetDoor-v1", max_episode_steps=100)
class OpenCabinetDoorEnv(OpenCabinetDrawerEnv):
    """Open a cabinet *door* instead of a drawer.

    Reuses the drawer task wholesale; only the asset split and the joint types
    considered as handles differ (doors swing on revolute joints, drawers slide
    on prismatic ones).
    """

    # metadata listing the door-train split of PartNet-Mobility cabinets
    TRAIN_JSON = (
        PACKAGE_ASSET_DIR / "partnet_mobility/meta/info_cabinet_door_train.json"
    )
    # doors are driven by revolute joints (possibly unwrapped, i.e. multi-turn)
    handle_types = ["revolute", "revolute_unwrapped"]
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/rotate_cube.py
ADDED
|
@@ -0,0 +1,403 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Dict, Tuple
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch
|
| 5 |
+
import torch.random
|
| 6 |
+
|
| 7 |
+
from mani_skill import PACKAGE_ASSET_DIR
|
| 8 |
+
from mani_skill.agents.robots import TriFingerPro
|
| 9 |
+
from mani_skill.envs.sapien_env import BaseEnv
|
| 10 |
+
from mani_skill.envs.utils.randomization.pose import random_quaternions
|
| 11 |
+
from mani_skill.sensors.camera import CameraConfig
|
| 12 |
+
from mani_skill.utils import common, sapien_utils
|
| 13 |
+
from mani_skill.utils.building import ActorBuilder, actors
|
| 14 |
+
from mani_skill.utils.building.ground import build_ground
|
| 15 |
+
from mani_skill.utils.registration import register_env
|
| 16 |
+
from mani_skill.utils.structs import Actor, Articulation, Pose
|
| 17 |
+
from mani_skill.utils.structs.types import Array, GPUMemoryConfig, SimConfig
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class RotateCubeEnv(BaseEnv):
    """TriFinger cube reposing/rotation task.

    A TriFingerPro robot must move (and for higher difficulty levels, reorient)
    a cube to a sampled goal pose. Difficulty 0-4 controls goal sampling; see
    ``_sample_object_goal_poses``.

    Modified from https://github.com/NVIDIA-Omniverse/IsaacGymEnvs/blob/main/isaacgymenvs/tasks/trifinger.py
    https://github.com/NVIDIA-Omniverse/IsaacGymEnvs/blob/main/isaacgymenvs/cfg/task/Trifinger.yaml
    """

    SUPPORTED_ROBOTS = ["trifingerpro"]

    # Specify some supported robot types
    agent: TriFingerPro

    # Specify default simulation/gpu memory configurations.
    sim_config = SimConfig(
        gpu_memory_config=GPUMemoryConfig(
            found_lost_pairs_capacity=2**25, max_rigid_patch_count=2**18
        )
    )

    # set some commonly used values
    goal_radius = 0.02  # position tolerance (m) used in evaluate()
    cube_half_size = 0.02

    # radius of the arena (m)
    ARENA_RADIUS = 0.195
    size = 0.065  # cube edge length, m
    max_len = 0.065
    # 3D radius of the cuboid (half of the space diagonal)
    radius_3d = max_len * np.sqrt(3) / 2
    # compute distance from wall to the center
    max_com_distance_to_center = ARENA_RADIUS - radius_3d
    # minimum and maximum height for spawning the object
    min_height = 0.065 / 2
    max_height = 0.1

    def __init__(
        self,
        *args,
        robot_uids="trifingerpro",
        robot_init_qpos_noise=0.02,
        difficulty_level: int = 4,
        **kwargs,
    ):
        # std-dev of gaussian noise added to the robot's initial joint positions
        self.robot_init_qpos_noise = robot_init_qpos_noise

        if (
            not isinstance(difficulty_level, int)
            or difficulty_level >= 5
            or difficulty_level < 0
        ):
            raise ValueError(
                f"Difficulty level must be a int within 0-4, but get {difficulty_level}"
            )

        self.difficulty_level = difficulty_level
        super().__init__(*args, robot_uids=robot_uids, **kwargs)

    @property
    def _default_sensor_configs(self):
        """Single 128x128 camera looking down at the arena center."""
        pose = sapien_utils.look_at(eye=(0.7, 0.0, 0.7), target=(0.0, 0.0, 0.0))
        return [CameraConfig("base_camera", pose, 128, 128, np.pi / 2, 0.01, 100)]

    @property
    def _default_human_render_camera_configs(self):
        """Higher-resolution camera used for human rendering/videos."""
        pose = sapien_utils.look_at(eye=(0.7, 0.0, 0.7), target=(0.0, 0.0, 0.0))
        return CameraConfig("render_camera", pose, 512, 512, 1, 0.01, 100)

    def _load_scene(self, options: dict):
        """Build the ground, TriFinger table + boundary, the cube, and the goal marker."""
        self.ground = build_ground(self.scene, altitude=0)
        loader1 = self.scene.create_urdf_loader()
        loader1.fix_root_link = True
        loader1.name = "table"
        urdf_path = f"{PACKAGE_ASSET_DIR}/robots/trifinger/table_without_border.urdf"
        table: Articulation = loader1.load(urdf_path)

        # static boundary wall keeping the cube inside the arena
        builder: ActorBuilder = self.scene.create_actor_builder()
        high_table_boundary_file_name = f"{PACKAGE_ASSET_DIR}/robots/trifinger/robot_properties_fingers/meshes/high_table_boundary.stl"
        builder.add_nonconvex_collision_from_file(
            filename=high_table_boundary_file_name, scale=[1, 1, 1], material=None
        )
        builder.add_visual_from_file(filename=high_table_boundary_file_name)
        table_boundary: Actor = builder.build_static("table2")

        # the manipulated cube (red)
        self.obj = actors.build_colorful_cube(
            self.scene,
            half_size=self.size / 2,
            color=np.array([169, 42, 12, 255]) / 255,
            name="cube",
            body_type="dynamic",
            add_collision=True,
        )

        # kinematic, collision-free goal visualization (green); hidden from cameras
        self.obj_goal = actors.build_colorful_cube(
            self.scene,
            half_size=self.size / 2,
            color=np.array([12, 160, 42, 255]) / 255,
            name="cube_goal",
            body_type="kinematic",
            add_collision=False,
        )
        self._hidden_objects.append(self.obj_goal)

    def _initialize_actors(self, env_idx: torch.Tensor):
        """Reset the cube to the table center and sample a fresh goal pose."""
        with torch.device(self.device):
            b = len(env_idx)
            xyz = torch.zeros((b, 3))
            # slight lift so the cube doesn't start interpenetrating the table
            xyz[..., 2] = self.size / 2 + 0.005
            obj_pose = Pose.create_from_pq(p=xyz, q=[1, 0, 0, 0])
            self.obj.set_pose(obj_pose)
            pos, orn = self._sample_object_goal_poses(
                env_idx, difficulty=self.difficulty_level
            )
            self.obj_goal.set_pose(Pose.create_from_pq(p=pos, q=orn))
            self.prev_norms = None

    def _initialize_episode(self, env_idx: torch.Tensor, options: dict):
        self._initialize_actors(env_idx)
        self._initialize_agent(env_idx)

    def _sample_object_goal_poses(self, env_idx: torch.Tensor, difficulty: int):
        """Sample goal poses for the cube and sets them into the desired goal pose buffer.

        Args:
            instances: A tensor constraining indices of environment instances to reset.
            difficulty: Difficulty level. The higher, the more difficult is the goal.

        Possible levels are:
            - 0: Random goal position on the table, no orientation.
            - 1: Random goal position on the table, including yaw orientation.
            - 2: Fixed goal position in the air with x,y = 0. No orientation.
            - 3: Random goal position in the air, no orientation.
            - 4: Random goal pose in the air, including orientation.

        Returns:
            Tuple of (positions [b, 3], quaternions [b, 4]) on ``self.device``.
        """
        b = len(env_idx)
        default_orn = torch.tensor(
            [1.0, 0.0, 0.0, 0.0], dtype=torch.float, device=self.device
        ).repeat(b, 1)

        def random_xy() -> Tuple[torch.Tensor, torch.Tensor]:
            """Returns sampled uniform positions in circle (https://stackoverflow.com/a/50746409)"""
            # sample radius of circle
            radius = torch.sqrt(torch.rand(b, dtype=torch.float, device=self.device))
            radius *= self.max_com_distance_to_center
            # sample theta of point
            theta = 2 * np.pi * torch.rand(b, dtype=torch.float, device=self.device)
            # x,y-position of the cube
            x = radius * torch.cos(theta)
            y = radius * torch.sin(theta)

            return x, y

        def random_z(min_height: float, max_height: float) -> torch.Tensor:
            """Returns sampled height of the goal object."""
            z = torch.rand(b, dtype=torch.float, device=self.device)
            z = (max_height - min_height) * z + min_height
            return z

        if difficulty == 0:
            # Random goal position on the table, no orientation.
            pos_x, pos_y = random_xy()
            pos_z = self.size / 2
            orientation = default_orn
        elif difficulty == 1:
            # Random goal position on the table with a random yaw (x/y axes locked).
            pos_x, pos_y = random_xy()
            pos_z = self.size / 2
            orientation = random_quaternions(
                b, lock_x=True, lock_y=True, device=self.device
            )
        elif difficulty == 2:
            # Fixed goal position in the air with x,y = 0. No orientation.
            pos_x, pos_y = 0.0, 0.0
            pos_z = self.min_height + 0.05
            orientation = default_orn
        elif difficulty == 3:
            # Random goal position in the air, no orientation.
            pos_x, pos_y = random_xy()
            pos_z = random_z(min_height=self.min_height, max_height=self.max_height)
            orientation = default_orn
        elif difficulty == 4:
            # Random goal pose in the air, including orientation.
            # Note: Set minimum height such that the cube does not intersect with the
            # ground in any orientation

            # pick x, y, z according to the maximum height / radius at the current point
            # in the curriculum
            pos_x, pos_y = random_xy()
            pos_z = random_z(min_height=self.radius_3d, max_height=self.max_height)
            orientation = random_quaternions(b, device=self.device)
        else:
            # unreachable in practice: __init__ already validates difficulty_level
            msg = f"Invalid difficulty index for task: {difficulty}."
            raise ValueError(msg)

        pos_tensor = torch.zeros((b, 3), dtype=torch.float, device=self.device)
        pos_tensor[:, 0] = pos_x
        pos_tensor[:, 1] = pos_y
        pos_tensor[:, 2] = pos_z
        return pos_tensor, orientation

    def evaluate(self):
        """Success when the cube is within goal_radius of the goal position AND
        within 0.1 rad of the goal orientation."""
        obj_p = self.obj.pose.p
        goal_p = self.obj_goal.pose.p
        obj_q = self.obj.pose.q
        goal_q = self.obj_goal.pose.q

        is_obj_pos_close_to_goal = (
            torch.linalg.norm(obj_p - goal_p, axis=1) < self.goal_radius
        )

        is_obj_q_close_to_goal = common.quat_diff_rad(obj_q, goal_q) < 0.1

        is_success = is_obj_pos_close_to_goal & is_obj_q_close_to_goal

        return {
            "success": is_success,
        }

    def _initialize_agent(self, env_idx: torch.Tensor):
        """Reset the robot to a noised zero configuration just above the table."""
        with torch.device(self.device):
            b = len(env_idx)
            dof = self.agent.robot.dof
            # in GPU sim dof is a per-env tensor; all envs share one robot so take the first
            if isinstance(dof, torch.Tensor):
                dof = dof[0]

            init_qpos = torch.zeros((b, dof))
            init_qpos += torch.randn((b, dof)) * self.robot_init_qpos_noise
            self.agent.reset(init_qpos)
            self.agent.robot.set_pose(
                Pose.create_from_pq(
                    torch.tensor([0.0, 0, self.size / 2 + 0.022]),
                    torch.tensor([1, 0, 0, 0]),
                )
            )

    def _get_obs_extra(self, info: Dict):
        """Goal pose always observed; cube pose only under state observation modes."""
        obs = dict(
            goal_pos=self.obj_goal.pose.p,
            goal_q=self.obj_goal.pose.q,
        )
        if self.obs_mode_struct.use_state:
            obs.update(
                obj_p=self.obj.pose.p,
                obj_q=self.obj.pose.q,
            )
        return obs

    def compute_dense_reward(self, obs: Any, action: Array, info: Dict):
        """Dense reward: fingertip-reach term + goal-pose term, clamped to [-15, 15].

        The distance/lift terms subtract a baseline computed from the cube's
        initial resting height (z = 0.032), so reward is relative to "not having
        moved the cube at all". Success overrides everything with 15.
        """
        obj_pos = self.obj.pose.p
        obj_q = self.obj.pose.q
        goal_pos = self.obj_goal.pose.p
        goal_q = self.obj_goal.pose.q

        object_dist_weight = 5
        object_rot_weight = 5

        # Reward encouraging each fingertip to stay close to the cube

        tip_poses = self.agent.tip_poses
        # shape (N, 3 + 4, 3 fingers): position (3) + quaternion (4) per fingertip

        finger_reach_object_dist_1 = torch.norm(
            tip_poses[:, :3, 0] - obj_pos, p=2, dim=-1
        )
        finger_reach_object_dist_2 = torch.norm(
            tip_poses[:, :3, 1] - obj_pos, p=2, dim=-1
        )
        finger_reach_object_dist_3 = torch.norm(
            tip_poses[:, :3, 2] - obj_pos, p=2, dim=-1
        )
        finger_reach_object_reward1 = 1 - torch.tanh(5 * finger_reach_object_dist_1)
        finger_reach_object_reward2 = 1 - torch.tanh(5 * finger_reach_object_dist_2)
        finger_reach_object_reward3 = 1 - torch.tanh(5 * finger_reach_object_dist_3)
        finger_reach_object_reward = (
            object_dist_weight
            * (
                finger_reach_object_reward1
                + finger_reach_object_reward2
                + finger_reach_object_reward3
            )
            / 3
        )

        # Reward for object distance
        object_dist = torch.norm(obj_pos - goal_pos, p=2, dim=-1)

        # baseline: distance from the cube's initial rest pose to the goal
        init_xyz_tensor = torch.tensor(
            [0, 0, 0.032], dtype=torch.float, device=self.device
        ).reshape(1, 3)
        init_z_dist = torch.norm(
            init_xyz_tensor
            - goal_pos[
                ...,
            ],
            p=2,
            dim=-1,
        )

        # object_dist_reward = object_dist_weight * dt * lgsk_kernel(object_dist, scale=50., eps=2.)

        object_dist_reward = 1 - torch.tanh(5 * object_dist)
        object_init_dist_reward = 1 - torch.tanh(5 * init_z_dist)
        object_dist_reward -= object_init_dist_reward

        # z-only lift term, again baselined against the initial resting height
        init_z_tensor = torch.tensor(
            [0.032], dtype=torch.float, device=self.device
        ).reshape(1, 1)
        object_z_dist = torch.norm(obj_pos[..., 2:3] - goal_pos[..., 2:3], p=2, dim=-1)
        init_z_dist = torch.norm(init_z_tensor - goal_pos[..., 2:3], p=2, dim=-1)
        object_lift_reward = 5 * ((1 - torch.tanh(5 * object_z_dist)))
        object_init_z_reward = 5 * ((1 - torch.tanh(5 * init_z_dist)))

        object_lift_reward -= object_init_z_reward

        # orientation penalty: absolute quaternion angle difference to the goal
        angles = common.quat_diff_rad(obj_q, goal_q)
        object_rot_reward = -1 * torch.abs(angles)
        pose_reward = (
            object_dist_weight * (object_dist_reward + object_lift_reward)
            + object_rot_weight * object_rot_reward
        )
        total_reward = finger_reach_object_reward + pose_reward
        total_reward = total_reward.clamp(-15, 15)
        total_reward[info["success"]] = 15
        return total_reward

    def compute_normalized_dense_reward(self, obs: Any, action: Array, info: Dict):
        """Map the clamped dense reward [-15, 15] into [0, 1]."""
        self.max_reward = 15
        dense_reward = self.compute_dense_reward(obs=obs, action=action, info=info)
        norm_dense_reward = dense_reward / (2 * self.max_reward) + 0.5
        return norm_dense_reward
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
@register_env("TriFingerRotateCubeLevel0-v1", max_episode_steps=250)
class RotateCubeEnvLevel0(RotateCubeEnv):
    """RotateCube at difficulty 0: random goal position on the table, no orientation target."""

    def __init__(self, *args, **kwargs):
        # Pin the difficulty; everything else passes through to RotateCubeEnv.
        super().__init__(*args, robot_init_qpos_noise=0.02, difficulty_level=0, **kwargs)
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
@register_env("TriFingerRotateCubeLevel1-v1", max_episode_steps=250)
class RotateCubeEnvLevel1(RotateCubeEnv):
    """RotateCube at difficulty 1: random goal position on the table with a random yaw."""

    def __init__(self, *args, **kwargs):
        # Pin the difficulty; everything else passes through to RotateCubeEnv.
        super().__init__(*args, robot_init_qpos_noise=0.02, difficulty_level=1, **kwargs)
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
@register_env("TriFingerRotateCubeLevel2-v1", max_episode_steps=250)
class RotateCubeEnvLevel2(RotateCubeEnv):
    """RotateCube at difficulty 2: fixed goal position in the air at x,y = 0, no orientation target."""

    def __init__(self, *args, **kwargs):
        # Pin the difficulty; everything else passes through to RotateCubeEnv.
        super().__init__(*args, robot_init_qpos_noise=0.02, difficulty_level=2, **kwargs)
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
@register_env("TriFingerRotateCubeLevel3-v1", max_episode_steps=250)
class RotateCubeEnvLevel3(RotateCubeEnv):
    """RotateCube at difficulty 3: random goal position in the air, no orientation target."""

    def __init__(self, *args, **kwargs):
        # Pin the difficulty; everything else passes through to RotateCubeEnv.
        super().__init__(*args, robot_init_qpos_noise=0.02, difficulty_level=3, **kwargs)
|
| 393 |
+
|
| 394 |
+
|
| 395 |
+
@register_env("TriFingerRotateCubeLevel4-v1", max_episode_steps=250)
class RotateCubeEnvLevel4(RotateCubeEnv):
    """RotateCube at difficulty 4: random goal pose in the air, including full orientation."""

    def __init__(self, *args, **kwargs):
        # Pin the difficulty; everything else passes through to RotateCubeEnv.
        super().__init__(*args, robot_init_qpos_noise=0.02, difficulty_level=4, **kwargs)
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/template.py
ADDED
|
@@ -0,0 +1,205 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Code for a minimal environment/task with just a robot being loaded. We recommend copying this template and modifying as you need.
|
| 3 |
+
|
| 4 |
+
At a high-level, ManiSkill tasks can minimally be defined by what agents/actors are
|
| 5 |
+
loaded, how agents/actors are randomly initialized during env resets, how goals are randomized and parameterized in observations, and success conditions
|
| 6 |
+
|
| 7 |
+
Environment reset is comprised of running two functions, `self._reconfigure` and `self.initialize_episode`, which is auto
|
| 8 |
+
run by ManiSkill. As a user, you can override a number of functions that affect reconfiguration and episode initialization.
|
| 9 |
+
|
| 10 |
+
Reconfiguration will reset the entire environment scene and allow you to load/swap assets and agents.
|
| 11 |
+
|
| 12 |
+
Episode initialization will reset the poses of all actors, articulations, and agents,
|
| 13 |
+
in addition to initializing any task relevant data like a goal
|
| 14 |
+
|
| 15 |
+
See comments for how to make your own environment and what each required function should do. If followed correctly you can easily build a
|
| 16 |
+
task that can simulate on the CPU and be parallelized on the GPU without having to manage GPU memory and parallelization apart from some
|
| 17 |
+
code that needs to be written in batched mode (e.g. reward, success conditions)
|
| 18 |
+
|
| 19 |
+
For a minimal implementation of a simple task, check out
|
| 20 |
+
mani_skill /envs/tasks/push_cube.py which is annotated with comments to explain how it is implemented
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
from typing import Any, Dict, Union
|
| 25 |
+
|
| 26 |
+
import numpy as np
|
| 27 |
+
import sapien
|
| 28 |
+
import torch
|
| 29 |
+
|
| 30 |
+
from mani_skill.agents.multi_agent import MultiAgent
|
| 31 |
+
from mani_skill.agents.robots.fetch.fetch import Fetch
|
| 32 |
+
from mani_skill.agents.robots.panda.panda import Panda
|
| 33 |
+
from mani_skill.envs.sapien_env import BaseEnv
|
| 34 |
+
from mani_skill.sensors.camera import CameraConfig
|
| 35 |
+
from mani_skill.utils import common, sapien_utils
|
| 36 |
+
from mani_skill.utils.building import actors
|
| 37 |
+
from mani_skill.utils.registration import register_env
|
| 38 |
+
from mani_skill.utils.structs.types import GPUMemoryConfig, SimConfig
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
# register the environment by a unique ID and specify a max time limit. Now once this file is imported you can do gym.make("CustomEnv-v0")
|
| 42 |
+
@register_env("CustomEnv-v1", max_episode_steps=200)
class CustomEnv(BaseEnv):
    """
    Task Description
    ----------------
    Add a task description here

    Randomizations
    --------------
    - how is it randomized?
    - how is that randomized?

    Success Conditions
    ------------------
    - what is done to check if this task is solved?

    Visualization: link to a video/gif of the task being solved
    """

    # here you can define a list of robots that this task is built to support and be solved by. This is so that
    # users won't be permitted to use robots not predefined here. If SUPPORTED_ROBOTS is not defined then users can do anything
    SUPPORTED_ROBOTS = ["panda", "fetch"]
    # if you want to say you support multiple robots you can use SUPPORTED_ROBOTS = [["panda", "panda"], ["panda", "fetch"]] etc.

    # to help with programming, you can assert what type of agents are supported like below, and any shared properties of self.agent
    # become available to typecheckers and auto-completion. E.g. Panda and Fetch both share a property called .tcp (tool center point).
    agent: Union[Panda, Fetch]
    # if you want to do typing for multi-agent setups, use this below and specify what possible tuples of robots are permitted by typing
    # this will then populate agent.agents (list of the instantiated agents) with the right typing
    # agent: MultiAgent[Union[Tuple[Panda, Panda], Tuple[Panda, Panda, Panda]]]

    # in the __init__ function you can pick a default robot your task should use e.g. the panda robot by setting a default for robot_uids argument
    # note that if robot_uids is a list of robot uids, then we treat it as a multi-agent setup and load each robot separately.
    def __init__(self, *args, robot_uids="panda", robot_init_qpos_noise=0.02, **kwargs):
        self.robot_init_qpos_noise = robot_init_qpos_noise
        super().__init__(*args, robot_uids=robot_uids, **kwargs)

    # Specify default simulation/gpu memory configurations. Note that tasks need to tune their GPU memory configurations accordingly
    # in order to save memory while also running with no errors. In general you can start with low values and increase them
    # depending on the messages that show up when you try to run more environments in parallel. Since this is a python property
    # you can also check self.num_envs to dynamically set configurations as well
    @property
    def _default_sim_config(self):
        return SimConfig(
            gpu_memory_config=GPUMemoryConfig(
                found_lost_pairs_capacity=2**25, max_rigid_patch_count=2**18
            )
        )

    """
    Reconfiguration Code

    below are all functions involved in reconfiguration during environment reset called in the same order. As a user
    you can change these however you want for your desired task. These functions will only ever be called once in general. In CPU simulation,
    for some tasks these may need to be called multiple times if you need to swap out object assets. In GPU simulation these will only ever be called once.
    """

    def _load_agent(self, options: dict):
        # this code loads the agent into the current scene. You should use it to specify the initial pose(s) of the agent(s)
        # such that they don't collide with other objects initially
        super()._load_agent(options, sapien.Pose(p=[0, 0, 0]))

    def _load_scene(self, options: dict):
        # here you add various objects like actors and articulations. If your task was to push a ball, you may add a dynamic sphere object on the ground
        pass

    @property
    def _default_sensor_configs(self):
        # To customize the sensors that capture images/pointclouds for the environment observations,
        # simply define a CameraConfig as done below for Camera sensors. You can add multiple sensors by returning a list
        pose = sapien_utils.look_at(
            eye=[0.3, 0, 0.6], target=[-0.1, 0, 0.1]
        )  # sapien_utils.look_at is a utility to get the pose of a camera that looks at a target

        # to see what all the sensors capture in the environment for observations, run env.render_sensors() which returns an rgb array you can visualize
        return [CameraConfig("base_camera", pose, 128, 128, np.pi / 2, 0.01, 100)]

    @property
    def _default_human_render_camera_configs(self):
        # this is just like _sensor_configs, but for adding cameras used for rendering when you call env.render()
        # when render_mode="rgb_array" or env.render_rgb_array()
        # Another feature here is that if there is a camera called render_camera, this is the default view shown initially when a GUI is opened
        pose = sapien_utils.look_at([0.6, 0.7, 0.6], [0.0, 0.0, 0.35])
        return [CameraConfig("render_camera", pose, 512, 512, 1, 0.01, 100)]

    def _setup_sensors(self, options: dict):
        # default code here will setup all sensors. You can add additional code to change the sensors e.g.
        # if you want to randomize camera positions
        # fixed: forward `options` to the base class (it was previously dropped, which raises a TypeError)
        return super()._setup_sensors(options)

    def _load_lighting(self, options: dict):
        # default code here will setup all lighting. You can add additional code to change the lighting e.g.
        # if you want to randomize lighting in the scene
        # fixed: forward `options` to the base class (it was previously dropped, which raises a TypeError)
        return super()._load_lighting(options)

    """
    Episode Initialization Code

    below are all functions involved in episode initialization during environment reset called in the same order. As a user
    you can change these however you want for your desired task. Note that these functions are given a env_idx variable.

    `env_idx` is a torch Tensor representing the indices of the parallel environments that are being initialized/reset. This is used
    to support partial resets where some parallel envs might be reset while others are still running (useful for faster RL and evaluation).
    Generally you only need to really use it to determine batch sizes via len(env_idx). ManiSkill helps handle internally a lot of masking
    you might normally need to do when working with GPU simulation. For specific details check out the push_cube.py code
    """

    def _initialize_episode(self, env_idx: torch.Tensor, options: dict):
        pass

    """
    Modifying observations, goal parameterization, and success conditions for your task

    the code below all impact some part of `self.step` function
    """

    def evaluate(self, obs: Any):
        # this function is used primarily to determine success and failure of a task, both of which are optional. If a dictionary is returned
        # containing "success": bool array indicating if the env is in success state or not, that is used as the terminated variable returned by
        # self.step. Likewise if it contains "fail": bool array indicating the opposite (failure state or not) the same occurs. If both are given
        # then a logical OR is taken so terminated = success | fail. If neither are given, terminated is always all False.
        #
        # You may also include additional keys which will populate the info object returned by self.step and that will be given to
        # `_get_obs_extra` and `_compute_dense_reward`. Note that as everything is batched, you must return a batched array of
        # `self.num_envs` booleans (or 0/1 values) for success and fail as done in the example below
        return {
            "success": torch.zeros(self.num_envs, device=self.device, dtype=bool),
            "fail": torch.zeros(self.num_envs, device=self.device, dtype=bool),
        }

    def _get_obs_extra(self, info: Dict):
        # should return an dict of additional observation data for your tasks
        # this will be included as part of the observation in the "extra" key when obs_mode="state_dict" or any of the visual obs_modes
        # and included as part of a flattened observation when obs_mode="state". Moreover, you have access to the info object
        # which is generated by the `evaluate` function above
        return dict()

    def compute_dense_reward(self, obs: Any, action: torch.Tensor, info: Dict):
        # you can optionally provide a dense reward function by returning a scalar value here. This is used when reward_mode="dense"
        # note that as everything is batched, you must return a batch of of self.num_envs rewards as done in the example below.
        # Moreover, you have access to the info object which is generated by the `evaluate` function above
        return torch.zeros(self.num_envs, device=self.device)

    def compute_normalized_dense_reward(
        self, obs: Any, action: torch.Tensor, info: Dict
    ):
        # this should be equal to compute_dense_reward / max possible reward
        max_reward = 1.0
        return self.compute_dense_reward(obs=obs, action=action, info=info) / max_reward

    def get_state_dict(self):
        # this function is important in order to allow accurate replaying of trajectories. Make sure to specify any
        # non simulation state related data such as a random 3D goal position you generated
        # alternatively you can skip this part if the environment's rewards, observations, eval etc. are dependent on simulation data only
        # e.g. self.your_custom_actor.pose.p will always give you your actor's 3D position
        state = super().get_state_dict()
        # state["goal_pos"] = add_your_non_sim_state_data_here
        return state

    def set_state_dict(self, state):
        # this function complements get_state and sets any non simulation state related data correctly so the environment behaves
        # the exact same in terms of output rewards, observations, success etc. should you reset state to a given state and take the same actions
        # fixed: kept as a commented example to mirror get_state_dict above — the uncommented line
        # raised a KeyError on round-trip because get_state_dict never stores "goal_pos"
        # self.goal_pos = state["goal_pos"]
        super().set_state_dict(state)
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/bowl_on_rack.cpython-310.pyc
ADDED
|
Binary file (2.94 kB). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/grasp_bowl_v0.cpython-310.pyc
ADDED
|
Binary file (3.47 kB). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/pull_cube_tool.cpython-310.pyc
ADDED
|
Binary file (2.16 kB). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/stack_mug_on_rack.cpython-310.pyc
ADDED
|
Binary file (3.39 kB). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/assets/data.py
ADDED
|
@@ -0,0 +1,216 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Asset sources and tooling for managing the assets
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import os
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from typing import Dict, List, Optional
|
| 8 |
+
|
| 9 |
+
from mani_skill import ASSET_DIR, PACKAGE_ASSET_DIR
|
| 10 |
+
from mani_skill.utils import io_utils
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@dataclass
class DataSource:
    # Describes one downloadable asset bundle: what it is, where to fetch it
    # from (exactly one of url / hf_repo_id / github_url is expected), and
    # where it lands on disk.
    source_type: str
    """what kind of data is this"""
    # download locations; presumably mutually exclusive — TODO confirm against the downloader
    url: Optional[str] = None
    hf_repo_id: Optional[str] = None
    github_url: Optional[str] = None
    target_path: Optional[str] = None
    """the folder where the file will be downloaded to"""
    # expected SHA-256 of the downloaded file, if integrity checking is desired
    checksum: Optional[str] = None
    zip_dirname: Optional[str] = None
    """what to rename a zip file's generated directory to"""
    filename: Optional[str] = None
    """name to change the downloaded file to. If None, will not change the name"""
    # base directory downloads are placed under; ASSET_DIR is joined with
    # target_path via the `/` operator, so it behaves as a pathlib.Path
    output_dir: os.PathLike = ASSET_DIR
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
DATA_SOURCES: Dict[str, DataSource] = {}
"""Data sources map data source IDs to their respective DataSource objects which contain info on what the data is and where to download it"""
# NOTE(review): initialize_data_sources assigns set objects for some partnet_mobility
# groups despite the List[str] annotation — rely only on iteration/membership.
DATA_GROUPS: Dict[str, List[str]] = {}
"""Data groups map group ids (typically environment IDs) to a list of data source/group IDs for easy group management. data groups can be done hierarchically"""
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def is_data_source_downloaded(data_source_id: str):
    """Return whether the files for the given data source already exist on disk."""
    source = DATA_SOURCES[data_source_id]
    destination = source.output_dir / source.target_path
    return destination.exists()
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def initialize_data_sources():
    """Populate the global DATA_SOURCES and DATA_GROUPS registries.

    Registers task assets, PartNet-Mobility object models, interactable scene
    datasets, and robot description packages. Intended to be called once at
    module import time.
    """
    DATA_SOURCES["ycb"] = DataSource(
        source_type="task_assets",
        url="https://huggingface.co/datasets/haosulab/ManiSkill2/resolve/main/data/mani_skill2_ycb.zip",
        target_path="assets/mani_skill2_ycb",
        checksum="eb6f30642c90203715c178f67bf2288887ef6e7d05a9f3f1e713efcf7c2a541c",
    )
    DATA_SOURCES["pick_clutter_ycb_configs"] = DataSource(
        source_type="task_assets",
        url="https://storage1.ucsd.edu/datasets/ManiSkill2022-assets/pick_clutter/ycb_train_5k.json.gz",
        target_path="tasks/pick_clutter",
        checksum="70ec176c7036f326ea7813b77f8c03bea9db5960198498957a49b2895a9ec338",
    )
    DATA_SOURCES["assembling_kits"] = DataSource(
        source_type="task_assets",
        url="https://storage1.ucsd.edu/datasets/ManiSkill2022-assets/assembling_kits_v1.zip",
        target_path="tasks/assembling_kits",
        checksum="e3371f17a07a012edaa3a0b3604fb1577f3fb921876c3d5ed59733dd75a6b4a0",
    )
    DATA_SOURCES["panda_avoid_obstacles"] = DataSource(
        source_type="task_assets",
        url="https://storage1.ucsd.edu/datasets/ManiSkill2022-assets/avoid_obstacles/panda_train_2k.json.gz",
        target_path="tasks/avoid_obstacles",
        checksum="44dae9a0804172515c290c1f49a1e7e72d76e40201a2c5c7d4a3ccd43b4d5be4",
    )

    DATA_SOURCES["bridge_v2_real2sim"] = DataSource(
        source_type="task_assets",
        url="https://huggingface.co/datasets/haosulab/ManiSkill_bridge_v2_real2sim/resolve/main/bridge_v2_real2sim_dataset.zip",
        target_path="tasks/bridge_v2_real2sim_dataset",
        checksum="618512a205b4528cafecdad14b1788ed1130879f3064deb406516ed5b9c5ba92",
    )

    # ---------------------------------------------------------------------------- #
    # PartNet-mobility
    # ---------------------------------------------------------------------------- #
    category_uids = {}
    for category in ["cabinet_drawer", "cabinet_door", "chair", "bucket", "faucet"]:
        model_json = (
            PACKAGE_ASSET_DIR / f"partnet_mobility/meta/info_{category}_train.json"
        )
        model_ids = set(io_utils.load_json(model_json).keys())
        category_uids[category] = []
        for model_id in model_ids:
            uid = f"partnet_mobility/{model_id}"
            DATA_SOURCES[uid] = DataSource(
                source_type="objects",
                url=f"https://storage1.ucsd.edu/datasets/ManiSkill2022-assets/partnet_mobility/dataset/{model_id}.zip",
                target_path=ASSET_DIR / "partnet_mobility" / "dataset" / model_id,
            )
            category_uids[category].append(uid)

    # fixed: store sorted lists instead of sets so the values match the declared
    # Dict[str, List[str]] type of DATA_GROUPS and iteration order is deterministic
    DATA_GROUPS["partnet_mobility_cabinet"] = sorted(
        set(category_uids["cabinet_drawer"] + category_uids["cabinet_door"])
    )
    DATA_GROUPS["partnet_mobility_chair"] = category_uids["chair"]
    DATA_GROUPS["partnet_mobility_bucket"] = category_uids["bucket"]
    DATA_GROUPS["partnet_mobility_faucet"] = category_uids["faucet"]
    DATA_GROUPS["partnet_mobility"] = sorted(
        set(
            category_uids["cabinet_drawer"]
            + category_uids["cabinet_door"]
            + category_uids["chair"]
            + category_uids["bucket"]
            + category_uids["faucet"]
        )
    )

    # DATA_GROUPS["OpenCabinetDrawer-v1"] = category_uids["cabinet_drawer"]
    # DATA_GROUPS["OpenCabinetDoor-v1"] = category_uids["cabinet_door"]
    # DATA_GROUPS["PushChair-v1"] = category_uids["chair"]
    # DATA_GROUPS["MoveBucket-v1"] = category_uids["bucket"]
    # DATA_GROUPS["TurnFaucet-v1"] = category_uids["faucet"]

    # ---------------------------------------------------------------------------- #
    # Interactable Scene Datasets
    # ---------------------------------------------------------------------------- #
    DATA_SOURCES["ReplicaCAD"] = DataSource(
        source_type="scene",
        hf_repo_id="haosulab/ReplicaCAD",
        target_path="scene_datasets/replica_cad_dataset",
    )

    DATA_SOURCES["ReplicaCADRearrange"] = DataSource(
        source_type="scene",
        url="https://huggingface.co/datasets/haosulab/ReplicaCADRearrange/resolve/main/rearrange.zip",
        target_path="scene_datasets/replica_cad_dataset/rearrange",
    )

    DATA_SOURCES["AI2THOR"] = DataSource(
        source_type="scene",
        url="https://huggingface.co/datasets/haosulab/AI2THOR/resolve/main/ai2thor.zip",
        target_path="scene_datasets/ai2thor",
    )

    DATA_SOURCES["RoboCasa"] = DataSource(
        source_type="scene",
        url="https://huggingface.co/datasets/haosulab/RoboCasa/resolve/main/robocasa_dataset.zip",
        target_path="scene_datasets/robocasa_dataset",
    )

    # Robots
    DATA_SOURCES["xmate3_robotiq"] = DataSource(
        source_type="robot",
        url="https://storage1.ucsd.edu/datasets/ManiSkill2022-assets/xmate3_robotiq.zip",
        target_path="robots/xmate3_robotiq",
        checksum="ddda102a20eb41e28a0a501702e240e5d7f4084221a44f580e729f08b7c12d1a",
    )
    DATA_SOURCES["ur10e"] = DataSource(
        source_type="robot",
        url="https://github.com/haosulab/ManiSkill-UR10e/archive/refs/tags/v0.1.0.zip",
        target_path="robots/ur10e",
    )
    DATA_SOURCES["anymal_c"] = DataSource(
        source_type="robot",
        url="https://github.com/haosulab/ManiSkill-ANYmalC/archive/refs/tags/v0.1.1.zip",
        target_path="robots/anymal_c",
    )
    DATA_SOURCES["unitree_h1"] = DataSource(
        source_type="robot",
        url="https://github.com/haosulab/ManiSkill-UnitreeH1/archive/refs/tags/v0.1.0.zip",
        target_path="robots/unitree_h1",
    )
    DATA_SOURCES["unitree_g1"] = DataSource(
        source_type="robot",
        url="https://github.com/haosulab/ManiSkill-UnitreeG1/archive/refs/tags/v0.1.0.zip",
        target_path="robots/unitree_g1",
    )
    DATA_SOURCES["unitree_go2"] = DataSource(
        source_type="robot",
        url="https://github.com/haosulab/ManiSkill-UnitreeGo2/archive/refs/tags/v0.1.1.zip",
        target_path="robots/unitree_go2",
    )
    DATA_SOURCES["stompy"] = DataSource(
        source_type="robot",
        url="https://github.com/haosulab/ManiSkill-Stompy/archive/refs/tags/v0.1.0.zip",
        target_path="robots/stompy",
    )
    DATA_SOURCES["widowx250s"] = DataSource(
        source_type="robot",
        url="https://github.com/haosulab/ManiSkill-WidowX250S/archive/refs/tags/v0.2.0.zip",
        target_path="robots/widowx",
    )
    DATA_SOURCES["googlerobot"] = DataSource(
        source_type="robot",
        url="https://github.com/haosulab/ManiSkill-GoogleRobot/archive/refs/tags/v0.1.0.zip",
        target_path="robots/googlerobot",
    )
    DATA_SOURCES["robotiq_2f"] = DataSource(
        source_type="robot",
        url="https://github.com/haosulab/ManiSkill-Robotiq_2F/archive/refs/tags/v0.1.0.zip",
        target_path="robots/robotiq_2f",
    )
    DATA_SOURCES["xarm6"] = DataSource(
        source_type="robot",
        url="https://github.com/haosulab/ManiSkill-XArm6/archive/refs/tags/v0.1.0.zip",
        target_path="robots/xarm6",
    )
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def expand_data_group_into_individual_data_source_ids(data_group_id: str):
    """Expand a data group into a list of individual data source IDs.

    Groups may reference other groups; references are resolved recursively.
    IDs that are neither a data source nor a group are silently skipped
    (matching the original behavior). Returns a sorted, de-duplicated list.
    Raises KeyError if ``data_group_id`` is not a registered group.
    """
    resolved = set()
    visited_groups = set()

    def _walk(uid: str):
        # data sources are leaves; groups recurse (guarded against cycles,
        # since hierarchical group definitions could otherwise loop forever)
        if uid in DATA_SOURCES:
            resolved.add(uid)
        elif uid in DATA_GROUPS and uid not in visited_groups:
            visited_groups.add(uid)
            for child in DATA_GROUPS[uid]:
                _walk(child)

    for uid in DATA_GROUPS[data_group_id]:
        _walk(uid)
    # sorted for deterministic output (original returned list(set(...)),
    # whose order was arbitrary and unspecified)
    return sorted(resolved)
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
# Populate DATA_SOURCES and DATA_GROUPS at import time so they are ready for callers.
initialize_data_sources()
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/building/_mjcf_loader.py
ADDED
|
@@ -0,0 +1,921 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Loader code to import MJCF xml files into SAPIEN
|
| 3 |
+
|
| 4 |
+
Code partially adapted from https://github.com/NVIDIA/warp/blob/3ed2ceab824b65486c5204d2a7381d37b79fc314/warp/sim/import_mjcf.py
|
| 5 |
+
|
| 6 |
+
Articulations are known as kinematic trees (defined by <body> tags) in Mujoco. A single .xml file can have multiple articulations
|
| 7 |
+
|
| 8 |
+
Any <geom> tag in <worldbody> but not a <body> tag will be built as separate static actors if possible. Actors that are not static seem to be defined
|
| 9 |
+
with a free joint under a single body tag.
|
| 10 |
+
|
| 11 |
+
Warnings of unloadable tags/data can be printed if verbosity is turned on (by default it is off)
|
| 12 |
+
|
| 13 |
+
Notes:
|
| 14 |
+
Joint properties relating to the solver, stiffness, actuator, are all not directly imported here
|
| 15 |
+
and instead must be implemented via a controller like other robots in SAPIEN
|
| 16 |
+
|
| 17 |
+
Contact tags are not supported
|
| 18 |
+
|
| 19 |
+
Tendons/equality constraints are supported but may not work the same
|
| 20 |
+
|
| 21 |
+
The default group of geoms is 0 in mujoco. From docs it appears only group 0 and 2 are rendered by default.
|
| 22 |
+
This is also by default what the visualizer shows and presumably what image renders show.
|
| 23 |
+
Any other group is treated as being invisible (e.g. in SAPIEN we do not add visual bodies). SAPIEN does not currently support
|
| 24 |
+
toggling render groups like Mujoco. Sometimes a MJCF might not follow this and will try and render other groups. In that case the loader supports
|
| 25 |
+
indicating which other groups to add visual bodies for.
|
| 26 |
+
|
| 27 |
+
Ref: https://mujoco.readthedocs.io/en/stable/XMLreference.html#body-geom-group,
|
| 28 |
+
https://mujoco.readthedocs.io/en/latest/modeling.html#composite-objects (says group 3 is turned off)
|
| 29 |
+
|
| 30 |
+
If contype is 0, it means that geom can't collide with anything. We do this by not adding a collision shape at all.
|
| 31 |
+
|
| 32 |
+
geoms under worldbody but not body tags are treated as static objects at the moment.
|
| 33 |
+
|
| 34 |
+
Useful references:
|
| 35 |
+
- Collision detection: https://mujoco.readthedocs.io/en/stable/computation/index.html#collision-detection
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
"""
|
| 39 |
+
import math
|
| 40 |
+
import os
|
| 41 |
+
import re
|
| 42 |
+
import xml.etree.ElementTree as ET
|
| 43 |
+
from collections import defaultdict
|
| 44 |
+
from copy import deepcopy
|
| 45 |
+
from dataclasses import dataclass
|
| 46 |
+
from functools import reduce
|
| 47 |
+
from typing import Any, Dict, List, Literal, Tuple, Union
|
| 48 |
+
from xml.etree.ElementTree import Element
|
| 49 |
+
|
| 50 |
+
import numpy as np
|
| 51 |
+
import sapien
|
| 52 |
+
from sapien import ActorBuilder, Pose
|
| 53 |
+
from sapien.physx import PhysxArticulation, PhysxMaterial
|
| 54 |
+
from sapien.render import RenderMaterial, RenderTexture2D
|
| 55 |
+
from sapien.wrapper.articulation_builder import (
|
| 56 |
+
ArticulationBuilder,
|
| 57 |
+
LinkBuilder,
|
| 58 |
+
MimicJointRecord,
|
| 59 |
+
)
|
| 60 |
+
from transforms3d import euler, quaternions
|
| 61 |
+
|
| 62 |
+
from mani_skill import logger
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
@dataclass
class MJCFTexture:
    # Parsed representation of an MJCF <texture> asset entry.
    name: str  # asset name that materials reference
    type: Literal["skybox", "cube", "2d"]  # MJCF texture type attribute
    rgb1: list  # first color — presumably for builtin procedural textures; confirm against MJCF docs
    rgb2: list  # second color — presumably for builtin procedural textures; confirm against MJCF docs
    file: str  # path to the texture image file, for file-based textures
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
# Default loader options; "contact" toggles whether contact information is processed.
DEFAULT_MJCF_OPTIONS = dict(contact=True)


# Tracks which warning keys have already been emitted so each is logged only once.
WARNED_ONCE = defaultdict(lambda: False)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def _parse_int(attrib, key, default):
|
| 81 |
+
if key in attrib:
|
| 82 |
+
return int(attrib[key])
|
| 83 |
+
else:
|
| 84 |
+
return default
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def _parse_float(attrib, key, default):
|
| 88 |
+
if key in attrib:
|
| 89 |
+
return float(attrib[key])
|
| 90 |
+
else:
|
| 91 |
+
return default
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def _str_to_float(string: str, delimiter=" "):
|
| 95 |
+
res = [float(x) for x in string.split(delimiter)]
|
| 96 |
+
if len(res) == 1:
|
| 97 |
+
return res[0]
|
| 98 |
+
return res
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def _merge_attrib(default_attrib: dict, incoming_attribs: Union[List[dict], dict]):
|
| 102 |
+
def helper_merge(a: dict, b: dict, path=[]):
|
| 103 |
+
for key in b:
|
| 104 |
+
if key in a:
|
| 105 |
+
if isinstance(a[key], dict) and isinstance(b[key], dict):
|
| 106 |
+
helper_merge(a[key], b[key], path + [str(key)])
|
| 107 |
+
else:
|
| 108 |
+
a[key] = b[key]
|
| 109 |
+
else:
|
| 110 |
+
a[key] = b[key]
|
| 111 |
+
return a
|
| 112 |
+
|
| 113 |
+
attrib = deepcopy(default_attrib)
|
| 114 |
+
if isinstance(incoming_attribs, dict):
|
| 115 |
+
incoming_attribs = [incoming_attribs]
|
| 116 |
+
reduce(helper_merge, [attrib] + incoming_attribs)
|
| 117 |
+
return attrib
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def _parse_vec(attrib, key, default):
|
| 121 |
+
if key in attrib:
|
| 122 |
+
out = np.fromstring(attrib[key], sep=" ", dtype=np.float32)
|
| 123 |
+
else:
|
| 124 |
+
out = np.array(default, dtype=np.float32)
|
| 125 |
+
return out
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def _parse_orientation(attrib, use_degrees, euler_seq):
|
| 129 |
+
if "quat" in attrib:
|
| 130 |
+
wxyz = np.fromstring(attrib["quat"], sep=" ")
|
| 131 |
+
return wxyz
|
| 132 |
+
if "euler" in attrib:
|
| 133 |
+
euler_angles = np.fromstring(attrib["euler"], sep=" ")
|
| 134 |
+
if use_degrees:
|
| 135 |
+
euler_angles *= np.pi / 180
|
| 136 |
+
# TODO (stao): support other axes?
|
| 137 |
+
return np.array(
|
| 138 |
+
euler.euler2quat(euler_angles[0], -euler_angles[1], euler_angles[2])
|
| 139 |
+
)
|
| 140 |
+
if "axisangle" in attrib:
|
| 141 |
+
axisangle = np.fromstring(attrib["axisangle"], sep=" ")
|
| 142 |
+
angle = axisangle[3]
|
| 143 |
+
if use_degrees:
|
| 144 |
+
angle *= np.pi / 180
|
| 145 |
+
axis = axisangle[:3] / np.linalg.norm(axisangle[:3])
|
| 146 |
+
return quaternions.axangle2quat(axis, angle)
|
| 147 |
+
if "xyaxes" in attrib:
|
| 148 |
+
xyaxes = np.fromstring(attrib["xyaxes"], sep=" ")
|
| 149 |
+
xaxis = xyaxes[:3] / np.linalg.norm(xyaxes[:3])
|
| 150 |
+
zaxis = xyaxes[3:] / np.linalg.norm(xyaxes[:3])
|
| 151 |
+
yaxis = np.cross(zaxis, xaxis)
|
| 152 |
+
yaxis = yaxis / np.linalg.norm(yaxis)
|
| 153 |
+
rot_matrix = np.array([xaxis, yaxis, zaxis]).T
|
| 154 |
+
return quaternions.mat2quat(rot_matrix)
|
| 155 |
+
if "zaxis" in attrib:
|
| 156 |
+
zaxis = np.fromstring(attrib["zaxis"], sep=" ")
|
| 157 |
+
zaxis = zaxis / np.linalg.norm(zaxis)
|
| 158 |
+
xaxis = np.cross(np.array([0, 0, 1]), zaxis)
|
| 159 |
+
xaxis = xaxis / np.linalg.norm(xaxis)
|
| 160 |
+
yaxis = np.cross(zaxis, xaxis)
|
| 161 |
+
yaxis = yaxis / np.linalg.norm(yaxis)
|
| 162 |
+
rot_matrix = np.array([xaxis, yaxis, zaxis]).T
|
| 163 |
+
return quaternions.mat2quat(rot_matrix)
|
| 164 |
+
return np.array([1, 0, 0, 0])
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
class MJCFLoader:
    """
    Class to load MJCF into SAPIEN.
    """

    def __init__(self, ignore_classes=["motor"], visual_groups=[0, 2]):
        # NOTE(review): mutable default arguments are shared across instances;
        # safe only as long as callers never mutate them in place.
        self.fix_root_link = True
        """whether to fix the root link. Note regardless of given XML, the root link is a dummy link this loader
        creates which makes a number of operations down the line easier. In general this should be False if there is a freejoint for the root body
        of articulations in the XML and should be true if there are no free joints. At the moment when modelling a robot from Mujoco this
        must be handled on a case by case basis"""

        # collision-mesh loading behavior (mirrors the URDF loader's options)
        self.load_multiple_collisions_from_file = False
        self.load_nonconvex_collisions = False
        self.multiple_collisions_decomposition = "none"
        self.multiple_collisions_decomposition_params = dict()

        self.revolute_unwrapped = False
        # uniform scale applied to all positions, sizes and meshes
        self.scale = 1.0

        # geom "group" ids whose geoms get visual bodies (see note at top of file)
        self.visual_groups = visual_groups

        # must be set via set_scene() before parse()/load()
        self.scene: sapien.Scene = None

        # regex patterns; geoms/bodies whose default class matches any are skipped
        self.ignore_classes = ignore_classes

        # self._material = None
        # self._patch_radius = 0
        # self._min_patch_radius = 0
        # default density used when a geom does not specify one
        self.density = 1000
        # self._link_material = dict()
        # self._link_patch_radius = dict()
        # self._link_min_patch_radius = dict()
        # self._link_density = dict()

        # parsed <default> classes, keyed by class name ("__root__" for top level)
        self._defaults: Dict[str, Element] = dict()
        self._assets = dict()
        # material name -> sapien RenderMaterial
        self._materials = dict()
        # texture name -> MJCFTexture
        self._textures: Dict[str, MJCFTexture] = dict()
        # mesh asset name -> its XML element
        self._meshes: Dict[str, Element] = dict()

        # link name -> its LinkBuilder, for post-parse lookups
        self._link2builder: Dict[str, LinkBuilder] = dict()
        self._link2parent_joint: Dict[str, Any] = dict()
        # counter used to assign unique collision-group bits to adjacent links
        self._group_count = 0
|
| 211 |
+
|
| 212 |
+
    def set_scene(self, scene):
        """Set the SAPIEN scene that builders are created in; returns self for chaining."""
        self.scene = scene
        return self
|
| 215 |
+
|
| 216 |
+
@staticmethod
|
| 217 |
+
def _pose_from_origin(origin, scale):
|
| 218 |
+
origin[:3, 3] = origin[:3, 3] * scale
|
| 219 |
+
return Pose(origin)
|
| 220 |
+
|
| 221 |
+
def _build_geom(
|
| 222 |
+
self, geom: Element, builder: Union[LinkBuilder, ActorBuilder], defaults
|
| 223 |
+
):
|
| 224 |
+
geom_defaults = defaults
|
| 225 |
+
if "class" in geom.attrib:
|
| 226 |
+
geom_class = geom.attrib["class"]
|
| 227 |
+
ignore_geom = False
|
| 228 |
+
for pattern in self.ignore_classes:
|
| 229 |
+
if re.match(pattern, geom_class):
|
| 230 |
+
ignore_geom = True
|
| 231 |
+
break
|
| 232 |
+
if ignore_geom:
|
| 233 |
+
return
|
| 234 |
+
if geom_class in self._defaults:
|
| 235 |
+
geom_defaults = _merge_attrib(defaults, self._defaults[geom_class])
|
| 236 |
+
if "geom" in geom_defaults:
|
| 237 |
+
geom_attrib = _merge_attrib(geom_defaults["geom"], geom.attrib)
|
| 238 |
+
else:
|
| 239 |
+
geom_attrib = geom.attrib
|
| 240 |
+
|
| 241 |
+
geom_name = geom_attrib.get("name", "")
|
| 242 |
+
geom_type = geom_attrib.get("type", "sphere")
|
| 243 |
+
if "mesh" in geom_attrib:
|
| 244 |
+
geom_type = "mesh"
|
| 245 |
+
geom_size = (
|
| 246 |
+
_parse_vec(geom_attrib, "size", np.array([1.0, 1.0, 1.0])) * self.scale
|
| 247 |
+
)
|
| 248 |
+
geom_pos = (
|
| 249 |
+
_parse_vec(geom_attrib, "pos", np.array([0.0, 0.0, 0.0])) * self.scale
|
| 250 |
+
)
|
| 251 |
+
geom_rot = _parse_orientation(geom_attrib, self._use_degrees, self._euler_seq)
|
| 252 |
+
_parse_float(geom_attrib, "density", self.density)
|
| 253 |
+
if "material" in geom_attrib:
|
| 254 |
+
render_material = self._materials[geom_attrib["material"]]
|
| 255 |
+
else:
|
| 256 |
+
# use RGBA
|
| 257 |
+
render_material = RenderMaterial(
|
| 258 |
+
base_color=_parse_vec(geom_attrib, "rgba", [0.5, 0.5, 0.5, 1])
|
| 259 |
+
)
|
| 260 |
+
|
| 261 |
+
geom_density = _parse_float(geom_attrib, "density", 1000.0)
|
| 262 |
+
|
| 263 |
+
# if condim is 1, we can easily model the material's friction
|
| 264 |
+
condim = _parse_int(geom_attrib, "condim", 3)
|
| 265 |
+
if condim == 3:
|
| 266 |
+
friction = _parse_vec(
|
| 267 |
+
geom_attrib, "friction", np.array([0.3, 0.3, 0.3])
|
| 268 |
+
) # maniskill default friction is 0.3
|
| 269 |
+
# NOTE (stao): we only support sliding friction at the moment. see
|
| 270 |
+
# https://mujoco.readthedocs.io/en/stable/XMLreference.html#body-geom-friction
|
| 271 |
+
# we might be able to imitate their torsional frictions via patch radius attributes:
|
| 272 |
+
# https://nvidia-omniverse.github.io/PhysX/physx/5.4.0/_api_build/class_px_shape.html#_CPPv4N7PxShape23setTorsionalPatchRadiusE6PxReal
|
| 273 |
+
friction = friction[0]
|
| 274 |
+
physx_material = PhysxMaterial(
|
| 275 |
+
static_friction=friction, dynamic_friction=friction, restitution=0
|
| 276 |
+
)
|
| 277 |
+
elif condim == 1:
|
| 278 |
+
physx_material = PhysxMaterial(
|
| 279 |
+
static_friction=0, dynamic_friction=0, restitution=0
|
| 280 |
+
)
|
| 281 |
+
else:
|
| 282 |
+
physx_material = None
|
| 283 |
+
|
| 284 |
+
geom_group = _parse_int(geom_attrib, "group", 0)
|
| 285 |
+
# See note at top of file for how we handle geom groups
|
| 286 |
+
has_visual_body = False
|
| 287 |
+
if geom_group in self.visual_groups:
|
| 288 |
+
has_visual_body = True
|
| 289 |
+
|
| 290 |
+
geom_contype = _parse_int(geom_attrib, "contype", 1)
|
| 291 |
+
# See note at top of file for how we handle contype / objects without collisions
|
| 292 |
+
has_collisions = True
|
| 293 |
+
if geom_contype == 0:
|
| 294 |
+
has_collisions = False
|
| 295 |
+
|
| 296 |
+
t_visual2link = Pose(geom_pos, geom_rot)
|
| 297 |
+
if geom_type == "sphere":
|
| 298 |
+
if has_visual_body:
|
| 299 |
+
builder.add_sphere_visual(
|
| 300 |
+
t_visual2link, radius=geom_size[0], material=render_material
|
| 301 |
+
)
|
| 302 |
+
if has_collisions:
|
| 303 |
+
builder.add_sphere_collision(
|
| 304 |
+
t_visual2link,
|
| 305 |
+
radius=geom_size[0],
|
| 306 |
+
material=physx_material,
|
| 307 |
+
density=geom_density,
|
| 308 |
+
)
|
| 309 |
+
elif geom_type in ["capsule", "cylinder", "box"]:
|
| 310 |
+
if "fromto" in geom_attrib:
|
| 311 |
+
geom_fromto = _parse_vec(
|
| 312 |
+
geom_attrib, "fromto", (0.0, 0.0, 0.0, 1.0, 0.0, 0.0)
|
| 313 |
+
)
|
| 314 |
+
|
| 315 |
+
start = np.array(geom_fromto[0:3]) * self.scale
|
| 316 |
+
end = np.array(geom_fromto[3:6]) * self.scale
|
| 317 |
+
# objects follow a line from start to end.
|
| 318 |
+
|
| 319 |
+
# objects are default along x-axis and we rotate accordingly via axis angle.
|
| 320 |
+
axis = (end - start) / np.linalg.norm(end - start)
|
| 321 |
+
# TODO this is bugged
|
| 322 |
+
angle = math.acos(np.dot(axis, np.array([1.0, 0.0, 0.0])))
|
| 323 |
+
axis = np.cross(axis, np.array([1.0, 0.0, 0.0]))
|
| 324 |
+
if np.linalg.norm(axis) < 1e-3:
|
| 325 |
+
axis = np.array([1, 0, 0])
|
| 326 |
+
else:
|
| 327 |
+
axis = axis / np.linalg.norm(axis)
|
| 328 |
+
|
| 329 |
+
geom_pos = (start + end) * 0.5
|
| 330 |
+
geom_rot = quaternions.axangle2quat(axis, -angle)
|
| 331 |
+
t_visual2link.set_p(geom_pos)
|
| 332 |
+
t_visual2link.set_q(geom_rot)
|
| 333 |
+
geom_radius = geom_size[0]
|
| 334 |
+
geom_half_length = np.linalg.norm(end - start) * 0.5
|
| 335 |
+
else:
|
| 336 |
+
geom_radius = geom_size[0]
|
| 337 |
+
geom_half_length = geom_size[1]
|
| 338 |
+
# TODO (stao): oriented along z-axis for capsules whereas boxes are not?
|
| 339 |
+
if geom_type in ["capsule", "cylinder"]:
|
| 340 |
+
t_visual2link = t_visual2link * Pose(
|
| 341 |
+
q=euler.euler2quat(0, np.pi / 2, 0)
|
| 342 |
+
)
|
| 343 |
+
if geom_type == "capsule":
|
| 344 |
+
if has_visual_body:
|
| 345 |
+
builder.add_capsule_visual(
|
| 346 |
+
t_visual2link,
|
| 347 |
+
radius=geom_radius,
|
| 348 |
+
half_length=geom_half_length,
|
| 349 |
+
material=render_material,
|
| 350 |
+
name=geom_name,
|
| 351 |
+
)
|
| 352 |
+
if has_collisions:
|
| 353 |
+
builder.add_capsule_collision(
|
| 354 |
+
t_visual2link,
|
| 355 |
+
radius=geom_radius,
|
| 356 |
+
half_length=geom_half_length,
|
| 357 |
+
material=physx_material,
|
| 358 |
+
density=geom_density,
|
| 359 |
+
# name=geom_name,
|
| 360 |
+
)
|
| 361 |
+
elif geom_type == "box":
|
| 362 |
+
if has_visual_body:
|
| 363 |
+
builder.add_box_visual(
|
| 364 |
+
t_visual2link,
|
| 365 |
+
half_size=geom_size,
|
| 366 |
+
material=render_material,
|
| 367 |
+
name=geom_name,
|
| 368 |
+
)
|
| 369 |
+
if has_collisions:
|
| 370 |
+
builder.add_box_collision(
|
| 371 |
+
t_visual2link,
|
| 372 |
+
half_size=geom_size,
|
| 373 |
+
material=physx_material,
|
| 374 |
+
density=geom_density,
|
| 375 |
+
# name=geom_name,
|
| 376 |
+
)
|
| 377 |
+
elif geom_type == "cylinder":
|
| 378 |
+
if has_visual_body:
|
| 379 |
+
builder.add_cylinder_visual(
|
| 380 |
+
t_visual2link,
|
| 381 |
+
radius=geom_radius,
|
| 382 |
+
half_length=geom_half_length,
|
| 383 |
+
material=render_material,
|
| 384 |
+
name=geom_name,
|
| 385 |
+
)
|
| 386 |
+
if has_collisions:
|
| 387 |
+
builder.add_cylinder_collision(
|
| 388 |
+
t_visual2link,
|
| 389 |
+
radius=geom_radius,
|
| 390 |
+
half_length=geom_half_length,
|
| 391 |
+
material=physx_material,
|
| 392 |
+
density=geom_density,
|
| 393 |
+
# name=geom_name
|
| 394 |
+
)
|
| 395 |
+
|
| 396 |
+
elif geom_type == "plane":
|
| 397 |
+
if not WARNED_ONCE["plane"]:
|
| 398 |
+
logger.warn(
|
| 399 |
+
"Currently ManiSkill does not support loading plane geometries from MJCFs"
|
| 400 |
+
)
|
| 401 |
+
WARNED_ONCE["plane"] = True
|
| 402 |
+
elif geom_type == "ellipsoid":
|
| 403 |
+
if not WARNED_ONCE["ellipsoid"]:
|
| 404 |
+
logger.warn(
|
| 405 |
+
"Currently ManiSkill does not support loading ellipsoid geometries from MJCFs"
|
| 406 |
+
)
|
| 407 |
+
WARNED_ONCE["ellipsoid"] = True
|
| 408 |
+
elif geom_type == "mesh":
|
| 409 |
+
mesh_name = geom_attrib.get("mesh")
|
| 410 |
+
mesh_attrib = self._meshes[mesh_name].attrib
|
| 411 |
+
mesh_scale = self.scale * np.array(
|
| 412 |
+
_parse_vec(mesh_attrib, "scale", np.array([1, 1, 1]))
|
| 413 |
+
)
|
| 414 |
+
# TODO refquat
|
| 415 |
+
mesh_file = os.path.join(self._mesh_dir, mesh_attrib["file"])
|
| 416 |
+
if has_visual_body:
|
| 417 |
+
builder.add_visual_from_file(
|
| 418 |
+
mesh_file,
|
| 419 |
+
pose=t_visual2link,
|
| 420 |
+
scale=mesh_scale,
|
| 421 |
+
material=render_material,
|
| 422 |
+
)
|
| 423 |
+
if has_collisions:
|
| 424 |
+
if self.load_multiple_collisions_from_file:
|
| 425 |
+
builder.add_multiple_convex_collisions_from_file(
|
| 426 |
+
mesh_file,
|
| 427 |
+
pose=t_visual2link,
|
| 428 |
+
scale=mesh_scale,
|
| 429 |
+
material=physx_material,
|
| 430 |
+
density=geom_density,
|
| 431 |
+
)
|
| 432 |
+
else:
|
| 433 |
+
builder.add_convex_collision_from_file(
|
| 434 |
+
mesh_file,
|
| 435 |
+
pose=t_visual2link,
|
| 436 |
+
scale=mesh_scale,
|
| 437 |
+
material=physx_material,
|
| 438 |
+
density=geom_density,
|
| 439 |
+
)
|
| 440 |
+
elif geom_type == "sdf":
|
| 441 |
+
raise NotImplementedError("SDF geom type not supported at the moment")
|
| 442 |
+
elif geom_type == "hfield":
|
| 443 |
+
raise NotImplementedError("Height fields are not supported at the moment")
|
| 444 |
+
|
| 445 |
+
def _build_link(
|
| 446 |
+
self, body: Element, body_attrib, link_builder: LinkBuilder, defaults
|
| 447 |
+
):
|
| 448 |
+
"""sets inertial, visual/collision shapes"""
|
| 449 |
+
# inertial
|
| 450 |
+
# TODO (stao)
|
| 451 |
+
# if (
|
| 452 |
+
# link.inertial
|
| 453 |
+
# and link.inertial.mass != 0
|
| 454 |
+
# and not np.array_equal(link.inertial.inertia, np.zeros((3, 3)))
|
| 455 |
+
# ):
|
| 456 |
+
# t_inertial2link = self._pose_from_origin(link.inertial.origin, self.scale)
|
| 457 |
+
# mass = link.inertial.mass
|
| 458 |
+
# inertia = link.inertial.inertia
|
| 459 |
+
|
| 460 |
+
# if np.array_equal(np.diag(np.diag(inertia)), inertia):
|
| 461 |
+
# eigs = np.diag(inertia)
|
| 462 |
+
# vecs = np.eye(3)
|
| 463 |
+
# else:
|
| 464 |
+
# eigs, vecs = np.linalg.eigh(inertia)
|
| 465 |
+
# if np.linalg.det(vecs) < 0:
|
| 466 |
+
# vecs[:, 2] = -vecs[:, 2]
|
| 467 |
+
|
| 468 |
+
# assert all([x > 0 for x in eigs]), "invalid moment of inertia"
|
| 469 |
+
|
| 470 |
+
# t_inertia2inertial = np.eye(4)
|
| 471 |
+
# t_inertia2inertial[:3, :3] = vecs
|
| 472 |
+
# t_inertia2inertial = Pose(t_inertia2inertial)
|
| 473 |
+
|
| 474 |
+
# t_inertial2link = t_inertial2link * t_inertia2inertial
|
| 475 |
+
# scale3 = self.scale**3
|
| 476 |
+
# scale5 = self.scale**5
|
| 477 |
+
# link_builder.set_mass_and_inertia(
|
| 478 |
+
# mass * scale3, t_inertial2link, scale5 * eigs
|
| 479 |
+
# )
|
| 480 |
+
|
| 481 |
+
# go through each geometry of the body
|
| 482 |
+
for geo_count, geom in enumerate(body.findall("geom")):
|
| 483 |
+
self._build_geom(geom, link_builder, defaults)
|
| 484 |
+
|
| 485 |
+
def _parse_texture(self, texture: Element):
|
| 486 |
+
"""Parse MJCF textures to then be referenced by materials: https://mujoco.readthedocs.io/en/stable/XMLreference.html#asset-texture
|
| 487 |
+
|
| 488 |
+
NOTE:
|
| 489 |
+
- Procedural texture generation is currently not supported.
|
| 490 |
+
- Different texture types are not really supported
|
| 491 |
+
"""
|
| 492 |
+
name = texture.get("name")
|
| 493 |
+
file = texture.get("file")
|
| 494 |
+
self._textures[name] = MJCFTexture(
|
| 495 |
+
name=name,
|
| 496 |
+
type=texture.get("type"),
|
| 497 |
+
rgb1=texture.get("rgb1"),
|
| 498 |
+
rgb2=texture.get("rgb2"),
|
| 499 |
+
file=os.path.join(self._mesh_dir, file) if file else None,
|
| 500 |
+
)
|
| 501 |
+
|
| 502 |
+
def _parse_material(self, material: Element):
|
| 503 |
+
"""Parse MJCF materials in asset to sapien render materials"""
|
| 504 |
+
name = material.get("name")
|
| 505 |
+
texture = None
|
| 506 |
+
if material.get("texture") in self._textures:
|
| 507 |
+
texture = self._textures[material.get("texture")]
|
| 508 |
+
|
| 509 |
+
# NOTE: Procedural texture generation is currently not supported.
|
| 510 |
+
# Defaults from https://mujoco.readthedocs.io/en/stable/XMLreference.html#asset-material
|
| 511 |
+
em_val = _parse_float(material.attrib, "emission", 0)
|
| 512 |
+
rgba = np.array(_parse_vec(material.attrib, "rgba", [1, 1, 1, 1]))
|
| 513 |
+
render_material = RenderMaterial(
|
| 514 |
+
emission=[rgba[0] * em_val, rgba[1] * em_val, rgba[2] * em_val, 1],
|
| 515 |
+
base_color=rgba,
|
| 516 |
+
specular=_parse_float(material.attrib, "specular", 0),
|
| 517 |
+
# TODO (stao): double check below 2 properties are right
|
| 518 |
+
roughness=1 - _parse_float(material.attrib, "reflectance", 0),
|
| 519 |
+
metallic=_parse_float(material.attrib, "shininess", 0.5),
|
| 520 |
+
)
|
| 521 |
+
if texture is not None and texture.file is not None:
|
| 522 |
+
render_material.base_color_texture = RenderTexture2D(filename=texture.file)
|
| 523 |
+
self._materials[name] = render_material
|
| 524 |
+
|
| 525 |
+
def _parse_mesh(self, mesh: Element):
|
| 526 |
+
"""Parse MJCF mesh data in asset"""
|
| 527 |
+
# Vertex, normal, texcoord are not supported, file is required
|
| 528 |
+
file = mesh.get("file")
|
| 529 |
+
assert (
|
| 530 |
+
file is not None
|
| 531 |
+
), "Mesh file not provided. While Mujoco allows file to be optional, for loading into SAPIEN this is not optional"
|
| 532 |
+
name = mesh.get("name", os.path.splitext(file)[0])
|
| 533 |
+
self._meshes[name] = mesh
|
| 534 |
+
|
| 535 |
+
@property
|
| 536 |
+
def _root_default(self):
|
| 537 |
+
if "__root__" not in self._defaults:
|
| 538 |
+
return {}
|
| 539 |
+
return self._defaults["__root__"]
|
| 540 |
+
|
| 541 |
+
    def _parse_default(self, node: Element, parent: Element):
        """Parse a MJCF default attribute. https://mujoco.readthedocs.io/en/stable/modeling.html#default-settings explains how it works"""
        class_name = "__root__"
        if node.tag == "default":
            if "class" in node.attrib:
                class_name = node.attrib["class"]
            # nested defaults inherit from the enclosing default class
            if parent is not None and "class" in parent.attrib:
                self._defaults[class_name] = deepcopy(
                    self._defaults[parent.attrib["class"]]
                )
            else:
                # NOTE(review): a nested default whose parent has no "class"
                # attribute starts from {} rather than inheriting "__root__" —
                # confirm this matches MuJoCo's inheritance semantics.
                self._defaults[class_name] = {}
        for child in node:
            if child.tag == "default":
                # recurse into nested <default> blocks, passing this node as parent
                self._parse_default(child, node)
            else:
                # merge per-tag attributes (e.g. "geom", "joint") into the class entry
                if child.tag in self._defaults[class_name]:
                    self._defaults[class_name][child.tag] = _merge_attrib(
                        self._defaults[class_name][child.tag], child.attrib
                    )
                else:
                    self._defaults[class_name][child.tag] = child.attrib
|
| 563 |
+
|
| 564 |
+
    def _parse_body(
        self,
        body: Element,
        parent: LinkBuilder,
        incoming_defaults: dict,
        builder: ArticulationBuilder,
    ):
        """Recursively convert a MJCF <body> subtree into links/joints on *builder*.

        One link is created per joint of the body; when a body has multiple
        joints, intermediate "dummy" links are chained (SAPIEN supports only one
        joint between two links). Bodies whose childclass matches
        ``self.ignore_classes`` are skipped along with their subtree.
        """
        body_class = body.get("childclass")
        if body_class is None:
            defaults = incoming_defaults
        else:
            for pattern in self.ignore_classes:
                if re.match(pattern, body_class):
                    return
            defaults = _merge_attrib(incoming_defaults, self._defaults[body_class])

        if "body" in defaults:
            body_attrib = _merge_attrib(defaults["body"], body.attrib)
        else:
            body_attrib = body.attrib

        body_name = body_attrib["name"]
        body_pos = _parse_vec(body_attrib, "pos", (0.0, 0.0, 0.0))
        body_ori = _parse_orientation(
            body_attrib, use_degrees=self._use_degrees, euler_seq=self._euler_seq
        )

        body_pos *= self.scale
        body_pose = Pose(body_pos, q=body_ori)

        link_builder = parent

        joints = body.findall("joint")
        # if body has no joints, it is a fixed joint
        if len(joints) == 0:
            joints = [ET.Element("joint", attrib=dict(type="fixed"))]
        for i, joint in enumerate(joints):
            # note there can be multiple joints here. We create some dummy links to simulate that
            # resolve joint attributes: body defaults, then joint class, then the element itself
            incoming_attributes = []
            if "joint" in defaults:
                incoming_attributes.append(defaults["joint"])
            if "class" in joint.attrib:
                incoming_attributes.append(
                    self._defaults[joint.attrib["class"]]["joint"]
                )
            incoming_attributes.append(joint.attrib)
            joint_attrib = _merge_attrib(dict(), incoming_attributes)

            # build the link
            link_builder = builder.create_link_builder(parent=link_builder)
            link_builder.set_joint_name(joint_attrib.get("name", ""))
            if i == len(joints) - 1:
                link_builder.set_name(f"{body_name}")
                # the last link is the "real" one, the rest are dummy links to support multiple joints acting on a link
                self._build_link(body, body_attrib, link_builder, defaults)
            else:
                link_builder.set_name(f"{body_name}_dummy_{i}")
            self._link2builder[link_builder.name] = link_builder

            joint_type = joint_attrib.get("type", "hinge")
            joint_pos = np.array(_parse_vec(joint_attrib, "pos", [0, 0, 0]))
            # only the first joint in the chain carries the body's pose offset
            t_joint2parent = Pose()
            if i == 0:
                t_joint2parent = body_pose

            friction = _parse_float(joint_attrib, "frictionloss", 0)
            damping = _parse_float(joint_attrib, "damping", 0)

            # compute joint axis and relative transformations
            axis = _parse_vec(joint_attrib, "axis", [0.0, 0.0, 0.0])
            axis_norm = np.linalg.norm(axis)
            if axis_norm < 1e-3:
                axis = np.array([1.0, 0.0, 0.0])
            else:
                axis /= axis_norm

            # build an orthonormal frame whose x axis is the joint axis
            if abs(axis @ [1.0, 0.0, 0.0]) > 0.9:
                axis1 = np.cross(axis, [0.0, 0.0, 1.0])
                axis1 /= np.linalg.norm(axis1)
            else:
                axis1 = np.cross(axis, [1.0, 0.0, 0.0])
                axis1 /= np.linalg.norm(axis1)
            axis2 = np.cross(axis, axis1)
            t_axis2joint = np.eye(4)
            t_axis2joint[:3, 3] = joint_pos
            t_axis2joint[:3, 0] = axis
            t_axis2joint[:3, 1] = axis1
            t_axis2joint[:3, 2] = axis2
            t_axis2joint = Pose(t_axis2joint)
            t_axis2parent = t_joint2parent * t_axis2joint

            # "limited" may be explicit or auto-derived from the presence of "range"
            limited = joint_attrib.get("limited", "auto")
            if limited == "auto":
                if "range" in joint_attrib:
                    limited = True
                else:
                    limited = False
            elif limited == "true":
                limited = True
            else:
                limited = False

            # set the joint properties to create it
            if joint_type == "hinge":
                if limited:
                    joint_limits = _parse_vec(joint_attrib, "range", [0, 0])
                    if self._use_degrees:
                        joint_limits = np.deg2rad(joint_limits)
                    link_builder.set_joint_properties(
                        "revolute_unwrapped",
                        [joint_limits],
                        t_axis2parent,
                        t_axis2joint,
                        friction,
                        damping,
                    )
                else:
                    link_builder.set_joint_properties(
                        "revolute",
                        [[-np.inf, np.inf]],
                        t_axis2parent,
                        t_axis2joint,
                        friction,
                        damping,
                    )
            elif joint_type == "slide":
                if limited:
                    limits = [_parse_vec(joint_attrib, "range", [0, 0]) * self.scale]
                else:
                    limits = [[-np.inf, np.inf]]
                link_builder.set_joint_properties(
                    "prismatic",
                    limits,
                    t_axis2parent,
                    t_axis2joint,
                    friction,
                    damping,
                )
            elif joint_type == "fixed":
                link_builder.set_joint_properties(
                    "fixed",
                    [],
                    t_axis2parent,
                    t_axis2joint,
                    friction,
                    damping,
                )

        # ensure adjacent links do not collide. Normally SAPIEN does this
        # but we often create dummy links to support multiple joints between two link functionality
        # that mujoco has so it must be done here.
        if parent is not None:
            parent.collision_groups[2] |= 1 << (self._group_count)
            link_builder.collision_groups[2] |= 1 << (self._group_count)
            self._group_count += 1

        # recurse into child bodies, attaching them to this body's final link
        for child in body.findall("body"):
            self._parse_body(child, link_builder, defaults, builder)
|
| 722 |
+
|
| 723 |
+
def _parse_constraint(self, constraint: Element):
|
| 724 |
+
joint_elems = []
|
| 725 |
+
for joint in constraint.findall("joint"):
|
| 726 |
+
joint_elems.append(joint)
|
| 727 |
+
return MimicJointRecord(
|
| 728 |
+
joint_elems[0].attrib["joint"],
|
| 729 |
+
joint_elems[1].attrib["joint"],
|
| 730 |
+
1,
|
| 731 |
+
0
|
| 732 |
+
# joint.mimic.multiplier,
|
| 733 |
+
# joint.mimic.offset,
|
| 734 |
+
)
|
| 735 |
+
|
| 736 |
+
def _parse_mjcf(
|
| 737 |
+
self, mjcf_string: str
|
| 738 |
+
) -> Tuple[List[ArticulationBuilder], List[ActorBuilder], None]:
|
| 739 |
+
"""Helper function for self.parse"""
|
| 740 |
+
xml: Element = ET.fromstring(mjcf_string.encode("utf-8"))
|
| 741 |
+
self.xml = xml
|
| 742 |
+
# handle includes
|
| 743 |
+
for include in xml.findall("include"):
|
| 744 |
+
include_file = include.attrib["file"]
|
| 745 |
+
with open(os.path.join(self.mjcf_dir, include_file), "r") as f:
|
| 746 |
+
include_file_str = f.read()
|
| 747 |
+
include_xml = ET.fromstring(include_file_str.encode("utf-8"))
|
| 748 |
+
for child in include_xml:
|
| 749 |
+
xml.append(child)
|
| 750 |
+
|
| 751 |
+
self._use_degrees = True # angles are in degrees by default
|
| 752 |
+
self._mjcf_options = DEFAULT_MJCF_OPTIONS
|
| 753 |
+
self._euler_seq = [1, 2, 3] # XYZ by default
|
| 754 |
+
|
| 755 |
+
### Parse compiler options ###
|
| 756 |
+
compiler = xml.find("compiler")
|
| 757 |
+
if compiler is not None:
|
| 758 |
+
self._use_degrees = (
|
| 759 |
+
compiler.attrib.get("angle", "degree").lower() == "degree"
|
| 760 |
+
)
|
| 761 |
+
self._euler_seq = [
|
| 762 |
+
"xyz".index(c) + 1
|
| 763 |
+
for c in compiler.attrib.get("eulerseq", "xyz").lower()
|
| 764 |
+
]
|
| 765 |
+
self._mesh_dir = compiler.attrib.get("meshdir", ".")
|
| 766 |
+
else:
|
| 767 |
+
self._mesh_dir = "."
|
| 768 |
+
self._mesh_dir = os.path.join(self.mjcf_dir, self._mesh_dir)
|
| 769 |
+
|
| 770 |
+
### Parse options/flags ###
|
| 771 |
+
option = xml.find("option")
|
| 772 |
+
if option is not None:
|
| 773 |
+
for flag in option.findall("flag"):
|
| 774 |
+
update_dict = dict()
|
| 775 |
+
for k, v in flag.attrib.items():
|
| 776 |
+
update_dict[k] = True if v == "enable" else False
|
| 777 |
+
self._mjcf_options.update(update_dict)
|
| 778 |
+
|
| 779 |
+
### Parse assets ###
|
| 780 |
+
for asset in xml.findall("asset"):
|
| 781 |
+
for texture in asset.findall("texture"):
|
| 782 |
+
self._parse_texture(texture)
|
| 783 |
+
for material in asset.findall("material"):
|
| 784 |
+
self._parse_material(material)
|
| 785 |
+
for mesh in asset.findall("mesh"):
|
| 786 |
+
self._parse_mesh(mesh)
|
| 787 |
+
|
| 788 |
+
### Parse defaults ###
|
| 789 |
+
for default in xml.findall("default"):
|
| 790 |
+
self._parse_default(default, None)
|
| 791 |
+
|
| 792 |
+
### Parse Kinematic Trees / Articulations in World Body ###
|
| 793 |
+
|
| 794 |
+
# NOTE (stao): For now we assume there is only one articulation. Some setups like Aloha 2 are technically 2 articulations
|
| 795 |
+
# but you can treat it as a single one anyway
|
| 796 |
+
articulation_builders: List[ArticulationBuilder] = []
|
| 797 |
+
actor_builders: List[ActorBuilder] = []
|
| 798 |
+
for i, body in enumerate(xml.find("worldbody").findall("body")):
|
| 799 |
+
# determine first if this body is really an articulation or a actor
|
| 800 |
+
has_freejoint = body.find("freejoint") is not None
|
| 801 |
+
|
| 802 |
+
def has_joint(body):
|
| 803 |
+
if body.find("joint") is not None:
|
| 804 |
+
return True
|
| 805 |
+
for child in body.findall("body"):
|
| 806 |
+
if has_joint(child):
|
| 807 |
+
return True
|
| 808 |
+
return False
|
| 809 |
+
|
| 810 |
+
is_articulation = has_joint(body) or has_freejoint
|
| 811 |
+
# <body> tag refers to an artciulation in physx only if there is another body tag inside it
|
| 812 |
+
if is_articulation:
|
| 813 |
+
builder = self.scene.create_articulation_builder()
|
| 814 |
+
articulation_builders.append(builder)
|
| 815 |
+
dummy_root_link = builder.create_link_builder(None)
|
| 816 |
+
dummy_root_link.name = f"dummy_root_{i}"
|
| 817 |
+
|
| 818 |
+
# Check if the body tag only contains another body tag and nothing else
|
| 819 |
+
body_children = list(body)
|
| 820 |
+
tag_counts = defaultdict(int)
|
| 821 |
+
for child in body_children:
|
| 822 |
+
tag_counts[child.tag] += 1
|
| 823 |
+
if (
|
| 824 |
+
tag_counts["body"] == 1
|
| 825 |
+
and "geom" not in tag_counts
|
| 826 |
+
and "joint" not in tag_counts
|
| 827 |
+
):
|
| 828 |
+
# If so, skip the current body and continue with its child
|
| 829 |
+
body = body.find("body")
|
| 830 |
+
self._parse_body(body, dummy_root_link, self._root_default, builder)
|
| 831 |
+
|
| 832 |
+
# handle free joints
|
| 833 |
+
fix_root_link = not has_freejoint
|
| 834 |
+
if fix_root_link:
|
| 835 |
+
dummy_root_link.set_joint_properties(
|
| 836 |
+
type="fixed",
|
| 837 |
+
limits=None,
|
| 838 |
+
pose_in_parent=Pose(),
|
| 839 |
+
pose_in_child=Pose(),
|
| 840 |
+
)
|
| 841 |
+
else:
|
| 842 |
+
builder = self.scene.create_actor_builder()
|
| 843 |
+
body_type = "dynamic" if has_freejoint else "static"
|
| 844 |
+
actor_builders.append(builder)
|
| 845 |
+
# NOTE that mujoco supports nested body tags to define groups of geoms
|
| 846 |
+
cur_body = body
|
| 847 |
+
while cur_body is not None:
|
| 848 |
+
for i, geom in enumerate(cur_body.findall("geom")):
|
| 849 |
+
self._build_geom(geom, builder, self._root_default)
|
| 850 |
+
builder.set_name(geom.get("name", ""))
|
| 851 |
+
builder.set_physx_body_type(body_type)
|
| 852 |
+
cur_body = cur_body.find("body")
|
| 853 |
+
|
| 854 |
+
### Parse geoms in World Body ###
|
| 855 |
+
# These can't have freejoints so they can't be dynamic
|
| 856 |
+
for i, geom in enumerate(xml.find("worldbody").findall("geom")):
|
| 857 |
+
builder = self.scene.create_actor_builder()
|
| 858 |
+
actor_builders.append(builder)
|
| 859 |
+
self._build_geom(geom, builder, self._root_default)
|
| 860 |
+
builder.set_name(geom.get("name", ""))
|
| 861 |
+
builder.set_physx_body_type("static")
|
| 862 |
+
|
| 863 |
+
### Parse contact and exclusions ###
|
| 864 |
+
for contact in xml.findall("contact"):
|
| 865 |
+
# TODO
|
| 866 |
+
pass
|
| 867 |
+
|
| 868 |
+
### Parse equality constraints ###
|
| 869 |
+
# tendon = xml.find("tendon")
|
| 870 |
+
# if tendon is not None:
|
| 871 |
+
# # TODO (stao): unclear if this actually works
|
| 872 |
+
# for constraint in tendon.findall("fixed"):
|
| 873 |
+
# record = self._parse_constraint(constraint)
|
| 874 |
+
# builder.mimic_joint_records.append(record)
|
| 875 |
+
|
| 876 |
+
if not self._mjcf_options["contact"]:
|
| 877 |
+
# means to disable all contacts
|
| 878 |
+
for actor in actor_builders:
|
| 879 |
+
actor.collision_groups[2] |= 1 << 1
|
| 880 |
+
for art in articulation_builders:
|
| 881 |
+
for link in art.link_builders:
|
| 882 |
+
link.collision_groups[2] |= 1 << 1
|
| 883 |
+
|
| 884 |
+
return articulation_builders, actor_builders, []
|
| 885 |
+
|
| 886 |
+
def parse(self, mjcf_file: str, package_dir=None):
    """Parse an MJCF file into articulation builders, actor builders and sensor configs.

    Records ``package_dir`` and the MJCF file's directory on the loader so
    relative asset paths can be resolved later, then hands the raw XML text
    to ``_parse_mjcf`` which does the actual work.
    """
    self.package_dir = package_dir
    self.mjcf_dir = os.path.dirname(mjcf_file)
    with open(mjcf_file, "r") as xml_fp:
        xml_text = xml_fp.read()
    return self._parse_mjcf(xml_text)
|
| 895 |
+
|
| 896 |
+
def load(self, mjcf_file: str, package_dir=None):
    """Parse a given MJCF .xml file, build all articulations and actors in the
    scene, and return the first articulation.

    Args:
        mjcf_file: path to the MJCF (.xml) file to load.
        package_dir: base directory used to resolve asset paths in the file.

    Returns:
        The first built articulation. Note this raises an ``IndexError`` if the
        MJCF file defines no articulations; actors are built but not returned.
    """
    # cameras is currently unused here (sensor extraction is still a TODO below)
    articulation_builders, actor_builders, cameras = self.parse(
        mjcf_file, package_dir
    )

    articulations: List[PhysxArticulation] = []
    for b in articulation_builders:
        articulations.append(b.build())

    # actors are built for their side effect of being added to the scene;
    # they are not returned to the caller
    actors = []
    for b in actor_builders:
        actors.append(b.build())

    # TODO (stao): how does mjcf specify sensors?
    # name2entity = dict()
    # for a in articulations:
    #     for l in a.links:
    #         name2entity[l.name] = l.entity

    # for a in actors:
    #     name2entity[a.name] = a
    return articulations[0]
|
| 919 |
+
|
| 920 |
+
# TODO (stao): function to also load the scene in?
|
| 921 |
+
# TODO (stao): function to load camera configs?
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/building/actor_builder.py
ADDED
|
@@ -0,0 +1,368 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import TYPE_CHECKING, List, Optional, Sequence, Union
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import sapien
|
| 7 |
+
import sapien.physx as physx
|
| 8 |
+
import torch
|
| 9 |
+
from sapien import ActorBuilder as SAPIENActorBuilder
|
| 10 |
+
from sapien.wrapper.coacd import do_coacd
|
| 11 |
+
|
| 12 |
+
from mani_skill import logger
|
| 13 |
+
from mani_skill.utils import common
|
| 14 |
+
from mani_skill.utils.structs.actor import Actor
|
| 15 |
+
from mani_skill.utils.structs.pose import Pose, to_sapien_pose
|
| 16 |
+
|
| 17 |
+
if TYPE_CHECKING:
|
| 18 |
+
from mani_skill.envs.scene import ManiSkillScene
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class ActorBuilder(SAPIENActorBuilder):
    """
    ActorBuilder class to flexibly build actors in both CPU and GPU simulations.
    This directly inherits the original flexible ActorBuilder from sapien and changes the build functions to support a batch of scenes and return a batch of Actors
    """

    scene: ManiSkillScene

    def __init__(self):
        super().__init__()
        self.initial_pose = None
        self.scene_idxs = None
        self._allow_overlapping_plane_collisions = False
        # poses of plane collision shapes already attached, used to deduplicate
        # identical planes across parallel sub-scenes
        self._plane_collision_poses = set()
        # procedurally generated render shapes to attach when building the entity
        self._procedural_shapes = []

    def set_scene_idxs(
        self,
        scene_idxs: Optional[
            Union[List[int], Sequence[int], torch.Tensor, np.ndarray]
        ] = None,
    ):
        """
        Set a list of scene indices to build this object in. Cannot be used in conjunction with scene mask
        """
        self.scene_idxs = scene_idxs
        return self

    def set_allow_overlapping_plane_collisions(self, v: bool):
        """Set whether or not to permit allowing overlapping plane collisions. In general if you are creating an Actor with a plane collision that is parallelized across multiple
        sub-scenes, you only need one of those collision shapes. If you add multiple, it will cause the simulation to slow down significantly. By default this is set to False
        """
        self._allow_overlapping_plane_collisions = v
        return self

    def build_physx_component(self, link_parent=None):
        """Create and return the physx body component holding every recorded
        collision shape.

        Mirrors the SAPIEN implementation but skips duplicate plane collision
        shapes (see ``set_allow_overlapping_plane_collisions``).

        Args:
            link_parent: parent link component, only used when
                ``physx_body_type`` is ``"link"``.

        Raises:
            ValueError: if a collision record has an unknown shape type.
            Exception: if ``physx_body_type`` is not one of
                dynamic/kinematic/static/link.
        """
        for r in self.collision_records:
            assert isinstance(r.material, physx.PhysxMaterial)

        if self.physx_body_type == "dynamic":
            component = physx.PhysxRigidDynamicComponent()
        elif self.physx_body_type == "kinematic":
            component = physx.PhysxRigidDynamicComponent()
            component.kinematic = True
        elif self.physx_body_type == "static":
            component = physx.PhysxRigidStaticComponent()
        elif self.physx_body_type == "link":
            component = physx.PhysxArticulationLinkComponent(link_parent)
        else:
            raise Exception(f"invalid physx body type [{self.physx_body_type}]")

        for r in self.collision_records:
            # BUG FIX: an unknown record type used to be raised as a
            # RuntimeError *inside* the try block below and was therefore
            # immediately swallowed by the ``except RuntimeError`` handler,
            # silently dropping the invalid record. Validate up front and
            # raise a ValueError so programming errors actually surface.
            if r.type not in (
                "plane",
                "box",
                "capsule",
                "cylinder",
                "sphere",
                "convex_mesh",
                "nonconvex_mesh",
                "multiple_convex_meshes",
            ):
                raise ValueError(f"invalid collision shape type [{r.type}]")
            try:
                if r.type == "plane":
                    # skip adding plane collisions if we already added one.
                    pose_key = (tuple(r.pose.p), tuple(r.pose.q))
                    if (
                        self._allow_overlapping_plane_collisions
                        or pose_key not in self._plane_collision_poses
                    ):
                        shape = physx.PhysxCollisionShapePlane(
                            material=r.material,
                        )
                        shapes = [shape]
                        self._plane_collision_poses.add(pose_key)
                    else:
                        continue
                elif r.type == "box":
                    shape = physx.PhysxCollisionShapeBox(
                        half_size=r.scale, material=r.material
                    )
                    shapes = [shape]
                elif r.type == "capsule":
                    shape = physx.PhysxCollisionShapeCapsule(
                        radius=r.radius,
                        half_length=r.length,
                        material=r.material,
                    )
                    shapes = [shape]
                elif r.type == "cylinder":
                    shape = physx.PhysxCollisionShapeCylinder(
                        radius=r.radius,
                        half_length=r.length,
                        material=r.material,
                    )
                    shapes = [shape]
                elif r.type == "sphere":
                    shape = physx.PhysxCollisionShapeSphere(
                        radius=r.radius,
                        material=r.material,
                    )
                    shapes = [shape]
                elif r.type == "convex_mesh":
                    shape = physx.PhysxCollisionShapeConvexMesh(
                        filename=r.filename,
                        scale=r.scale,
                        material=r.material,
                    )
                    shapes = [shape]
                elif r.type == "nonconvex_mesh":
                    shape = physx.PhysxCollisionShapeTriangleMesh(
                        filename=r.filename,
                        scale=r.scale,
                        material=r.material,
                    )
                    shapes = [shape]
                elif r.type == "multiple_convex_meshes":
                    if r.decomposition == "coacd":
                        params = r.decomposition_params
                        if params is None:
                            params = dict()

                        filename = do_coacd(r.filename, **params)
                    else:
                        filename = r.filename

                    shapes = physx.PhysxCollisionShapeConvexMesh.load_multiple(
                        filename=filename,
                        scale=r.scale,
                        material=r.material,
                    )
            except RuntimeError:
                # ignore runtime error (e.g., failed to cook a mesh)
                continue

            for shape in shapes:
                shape.local_pose = r.pose
                shape.set_collision_groups(self.collision_groups)
                shape.set_density(r.density)
                shape.set_patch_radius(r.patch_radius)
                shape.set_min_patch_radius(r.min_patch_radius)
                component.attach(shape)

        # explicit inertial properties only apply when the user turned off
        # auto-inertia computation and the body is not kinematic
        if hasattr(self, "_auto_inertial"):
            if not self._auto_inertial and self.physx_body_type != "kinematic":
                component.mass = self._mass
                component.cmass_local_pose = self._cmass_local_pose
                component.inertia = self._inertia

        return component

    def build_dynamic(self, name):
        """Build this actor with a dynamic physx body. See ``build`` for details."""
        self.set_physx_body_type("dynamic")
        return self.build(name=name)

    def build_kinematic(self, name):
        """Build this actor with a kinematic physx body. See ``build`` for details."""
        self.set_physx_body_type("kinematic")
        return self.build(name=name)

    def build_static(self, name):
        """Build this actor with a static physx body. See ``build`` for details."""
        self.set_physx_body_type("static")
        return self.build(name=name)

    def build_entity(self):
        """
        build the raw sapien entity. Modifies original SAPIEN function to accept new procedurally generated render components
        """
        entity = sapien.Entity()
        if self.visual_records or len(self._procedural_shapes) > 0:
            render_component = self.build_render_component()
            for shape in self._procedural_shapes:
                render_component.attach(shape)
            entity.add_component(render_component)
        entity.add_component(self.build_physx_component())
        entity.name = self.name
        return entity

    def build(self, name):
        """
        Build the actor with the given name.

        Different to the original SAPIEN API, a unique name is required here.
        One entity is built per selected sub-scene (see ``set_scene_idxs``)
        and the batch is returned as a single ``Actor``.
        """
        self.set_name(name)

        assert (
            self.name is not None
            and self.name != ""
            and self.name not in self.scene.actors
        ), "built actors in ManiSkill must have unique names and cannot be None or empty strings"

        # default to building in every sub-scene
        if self.scene_idxs is not None:
            self.scene_idxs = common.to_tensor(
                self.scene_idxs, device=self.scene.device
            ).to(torch.int)
        else:
            self.scene_idxs = torch.arange((self.scene.num_envs), dtype=int)
        num_actors = len(self.scene_idxs)

        if self.initial_pose is None:
            logger.warn(
                f"No initial pose set for actor builder of {self.name}, setting to default pose q=[1,0,0,0], p=[0,0,0]. Not setting reasonable initial poses may slow down simulation, see https://github.com/haosulab/ManiSkill/issues/421."
            )
            self.initial_pose = Pose.create(sapien.Pose())
        else:
            self.initial_pose = Pose.create(self.initial_pose, device=self.scene.device)

        # a single initial pose is broadcast to all selected sub-scenes
        initial_pose_b = self.initial_pose.raw_pose.shape[0]
        assert initial_pose_b == 1 or initial_pose_b == num_actors
        initial_pose_np = common.to_numpy(self.initial_pose.raw_pose)
        if initial_pose_b == 1:
            initial_pose_np = initial_pose_np.repeat(num_actors, axis=0)
        if self.scene.parallel_in_single_scene:
            # shift each copy by its sub-scene's offset so copies don't overlap
            initial_pose_np[:, :3] += self.scene.scene_offsets_np[
                common.to_numpy(self.scene_idxs)
            ]
        entities = []

        for i, scene_idx in enumerate(self.scene_idxs):
            if self.scene.parallel_in_single_scene:
                sub_scene = self.scene.sub_scenes[0]
            else:
                sub_scene = self.scene.sub_scenes[scene_idx]
            entity = self.build_entity()
            # prepend scene idx to entity name to indicate which sub-scene it is in
            entity.name = f"scene-{scene_idx}_{self.name}"
            # set pose before adding to scene
            entity.pose = to_sapien_pose(initial_pose_np[i])
            sub_scene.add_entity(entity)
            entities.append(entity)
        actor = Actor.create_from_entities(entities, self.scene, self.scene_idxs)

        # if it is a static body type and this is a GPU sim but we are given a single initial pose, we repeat it for the purposes of observations
        if (
            self.physx_body_type == "static"
            and initial_pose_b == 1
            and self.scene.gpu_sim_enabled
        ):
            actor.initial_pose = Pose.create(
                self.initial_pose.raw_pose.repeat(num_actors, 1)
            )
        else:
            actor.initial_pose = self.initial_pose
        self.scene.actors[self.name] = actor
        self.scene.add_to_state_dict_registry(actor)
        return actor

    """
    additional procedurally generated visual meshes
    """

    def add_plane_repeated_visual(
        self,
        pose: sapien.Pose = sapien.Pose(),
        half_size: List[float] = [5, 5],
        mat: sapien.render.RenderMaterial = None,
        texture_repeat: List[float] = [1, 1],
    ):
        """Procedurally generates a repeated 2D texture. Works similarly to https://mujoco.readthedocs.io/en/stable/XMLreference.html#asset-material-texrepeat

        currently this always adds a back face

        Args:
            pose: local pose of the generated plane shape.
            half_size: half extents of the plane in the x/y directions.
            mat: render material applied to the generated mesh.
            texture_repeat: the number of times to repeat the texture in each direction.

        NOTE: the list defaults are shared across calls but are only read,
        never mutated, so this is safe.
        """
        # round the plane up to a whole number of 1x1 meter squares; the
        # boundary vertices are pulled back in at the end to match half_size
        floor_width = half_size[0] * 2
        floor_length = half_size[1] * 2
        floor_width = int(np.ceil(floor_width))
        floor_length = int(np.ceil(floor_length))

        # generate a grid of right triangles that form 1x1 meter squares centered at (0, 0, 0)
        # for squares on the edge we cut them off
        num_verts = (floor_width + 1) * (floor_length + 1)
        vertices = np.zeros((int(num_verts), 3))
        floor_half_width = floor_width / 2
        floor_half_length = floor_length / 2
        xrange = np.arange(start=-floor_half_width, stop=floor_half_width + 1)
        yrange = np.arange(start=-floor_half_length, stop=floor_half_length + 1)
        xx, yy = np.meshgrid(xrange, yrange)
        xys = np.stack((xx, yy), axis=2).reshape(-1, 2)
        vertices[:, 0] = xys[:, 0]
        vertices[:, 1] = xys[:, 1]
        normals = np.zeros((len(vertices), 3))
        normals[:, 2] = -1

        # uv coordinates scale with texture_repeat: each unit square covers
        # texture_repeat repetitions of the texture
        uvs = np.zeros((len(vertices), 2))
        uvs[:, 0] = xys[:, 0] * texture_repeat[0]
        uvs[:, 1] = xys[:, 1] * texture_repeat[1]

        # TODO: This is fast but still two for loops which is a little annoying
        triangles = []
        for i in range(floor_length):
            triangles.append(
                np.stack(
                    [
                        np.arange(floor_width) + i * (floor_width + 1),
                        np.arange(floor_width)
                        + 1
                        + floor_width
                        + i * (floor_width + 1),
                        np.arange(floor_width) + 1 + i * (floor_width + 1),
                    ],
                    axis=1,
                )
            )
        for i in range(floor_length):
            triangles.append(
                np.stack(
                    [
                        np.arange(floor_width)
                        + 1
                        + floor_width
                        + i * (floor_width + 1),
                        np.arange(floor_width)
                        + floor_width
                        + 2
                        + i * (floor_width + 1),
                        np.arange(floor_width) + 1 + i * (floor_width + 1),
                    ],
                    axis=1,
                )
            )
        triangles = np.concatenate(triangles)

        # shrink the boundary rows/columns back to the requested half_size when
        # half_size is not a whole number, adjusting uvs accordingly
        if half_size[0] < floor_half_width:
            diff = floor_half_width - half_size[0]
            for sign in [-1, 1]:
                mask = vertices[:, 0] == floor_half_width * sign
                vertices[mask, 0] = half_size[0] * sign
                uvs[mask, 0] -= 1 * diff * sign * texture_repeat[0]

        if half_size[1] < floor_half_length:
            diff = floor_half_length - half_size[1]
            for sign in [-1, 1]:
                mask = vertices[:, 1] == floor_half_length * sign
                vertices[mask, 1] = half_size[1] * sign
                uvs[mask, 1] -= diff * sign * texture_repeat[1]
        shape = sapien.render.RenderShapeTriangleMesh(
            vertices=vertices,
            triangles=triangles,
            normals=normals,
            uvs=uvs,
            material=mat,
        )
        shape.local_pose = pose
        self._procedural_shapes.append(shape)
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/building/ground.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Useful utilities for creating the ground of a scene
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from __future__ import annotations
|
| 6 |
+
|
| 7 |
+
import os.path as osp
|
| 8 |
+
from typing import TYPE_CHECKING
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
import sapien
|
| 12 |
+
import sapien.render
|
| 13 |
+
|
| 14 |
+
if TYPE_CHECKING:
|
| 15 |
+
from mani_skill.envs.scene import ManiSkillScene
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def build_ground(
    scene: ManiSkillScene,
    floor_width: int = 100,
    floor_length: int = None,
    xy_origin: tuple = (0, 0),
    altitude=0,
    name="ground",
    texture_file=osp.join(osp.dirname(__file__), "assets/grid_texture.png"),
    texture_square_len=4,
    mipmap_levels=4,
    add_collision=True,
):
    """Procedurally creates a checkered floor given a floor width in meters.

    Note that this function runs slower as floor width becomes larger, but in general this function takes no more than 0.05s to run
    and usually is never run more than once as it is for building a scene, not loading.

    Args:
        scene: the ManiSkill scene to build the ground in.
        floor_width: floor extent (meters) along x; also used for uv scaling.
        floor_length: floor extent along y; defaults to ``floor_width``.
        xy_origin: xy center of the generated floor mesh.
        altitude: z height of the floor.
        name: name of the built static actor.
        texture_file: image used as the tiled floor texture.
        texture_square_len: side length (meters) covered by one texture tile.
        mipmap_levels: mipmap levels for the floor texture.
        add_collision: if True also adds an infinite plane collision at ``altitude``.

    Returns:
        the built static ``Actor`` with the procedural floor visual attached.
    """
    ground = scene.create_actor_builder()
    if add_collision:
        # plane collision normal points +z (plane's +x axis rotated up)
        ground.add_plane_collision(
            sapien.Pose(p=[0, 0, altitude], q=[0.7071068, 0, -0.7071068, 0]),
        )
    ground.initial_pose = sapien.Pose(p=[0, 0, 0], q=[1, 0, 0, 0])
    if scene.parallel_in_single_scene:
        # when building a ground and using a parallel render in the GUI, we want to only build one ground visual+collision plane
        ground.set_scene_idxs([0])
    actor = ground.build_static(name=name)

    # generate a grid of right triangles that form 1x1 meter squares centered at (0, 0, 0)
    floor_length = floor_width if floor_length is None else floor_length
    num_verts = (floor_width + 1) * (floor_length + 1)
    vertices = np.zeros((num_verts, 3))
    floor_half_width = floor_width / 2
    floor_half_length = floor_length / 2
    xrange = np.arange(start=-floor_half_width, stop=floor_half_width + 1)
    yrange = np.arange(start=-floor_half_length, stop=floor_half_length + 1)
    xx, yy = np.meshgrid(xrange, yrange)
    xys = np.stack((yy, xx), axis=2).reshape(-1, 2)
    vertices[:, 0] = xys[:, 0] + xy_origin[0]
    vertices[:, 1] = xys[:, 1] + xy_origin[1]
    vertices[:, 2] = altitude
    normals = np.zeros((len(vertices), 3))
    normals[:, 2] = 1

    mat = sapien.render.RenderMaterial()
    mat.base_color_texture = sapien.render.RenderTexture2D(
        filename=texture_file,
        mipmap_levels=mipmap_levels,
    )
    # uv_scale controls how many texture tiles fit across the floor
    uv_scale = floor_width / texture_square_len
    uvs = np.zeros((len(vertices), 2))
    uvs[:, 0] = (xys[:, 0] * uv_scale + floor_half_width) / floor_width
    # NOTE(review): the v axis also uses floor_half_width/floor_width rather
    # than floor_half_length/floor_length — looks like a copy-paste; only
    # matters when floor_length != floor_width. Confirm before changing.
    uvs[:, 1] = (xys[:, 1] * uv_scale + floor_half_width) / floor_width

    # TODO: This is fast but still two for loops which is a little annoying
    triangles = []
    for i in range(floor_length):
        triangles.append(
            np.stack(
                [
                    np.arange(floor_width) + i * (floor_width + 1),
                    np.arange(floor_width) + 1 + floor_width + i * (floor_width + 1),
                    np.arange(floor_width) + 1 + i * (floor_width + 1),
                ],
                axis=1,
            )
        )
    for i in range(floor_length):
        triangles.append(
            np.stack(
                [
                    np.arange(floor_width) + 1 + floor_width + i * (floor_width + 1),
                    np.arange(floor_width) + floor_width + 2 + i * (floor_width + 1),
                    np.arange(floor_width) + 1 + i * (floor_width + 1),
                ],
                axis=1,
            )
        )
    triangles = np.concatenate(triangles)

    shape = sapien.render.RenderShapeTriangleMesh(
        vertices=vertices, triangles=triangles, normals=normals, uvs=uvs, material=mat
    )

    # attach the same render shape to every per-sub-scene entity of the actor
    for obj in actor._objs:
        floor_comp = sapien.render.RenderBodyComponent()
        floor_comp.attach(shape)
        obj.add_component(floor_comp)

    return actor
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/building/mjcf_loader.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import TYPE_CHECKING, Any, List, TypedDict
|
| 4 |
+
|
| 5 |
+
from mani_skill.utils.building.actor_builder import ActorBuilder
|
| 6 |
+
from mani_skill.utils.building.articulation_builder import ArticulationBuilder
|
| 7 |
+
from mani_skill.utils.structs import Actor, Articulation
|
| 8 |
+
|
| 9 |
+
if TYPE_CHECKING:
|
| 10 |
+
from mani_skill.envs.scene import ManiSkillScene
|
| 11 |
+
|
| 12 |
+
from ._mjcf_loader import MJCFLoader as SAPIENMJCFLoader
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class ParsedMJCFData(TypedDict):
    """Typed container for the results of ``MJCFLoader.parse``."""

    # builders for each articulation found in the MJCF
    articulation_builders: List[ArticulationBuilder]
    # builders for non-articulated bodies found in the MJCF
    actor_builders: List[ActorBuilder]
    # camera configs extracted from the MJCF (contents are loader-defined)
    cameras: List[Any]
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class MJCFLoader(SAPIENMJCFLoader):
    """
    Wrapper for the SAPIEN MJCF Loader to support easy parallelization
    """

    scene: ManiSkillScene
    # name prefix applied to every built articulation/actor
    name: str = None
    # if True, all links of loaded articulations ignore self collisions
    disable_self_collisions: bool = False

    def parse(self, mjcf_file, package_dir=None) -> ParsedMJCFData:
        """Parse the MJCF file and name the resulting builders after ``self.name``.

        A single articulation is named ``self.name`` directly; multiple
        articulations get ``-articulation-i`` suffixes, and actors get
        ``-actor-i`` suffixes.
        """
        articulation_builders, actor_builders, cameras = super().parse(
            mjcf_file, package_dir
        )
        for i, a in enumerate(articulation_builders):
            if len(articulation_builders) > 1:
                a.set_name(f"{self.name}-articulation-{i}")
            else:
                a.set_name(f"{self.name}")
            if self.disable_self_collisions:
                for l in a.link_builders:
                    # NOTE (stao): Currently this may not be working as intended
                    l.collision_groups[2] |= 1 << 29
        for i, b in enumerate(actor_builders):
            b.set_name(f"{self.name}-actor-{i}")
        return dict(
            articulation_builders=articulation_builders,
            actor_builders=actor_builders,
            cameras=cameras,
        )

    def load(
        self,
        mjcf_file: str,
        package_dir=None,
        name=None,
        scene_idxs=None,
    ) -> Articulation:
        """
        Args:
            mjcf_file: filename of the MJCF (.xml) file to load
            package_dir: base directory used to resolve asset files in the MJCF file. If an asset path starts with "package://", "package://" is simply removed from the file name
            name (str): name of the created articulation
            scene_idxs (list[int]): the ids of the scenes to build the objects in
        Returns:
            the single Articulation built from the MJCF file. Only the first
            articulation found is built; actor building is currently disabled.
        Raises:
            ValueError: if the MJCF file contains no articulations.
        """
        if name is not None:
            self.name = name
        _parsed_mjcf_data = self.parse(mjcf_file, package_dir)
        articulation_builders = _parsed_mjcf_data["articulation_builders"]
        cameras = _parsed_mjcf_data["cameras"]

        articulations: List[Articulation] = []
        # only the first articulation is built
        for b in articulation_builders[:1]:
            b.set_scene_idxs(scene_idxs)
            b.disable_self_collisions = self.disable_self_collisions
            articulations.append(b.build())

        actors: List[Actor] = []
        # actor building is currently disabled; actors stays empty
        # for b in _parsed_mjcf_data["actor_builders"]:
        #     actors.append(b.build())

        if len(cameras) > 0:
            # map link/actor names to their entities so camera components could
            # be attached to the right entity once sensor extraction lands
            name2entity = dict()
            for a in articulations:
                for sapien_articulation in a._objs:
                    for l in sapien_articulation.links:
                        name2entity[l.name] = l.entity

            for a in actors:
                name2entity[a.name] = a

            # TODO (stao): support extracting sensors/cameras from the MJCF

        if not articulations:
            raise ValueError(f"No articulations found in MJCF file {mjcf_file}")
        return articulations[0]
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/building/urdf_loader.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import TYPE_CHECKING, Any, List, TypedDict
|
| 4 |
+
|
| 5 |
+
from sapien.render import RenderCameraComponent
|
| 6 |
+
from sapien.wrapper.urdf_loader import URDFLoader as SapienURDFLoader
|
| 7 |
+
|
| 8 |
+
from mani_skill.utils.building.actor_builder import ActorBuilder
|
| 9 |
+
from mani_skill.utils.building.articulation_builder import ArticulationBuilder
|
| 10 |
+
from mani_skill.utils.structs.actor import Actor
|
| 11 |
+
from mani_skill.utils.structs.articulation import Articulation
|
| 12 |
+
|
| 13 |
+
if TYPE_CHECKING:
|
| 14 |
+
from mani_skill.envs.scene import ManiSkillScene
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class ParsedURDFData(TypedDict):
    """Typed container for the results of ``URDFLoader.parse``."""

    # builders for each articulation found in the URDF
    articulation_builders: List[ArticulationBuilder]
    # builders for non-articulated bodies found in the URDF
    actor_builders: List[ActorBuilder]
    # camera configs extracted from the URDF (contents are loader-defined)
    cameras: List[Any]
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class URDFLoader(SapienURDFLoader):
|
| 24 |
+
scene: ManiSkillScene
|
| 25 |
+
name: str = None
|
| 26 |
+
disable_self_collisions: bool = False
|
| 27 |
+
|
| 28 |
+
def parse(self, urdf_file, srdf_file=None, package_dir=None) -> ParsedURDFData:
    """Parse a URDF file and rename the resulting builders after ``self.name``.

    A lone articulation is named ``self.name`` directly; when several are
    present each gets an ``-articulation-i`` suffix, and actor builders get
    ``-actor-i`` suffixes. Optionally disables self collisions on all links.
    """
    articulation_builders, actor_builders, cameras = super().parse(
        urdf_file, srdf_file, package_dir
    )
    single_articulation = len(articulation_builders) == 1
    for idx, art_builder in enumerate(articulation_builders):
        if single_articulation:
            art_builder.set_name(f"{self.name}")
        else:
            art_builder.set_name(f"{self.name}-articulation-{idx}")
        if self.disable_self_collisions:
            for link_builder in art_builder.link_builders:
                # NOTE (stao): Currently this may not be working as intended
                link_builder.collision_groups[2] |= 1 << 29
    for idx, act_builder in enumerate(actor_builders):
        act_builder.set_name(f"{self.name}-actor-{idx}")
    return {
        "articulation_builders": articulation_builders,
        "actor_builders": actor_builders,
        "cameras": cameras,
    }
|
| 48 |
+
|
| 49 |
+
def load_file_as_articulation_builder(
|
| 50 |
+
self, urdf_file, srdf_file=None, package_dir=None
|
| 51 |
+
) -> ArticulationBuilder:
|
| 52 |
+
return super().load_file_as_articulation_builder(
|
| 53 |
+
urdf_file, srdf_file, package_dir
|
| 54 |
+
)
|
| 55 |
+
|
| 56 |
+
def load(
|
| 57 |
+
self,
|
| 58 |
+
urdf_file: str,
|
| 59 |
+
srdf_file=None,
|
| 60 |
+
package_dir=None,
|
| 61 |
+
name=None,
|
| 62 |
+
scene_idxs=None,
|
| 63 |
+
) -> Articulation:
|
| 64 |
+
"""
|
| 65 |
+
Args:
|
| 66 |
+
urdf_file: filename for URDL file
|
| 67 |
+
srdf_file: SRDF for urdf_file. If srdf_file is None, it defaults to the ".srdf" file with the same as the urdf file
|
| 68 |
+
package_dir: base directory used to resolve asset files in the URDF file. If an asset path starts with "package://", "package://" is simply removed from the file name
|
| 69 |
+
name (str): name of the created articulation
|
| 70 |
+
scene_idxs (list[int]): the ids of the scenes to build the objects in
|
| 71 |
+
Returns:
|
| 72 |
+
returns a single Articulation loaded from the URDF file. It throws an error if multiple objects exists
|
| 73 |
+
"""
|
| 74 |
+
if name is not None:
|
| 75 |
+
self.name = name
|
| 76 |
+
_parsed_urdf_data = self.parse(urdf_file, srdf_file, package_dir)
|
| 77 |
+
articulation_builders = _parsed_urdf_data["articulation_builders"]
|
| 78 |
+
actor_builders = _parsed_urdf_data["actor_builders"]
|
| 79 |
+
cameras = _parsed_urdf_data["cameras"]
|
| 80 |
+
|
| 81 |
+
if len(articulation_builders) > 1 or len(actor_builders) != 0:
|
| 82 |
+
raise Exception(
|
| 83 |
+
"URDF contains multiple objects, call load_multiple instead"
|
| 84 |
+
)
|
| 85 |
+
|
| 86 |
+
articulations: List[Articulation] = []
|
| 87 |
+
for b in articulation_builders:
|
| 88 |
+
b.set_scene_idxs(scene_idxs)
|
| 89 |
+
b.disable_self_collisions = self.disable_self_collisions
|
| 90 |
+
articulations.append(b.build())
|
| 91 |
+
|
| 92 |
+
actors: List[Actor] = []
|
| 93 |
+
for b in actor_builders:
|
| 94 |
+
actors.append(b.build())
|
| 95 |
+
|
| 96 |
+
if len(cameras) > 0:
|
| 97 |
+
name2entity = dict()
|
| 98 |
+
for a in articulations:
|
| 99 |
+
for sapien_articulation in a._objs:
|
| 100 |
+
for l in sapien_articulation.links:
|
| 101 |
+
name2entity[l.name] = l.entity
|
| 102 |
+
|
| 103 |
+
for a in actors:
|
| 104 |
+
name2entity[a.name] = a
|
| 105 |
+
|
| 106 |
+
for scene_idx, scene in enumerate(self.scene.sub_scenes):
|
| 107 |
+
for cam in cameras:
|
| 108 |
+
cam_component = RenderCameraComponent(cam["width"], cam["height"])
|
| 109 |
+
if cam["fovx"] is not None and cam["fovy"] is not None:
|
| 110 |
+
cam_component.set_fovx(cam["fovx"], False)
|
| 111 |
+
cam_component.set_fovy(cam["fovy"], False)
|
| 112 |
+
elif cam["fovy"] is None:
|
| 113 |
+
cam_component.set_fovx(cam["fovx"], True)
|
| 114 |
+
elif cam["fovx"] is None:
|
| 115 |
+
cam_component.set_fovy(cam["fovy"], True)
|
| 116 |
+
|
| 117 |
+
cam_component.near = cam["near"]
|
| 118 |
+
cam_component.far = cam["far"]
|
| 119 |
+
name2entity[f"scene-{scene_idx}_{cam['reference']}"].add_component(
|
| 120 |
+
cam_component
|
| 121 |
+
)
|
| 122 |
+
|
| 123 |
+
return articulations[0]
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from .geometry import *
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (237 Bytes). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/__pycache__/bounding_cylinder.cpython-310.pyc
ADDED
|
Binary file (3.21 kB). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/__pycache__/geometry.cpython-310.pyc
ADDED
|
Binary file (6.14 kB). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/__pycache__/rotation_conversions.cpython-310.pyc
ADDED
|
Binary file (18.3 kB). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/__pycache__/trimesh_utils.cpython-310.pyc
ADDED
|
Binary file (3.54 kB). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/bounding_cylinder.py
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Smallest enclosing circle - Library (Python)
|
| 3 |
+
#
|
| 4 |
+
# Copyright (c) 2020 Project Nayuki
|
| 5 |
+
# https://www.nayuki.io/page/smallest-enclosing-circle
|
| 6 |
+
#
|
| 7 |
+
# This program is free software: you can redistribute it and/or modify
|
| 8 |
+
# it under the terms of the GNU Lesser General Public License as published by
|
| 9 |
+
# the Free Software Foundation, either version 3 of the License, or
|
| 10 |
+
# (at your option) any later version.
|
| 11 |
+
#
|
| 12 |
+
# This program is distributed in the hope that it will be useful,
|
| 13 |
+
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
| 14 |
+
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
| 15 |
+
# GNU Lesser General Public License for more details.
|
| 16 |
+
#
|
| 17 |
+
# You should have received a copy of the GNU Lesser General Public License
|
| 18 |
+
# along with this program (see COPYING.txt and COPYING.LESSER.txt).
|
| 19 |
+
# If not, see <http://www.gnu.org/licenses/>.
|
| 20 |
+
#
|
| 21 |
+
|
| 22 |
+
# fmt: off
|
| 23 |
+
# isort: skip_file
|
| 24 |
+
import math, random
|
| 25 |
+
import numpy as np
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# Data conventions: A point is a pair of floats (x, y). A circle is a triple of floats (center x, center y, radius).
|
| 29 |
+
|
| 30 |
+
# Returns the smallest circle that encloses all the given points. Runs in expected O(n) time, randomized.
|
| 31 |
+
# Input: A sequence of pairs of floats or ints, e.g. [(0,5), (3.1,-2.7)].
|
| 32 |
+
# Output: A triple of floats representing a circle.
|
| 33 |
+
# Note: If 0 points are given, None is returned. If 1 point is given, a circle of radius 0 is returned.
|
| 34 |
+
#
|
| 35 |
+
# Initially: No boundary points known
|
| 36 |
+
def make_circle(points):
    """Return the smallest circle enclosing all *points* as (cx, cy, r).

    Randomized incremental (Welzl-style) algorithm, expected O(n).
    Returns None for an empty input; a single point yields radius 0.
    """
    # Work on a float copy in random order; the shuffle is what makes the
    # incremental algorithm run in expected linear time.
    pts = [(float(px), float(py)) for px, py in points]
    random.shuffle(pts)

    circle = None
    for idx, pt in enumerate(pts):
        # Only recompute when pt falls outside the current candidate circle.
        if circle is not None and is_in_circle(circle, pt):
            continue
        circle = _make_circle_one_point(pts[: idx + 1], pt)
    return circle


def _make_circle_one_point(points, p):
    """Smallest circle enclosing *points* with *p* known to lie on the boundary."""
    circle = (p[0], p[1], 0.0)
    for idx, q in enumerate(points):
        if is_in_circle(circle, q):
            continue
        if circle[2] == 0.0:
            circle = make_diameter(p, q)
        else:
            circle = _make_circle_two_points(points[: idx + 1], p, q)
    return circle


def _make_circle_two_points(points, p, q):
    """Smallest circle enclosing *points* with both *p* and *q* on the boundary."""
    circ = make_diameter(p, q)
    left = None
    right = None
    px, py = p
    qx, qy = q

    # For each point not already inside the two-point circle, form its
    # circumcircle with p and q, and track the extreme candidate on each
    # side of the directed line p -> q.
    for r in points:
        if is_in_circle(circ, r):
            continue

        cross = _cross_product(px, py, qx, qy, r[0], r[1])
        c = make_circumcircle(p, q, r)
        if c is None:
            continue
        if cross > 0.0 and (
            left is None
            or _cross_product(px, py, qx, qy, c[0], c[1])
            > _cross_product(px, py, qx, qy, left[0], left[1])
        ):
            left = c
        elif cross < 0.0 and (
            right is None
            or _cross_product(px, py, qx, qy, c[0], c[1])
            < _cross_product(px, py, qx, qy, right[0], right[1])
        ):
            right = c

    # Pick whichever side produced a circle; if both did, take the smaller.
    if left is None and right is None:
        return circ
    if left is None:
        return right
    if right is None:
        return left
    return left if left[2] <= right[2] else right


def make_diameter(a, b):
    """Circle having the segment a-b as a diameter."""
    center_x = (a[0] + b[0]) / 2
    center_y = (a[1] + b[1]) / 2
    # max() of the two half-distances absorbs asymmetric rounding.
    radius = max(
        math.hypot(center_x - a[0], center_y - a[1]),
        math.hypot(center_x - b[0], center_y - b[1]),
    )
    return (center_x, center_y, radius)


def make_circumcircle(a, b, c):
    """Circumcircle of triangle (a, b, c), or None if the points are collinear.

    Mathematical algorithm from Wikipedia: Circumscribed circle.
    """
    # Translate towards the triangle's bounding-box midpoint to improve
    # numerical conditioning before applying the circumcenter formula.
    ox = (min(a[0], b[0], c[0]) + max(a[0], b[0], c[0])) / 2
    oy = (min(a[1], b[1], c[1]) + max(a[1], b[1], c[1])) / 2
    ax, ay = a[0] - ox, a[1] - oy
    bx, by = b[0] - ox, b[1] - oy
    cx, cy = c[0] - ox, c[1] - oy
    d = (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by)) * 2.0
    if d == 0.0:
        return None  # degenerate (collinear) input
    x = ox + ((ax * ax + ay * ay) * (by - cy) + (bx * bx + by * by) * (cy - ay) + (cx * cx + cy * cy) * (ay - by)) / d
    y = oy + ((ax * ax + ay * ay) * (cx - bx) + (bx * bx + by * by) * (ax - cx) + (cx * cx + cy * cy) * (bx - ax)) / d
    # Radius: the max distance to the three vertices absorbs rounding error.
    radius = max(
        math.hypot(x - a[0], y - a[1]),
        math.hypot(x - b[0], y - b[1]),
        math.hypot(x - c[0], y - c[1]),
    )
    return (x, y, radius)


# Slack factor for the containment test so that points exactly on the
# boundary are not rejected due to floating-point rounding.
_MULTIPLICATIVE_EPSILON = 1 + 1e-14


def is_in_circle(c, p):
    """True if point *p* lies inside circle *c* (with a tiny relative tolerance)."""
    if c is None:
        return False
    return math.hypot(p[0] - c[0], p[1] - c[1]) <= c[2] * _MULTIPLICATIVE_EPSILON


def _cross_product(x0, y0, x1, y1, x2, y2):
    """Twice the signed area of the triangle (x0, y0), (x1, y1), (x2, y2)."""
    return (x1 - x0) * (y2 - y0) - (y1 - y0) * (x2 - x0)


def aabc(points):
    """Axis-aligned bounding cylinder of 3D *points*: (x, y, r, z_min, z_max).

    The cylinder axis is vertical (z); its cross-section is the smallest
    enclosing circle of the points projected onto the xy plane.
    """
    points = np.asarray(points)
    z_min = points[:, 2].min()
    z_max = points[:, 2].max()
    x, y, r = make_circle(points[:, :2])
    return x, y, r, z_min, z_max
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/geometry.py
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import sapien
|
| 5 |
+
import sapien.physx as physx
|
| 6 |
+
import torch
|
| 7 |
+
from scipy.spatial.transform import Rotation
|
| 8 |
+
|
| 9 |
+
from mani_skill.utils.geometry.bounding_cylinder import aabc
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def sample_on_unit_sphere(rng):
    """Draw a uniformly distributed point on the unit sphere.

    Normalized-Gaussian method:
    http://corysimon.github.io/articles/uniformdistn-on-sphere/

    Args:
        rng: numpy random generator/state exposing ``normal()``.
    """
    vec = np.zeros(3)
    # Redraw on a (vanishingly unlikely) near-zero sample so the
    # normalization below is well defined.
    while np.linalg.norm(vec) < 1e-4:
        for axis in range(3):
            vec[axis] = rng.normal()
    return vec / np.linalg.norm(vec)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def sample_on_unit_circle(rng):
    """Draw a uniformly distributed point on the unit circle.

    Args:
        rng: numpy random generator/state exposing ``normal()``.
    """
    vec = np.zeros(2)
    # Redraw on a (vanishingly unlikely) near-zero sample so the
    # normalization below is well defined.
    while np.linalg.norm(vec) < 1e-4:
        for axis in range(2):
            vec[axis] = rng.normal()
    return vec / np.linalg.norm(vec)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def rotation_between_vec(a, b):  # from a to b
    """Rotation carrying direction *a* onto direction *b*.

    Returns a scipy ``Rotation``. NOTE(review): the rotation axis is
    ill-defined (division by a zero cross-product norm) when a and b are
    parallel — callers appear to avoid that case; confirm.
    """
    unit_a = a / np.linalg.norm(a)
    unit_b = b / np.linalg.norm(b)
    axis = np.cross(unit_a, unit_b)
    axis = axis / np.linalg.norm(axis)  # norm might be 0
    angle = np.arccos(unit_a @ unit_b)
    return Rotation.from_rotvec(axis * angle)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def angle_between_vec(a, b):  # from a to b
    """Unsigned angle in radians between vectors *a* and *b*."""
    cos_angle = (a / np.linalg.norm(a)) @ (b / np.linalg.norm(b))
    return np.arccos(cos_angle)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def wxyz_to_xyzw(q):
    """Reorder a quaternion from (w, x, y, z) to (x, y, z, w)."""
    return np.hstack((q[1:4], q[:1]))
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def xyzw_to_wxyz(q):
    """Reorder a quaternion from (x, y, z, w) to (w, x, y, z)."""
    return np.hstack((q[3:4], q[:3]))
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def rotate_2d_vec_by_angle(vec, theta):
    """Rotate a 2D vector counter-clockwise by *theta* radians."""
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    rotation = np.array([[cos_t, -sin_t], [sin_t, cos_t]])
    return rotation @ vec
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def angle_distance(q0: sapien.Pose, q1: sapien.Pose):
    """Rotation angle between the orientations of two poses, as a fraction of pi.

    Computes the relative quaternion q0^-1 * q1 and returns
    2 * atan2(|vec|, w) / pi (so the result lies in [0, 2]).
    """
    rel_quat = (q0.inv() * q1).q
    w, xyz = rel_quat[0], rel_quat[1:]
    return 2 * np.arctan2(np.linalg.norm(xyz), w) / np.pi
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def get_axis_aligned_bbox_for_articulation(art: physx.PhysxArticulation):
    """World-frame AABB (mins, maxs) over all links' convex collision meshes."""
    lower = np.full(3, np.inf)
    upper = -lower
    for link in art.get_links():
        link_pose = link.pose
        for shape in link.get_collision_shapes():
            # Only convex-mesh collision shapes are supported here.
            assert isinstance(shape, physx.PhysxCollisionShapeConvexMesh)
            world_pose = link_pose * shape.get_local_pose()
            tf = world_pose.to_transformation_matrix()
            verts = shape.vertices * shape.scale
            verts = verts @ tf[:3, :3].T + tf[:3, 3]
            lower = np.minimum(lower, verts.min(0))
            upper = np.maximum(upper, verts.max(0))
    return lower, upper
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def get_axis_aligned_bbox_for_actor(actor: sapien.Entity):
    """World-frame AABB (mins, maxs) of an actor's convex collision meshes."""
    lower = np.full(3, np.inf)
    upper = -lower

    body = actor.find_component_by_type(physx.PhysxRigidDynamicComponent)
    for shape in body.get_collision_shapes():  # this is CollisionShape
        # Only convex-mesh collision shapes are supported here.
        assert isinstance(shape, physx.PhysxCollisionShapeConvexMesh)
        scaled = shape.vertices * shape.scale
        tf = (actor.get_pose() * shape.get_local_pose()).to_transformation_matrix()
        world_verts = scaled @ tf[:3, :3].T + tf[:3, 3]
        lower = np.minimum(lower, world_verts.min(0))
        upper = np.maximum(upper, world_verts.max(0))

    return lower, upper
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def get_local_axis_aligned_bbox_for_link(link: physx.PhysxArticulationLinkComponent):
    """Link-frame AABB (mins, maxs) over the link's convex collision meshes."""
    lower = np.full(3, np.inf)
    upper = -lower
    for shape in link.get_collision_shapes():
        # Only convex-mesh collision shapes are supported here.
        assert isinstance(shape, physx.PhysxCollisionShapeConvexMesh)
        tf = shape.get_local_pose().to_transformation_matrix()
        verts = (shape.vertices * shape.scale) @ tf[:3, :3].T + tf[:3, 3]
        lower = np.minimum(lower, verts.min(0))
        upper = np.maximum(upper, verts.max(0))
    return lower, upper
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def get_local_aabc_for_actor(actor: sapien.Entity):
    """Actor-frame axis-aligned bounding cylinder over all collision meshes.

    Returns whatever :func:`aabc` returns: (x, y, r, z_min, z_max).
    """
    body = actor.find_component_by_type(physx.PhysxRigidDynamicComponent)
    per_shape_verts = []
    for shape in body.get_collision_shapes():
        # Only convex-mesh collision shapes are supported here.
        assert isinstance(shape, physx.PhysxCollisionShapeConvexMesh)
        tf = shape.get_local_pose().to_transformation_matrix()
        per_shape_verts.append(
            (shape.vertices * shape.scale) @ tf[:3, :3].T + tf[:3, 3]
        )
    return aabc(np.vstack(per_shape_verts))
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def transform_points(H: torch.Tensor, pts: torch.Tensor) -> torch.Tensor:
    """Apply a batch of 4x4 homogeneous transforms to a matching batch of 3D points.

    Args:
        H: (N, 4, 4) transforms; the i-th transform is applied to the i-th point.
        pts: (N, 3) points.

    Returns:
        (N, 3) transformed points, R_i @ p_i + t_i.
    """
    assert H.shape[1:] == (4, 4), H.shape
    assert pts.ndim == 2 and pts.shape[1] == 3, pts.shape
    rotations = H[:, :3, :3]
    translations = H[:, :3, 3]
    rotated = torch.bmm(rotations, pts.unsqueeze(-1)).squeeze(-1)
    return rotated + translations
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def invert_transform(H: np.ndarray):
    """Invert rigid transform(s) of shape (..., 4, 4).

    Assumes the upper-left 3x3 block is a rotation (so its inverse is its
    transpose); the bottom row is copied through unchanged.
    """
    assert H.shape[-2:] == (4, 4), H.shape
    rot_t = np.swapaxes(H[..., :3, :3], -1, -2)
    out = H.copy()
    out[..., :3, :3] = rot_t
    out[..., :3, 3:] = -rot_t @ H[..., :3, 3:]
    return out
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def get_oriented_bounding_box_for_2d_points(
    points_2d: np.ndarray, resolution=0.0
) -> Dict:
    """Oriented bounding box of 2D points, with axes from PCA of the covariance.

    Since the axes come from the covariance eigenvectors, this is not
    guaranteed to be the minimum-area OBB for all inputs.

    Args:
        points_2d: (N, 2) array of points.
        resolution: if > 0, snap points to this grid and de-duplicate first,
            reducing the influence of densely sampled regions.

    Returns:
        Dict with "center" (2,), "half_size" (2,), "axes" (2x2 eigenvector
        matrix) and "corners" (4, 2), all in the original frame. Note the
        eigenvector order/sign from np.linalg.eig is not canonical.
    """
    assert len(points_2d.shape) == 2 and points_2d.shape[1] == 2
    if resolution > 0.0:
        points_2d = np.round(points_2d / resolution) * resolution
        points_2d = np.unique(points_2d, axis=0)
    # Covariance of the point cloud (rowvar=0: each row is an observation).
    ca = np.cov(points_2d, y=None, rowvar=0, bias=1)

    v, vect = np.linalg.eig(ca)
    tvect = np.transpose(vect)

    # use the inverse of the eigenvectors as a rotation matrix and
    # rotate the points so they align with the x and y axes
    ar = np.dot(points_2d, np.linalg.inv(tvect))

    # get the minimum and maximum x and y
    mina = np.min(ar, axis=0)
    maxa = np.max(ar, axis=0)
    half_size = (maxa - mina) * 0.5

    # the center is just half way between the min and max xy
    center = mina + half_size
    # get the 4 corners by subtracting and adding half the bounding boxes height and width to the center
    corners = np.array(
        [
            center + [-half_size[0], -half_size[1]],
            center + [half_size[0], -half_size[1]],
            center + [half_size[0], half_size[1]],
            center + [-half_size[0], half_size[1]],
        ]
    )

    # use the eigenvectors as a rotation matrix and
    # rotate the corners and the center back into the original frame
    corners = np.dot(corners, tvect)
    center = np.dot(center, tvect)

    return {"center": center, "half_size": half_size, "axes": vect, "corners": corners}
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
# -------------------------------------------------------------------------- #
|
| 196 |
+
# Functions pulled out from SAPIEN
|
| 197 |
+
# -------------------------------------------------------------------------- #
|
| 198 |
+
def rotate_vector(v, q):
    """Rotate 3-vector *v* by unit quaternion *q* given in (w, x, y, z) order.

    Direct quaternion sandwich-product expansion (pulled out from SAPIEN).
    """
    real = q[0]
    imag = q[1:]
    return (
        2.0 * imag.dot(v) * imag
        + (real * real - imag.dot(imag)) * v
        + 2.0 * real * np.cross(imag, v)
    )
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/rotation_conversions.py
ADDED
|
@@ -0,0 +1,633 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
from typing import Optional, Union
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn.functional as F
|
| 11 |
+
|
| 12 |
+
Device = Union[str, torch.device]
|
| 13 |
+
|
| 14 |
+
# -------------------------------------------------------------------------- #
|
| 15 |
+
# Functions pulled out from pytorch3d to avoid having to install the whole package
|
| 16 |
+
# -------------------------------------------------------------------------- #
|
| 17 |
+
|
| 18 |
+
"""
|
| 19 |
+
The transformation matrices returned from the functions in this file assume
|
| 20 |
+
the points on which the transformation will be applied are column vectors.
|
| 21 |
+
i.e. the R matrix is structured as
|
| 22 |
+
|
| 23 |
+
R = [
|
| 24 |
+
[Rxx, Rxy, Rxz],
|
| 25 |
+
[Ryx, Ryy, Ryz],
|
| 26 |
+
[Rzx, Rzy, Rzz],
|
| 27 |
+
] # (3, 3)
|
| 28 |
+
|
| 29 |
+
This matrix can be applied to column vectors by post multiplication
|
| 30 |
+
by the points e.g.
|
| 31 |
+
|
| 32 |
+
points = [[0], [1], [2]] # (3 x 1) xyz coordinates of a point
|
| 33 |
+
transformed_points = R * points
|
| 34 |
+
|
| 35 |
+
To apply the same matrix to points which are row vectors, the R matrix
|
| 36 |
+
can be transposed and pre multiplied by the points:
|
| 37 |
+
|
| 38 |
+
e.g.
|
| 39 |
+
points = [[0, 1, 2]] # (1 x 3) xyz coordinates of a point
|
| 40 |
+
transformed_points = points * R.transpose(1, 0)
|
| 41 |
+
"""
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor:
    """
    Convert rotations given as quaternions to rotation matrices.

    Args:
        quaternions: quaternions with real part first,
            as tensor of shape (..., 4).

    Returns:
        Rotation matrices as tensor of shape (..., 3, 3).
    """
    w, x, y, z = torch.unbind(quaternions, -1)
    # 2 / |q|^2 normalizes non-unit quaternions in one factor.
    scale = 2.0 / (quaternions * quaternions).sum(-1)

    row0 = (
        1 - scale * (y * y + z * z),
        scale * (x * y - z * w),
        scale * (x * z + y * w),
    )
    row1 = (
        scale * (x * y + z * w),
        1 - scale * (x * x + z * z),
        scale * (y * z - x * w),
    )
    row2 = (
        scale * (x * z - y * w),
        scale * (y * z + x * w),
        1 - scale * (x * x + y * y),
    )
    flat = torch.stack(row0 + row1 + row2, -1)
    return flat.reshape(quaternions.shape[:-1] + (3, 3))
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def _copysign(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """
    Return a tensor with each element's magnitude taken from *a* and sign
    taken from the corresponding element of *b*. Like the standard copysign
    floating-point operation, but not careful about negative 0 and NaN.

    Args:
        a: source tensor.
        b: tensor whose signs will be used, of the same shape as a.

    Returns:
        Tensor of the same shape as a with the signs of b.
    """
    # Flip the sign exactly where a and b disagree on sign.
    flip = (a < 0) != (b < 0)
    return torch.where(flip, -a, a)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:
    """
    Returns torch.sqrt(torch.max(0, x)) but with a zero subgradient where
    x is 0 (plain clamp+sqrt would give an infinite gradient there).
    """
    out = torch.zeros_like(x)
    positive = x > 0
    out[positive] = x[positive].sqrt()
    return out
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:
    """
    Convert rotations given as rotation matrices to quaternions.

    Uses the numerically robust "pick the best-conditioned candidate" scheme
    (from pytorch3d) instead of the naive trace formula.

    Args:
        matrix: Rotation matrices as tensor of shape (..., 3, 3).

    Returns:
        quaternions with real part first, as tensor of shape (..., 4).

    Raises:
        ValueError: if the trailing dimensions are not (3, 3).
    """
    if matrix.size(-1) != 3 or matrix.size(-2) != 3:
        raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")

    batch_dim = matrix.shape[:-2]
    m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind(
        matrix.reshape(batch_dim + (9,)), dim=-1
    )

    # |2*q_r|, |2*q_i|, |2*q_j|, |2*q_k| derived from trace combinations.
    q_abs = _sqrt_positive_part(
        torch.stack(
            [
                1.0 + m00 + m11 + m22,
                1.0 + m00 - m11 - m22,
                1.0 - m00 + m11 - m22,
                1.0 - m00 - m11 + m22,
            ],
            dim=-1,
        )
    )

    # we produce the desired quaternion multiplied by each of r, i, j, k
    quat_by_rijk = torch.stack(
        [
            # Row n is (2*q_n) * q, reconstructed from matrix entries.
            torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1),
            torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1),
            torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1),
            torch.stack([m10 - m01, m20 + m02, m21 + m12, q_abs[..., 3] ** 2], dim=-1),
        ],
        dim=-2,
    )

    # We floor here at 0.1 but the exact level is not important; if q_abs is small,
    # the candidate won't be picked.
    flr = torch.tensor(0.1).to(dtype=q_abs.dtype, device=q_abs.device)
    quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(flr))

    # if not for numerical problems, quat_candidates[i] should be same (up to a sign),
    # forall i; we pick the best-conditioned one (with the largest denominator)
    out = quat_candidates[
        F.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, :
    ].reshape(batch_dim + (4,))
    # NOTE(review): standardize_quaternion is defined elsewhere in this module;
    # presumably it canonicalizes the sign (non-negative real part) — confirm.
    return standardize_quaternion(out)
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def _axis_angle_rotation(axis: str, angle: torch.Tensor) -> torch.Tensor:
|
| 168 |
+
"""
|
| 169 |
+
Return the rotation matrices for one of the rotations about an axis
|
| 170 |
+
of which Euler angles describe, for each value of the angle given.
|
| 171 |
+
|
| 172 |
+
Args:
|
| 173 |
+
axis: Axis label "X" or "Y or "Z".
|
| 174 |
+
angle: any shape tensor of Euler angles in radians
|
| 175 |
+
|
| 176 |
+
Returns:
|
| 177 |
+
Rotation matrices as tensor of shape (..., 3, 3).
|
| 178 |
+
"""
|
| 179 |
+
|
| 180 |
+
cos = torch.cos(angle)
|
| 181 |
+
sin = torch.sin(angle)
|
| 182 |
+
one = torch.ones_like(angle)
|
| 183 |
+
zero = torch.zeros_like(angle)
|
| 184 |
+
|
| 185 |
+
if axis == "X":
|
| 186 |
+
R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos)
|
| 187 |
+
elif axis == "Y":
|
| 188 |
+
R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos)
|
| 189 |
+
elif axis == "Z":
|
| 190 |
+
R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one)
|
| 191 |
+
else:
|
| 192 |
+
raise ValueError("letter must be either X, Y or Z.")
|
| 193 |
+
|
| 194 |
+
return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3))
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def euler_angles_to_matrix(euler_angles: torch.Tensor, convention: str) -> torch.Tensor:
    """
    Convert rotations given as Euler angles in radians to rotation matrices.

    Args:
        euler_angles: Euler angles in radians as tensor of shape (..., 3).
        convention: Convention string of three uppercase letters from
            {"X", "Y", and "Z"}.

    Returns:
        Rotation matrices as tensor of shape (..., 3, 3).
    """
    # Validate the angle tensor and the convention string before any math.
    if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3:
        raise ValueError("Invalid input euler angles.")
    if len(convention) != 3:
        raise ValueError("Convention must have 3 letters.")
    if convention[1] in (convention[0], convention[2]):
        raise ValueError(f"Invalid convention {convention}.")
    for letter in convention:
        if letter not in ("X", "Y", "Z"):
            raise ValueError(f"Invalid letter {letter} in convention string.")

    # One single-axis rotation per convention letter / angle component.
    r0, r1, r2 = (
        _axis_angle_rotation(letter, component)
        for letter, component in zip(convention, torch.unbind(euler_angles, -1))
    )
    # Compose left-to-right: first letter is the outermost rotation.
    return r0 @ r1 @ r2
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
def _angle_from_tan(
    axis: str, other_axis: str, data, horizontal: bool, tait_bryan: bool
) -> torch.Tensor:
    """
    Extract the first or third Euler angle from the two members of
    the matrix which are positive constant times its sine and cosine.

    Args:
        axis: Axis label "X" or "Y or "Z" for the angle we are finding.
        other_axis: Axis label "X" or "Y or "Z" for the middle axis in the
            convention.
        data: Rotation matrices as tensor of shape (..., 3, 3).
        horizontal: Whether we are looking for the angle for the third axis,
            which means the relevant entries are in the same row of the
            rotation matrix. If not, they are in the same column.
        tait_bryan: Whether the first and third axes in the convention differ.

    Returns:
        Euler Angles in radians for each matrix in data as a tensor
        of shape (...).
    """

    # For each axis, (i1, i2) are the indices of the two matrix entries that
    # are (sin, cos) of the sought angle, up to sign.
    i1, i2 = {"X": (2, 1), "Y": (0, 2), "Z": (1, 0)}[axis]
    if horizontal:
        # Row case: the sin/cos entries swap roles relative to the column case.
        i2, i1 = i1, i2
    # "even" marks the cyclic axis pairs (X->Y->Z->X), which carry no extra sign.
    even = (axis + other_axis) in ["XY", "YZ", "ZX"]
    if horizontal == even:
        return torch.atan2(data[..., i1], data[..., i2])
    # Remaining cases differ only by which entry is negated before atan2.
    if tait_bryan:
        return torch.atan2(-data[..., i2], data[..., i1])
    return torch.atan2(data[..., i2], -data[..., i1])
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
def _index_from_letter(letter: str) -> int:
|
| 260 |
+
if letter == "X":
|
| 261 |
+
return 0
|
| 262 |
+
if letter == "Y":
|
| 263 |
+
return 1
|
| 264 |
+
if letter == "Z":
|
| 265 |
+
return 2
|
| 266 |
+
raise ValueError("letter must be either X, Y or Z.")
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
def matrix_to_euler_angles(matrix: torch.Tensor, convention: str) -> torch.Tensor:
    """
    Convert rotations given as rotation matrices to Euler angles in radians.

    Args:
        matrix: Rotation matrices as tensor of shape (..., 3, 3).
        convention: Convention string of three uppercase letters.

    Returns:
        Euler angles in radians as tensor of shape (..., 3).

    Raises:
        ValueError: If the convention string is malformed or the matrix does
            not have a trailing (3, 3) shape.
    """
    if len(convention) != 3:
        raise ValueError("Convention must have 3 letters.")
    if convention[1] in (convention[0], convention[2]):
        raise ValueError(f"Invalid convention {convention}.")
    for letter in convention:
        if letter not in ("X", "Y", "Z"):
            raise ValueError(f"Invalid letter {letter} in convention string.")
    if matrix.size(-1) != 3 or matrix.size(-2) != 3:
        raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")
    # Coordinate indices of the first and third convention axes.
    i0 = _index_from_letter(convention[0])
    i2 = _index_from_letter(convention[2])
    # Tait-Bryan conventions (e.g. "XYZ") use asin for the middle angle;
    # proper Euler conventions (e.g. "XYX") use acos.
    tait_bryan = i0 != i2
    if tait_bryan:
        central_angle = torch.asin(
            # Sign flip depends on whether (i0, i2) is a cyclic axis pair.
            matrix[..., i0, i2] * (-1.0 if i0 - i2 in [-1, 2] else 1.0)
        )
    else:
        central_angle = torch.acos(matrix[..., i0, i0])

    # First and third angles are recovered from atan2 of matrix entries:
    # matrix[..., i2] indexes the last axis (column i2); matrix[..., i0, :]
    # indexes the second-to-last axis (row i0).
    o = (
        _angle_from_tan(
            convention[0], convention[1], matrix[..., i2], False, tait_bryan
        ),
        central_angle,
        _angle_from_tan(
            convention[2], convention[1], matrix[..., i0, :], True, tait_bryan
        ),
    )
    return torch.stack(o, -1)
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def random_quaternions(
    n: int, dtype: Optional[torch.dtype] = None, device: Optional[Device] = None
) -> torch.Tensor:
    """
    Generate random quaternions representing rotations,
    i.e. versors with nonnegative real part.

    Args:
        n: Number of quaternions in a batch to return.
        dtype: Type to return.
        device: Desired device of returned tensor. Default:
            uses the current device for the default tensor type.

    Returns:
        Quaternions as tensor of shape (N, 4).
    """
    if isinstance(device, str):
        device = torch.device(device)
    # Sample Gaussian 4-vectors, then project onto the unit sphere; the sign
    # of the scale follows the real part so the result has w >= 0.
    raw = torch.randn((n, 4), dtype=dtype, device=device)
    squared_norm = (raw * raw).sum(1)
    scale = _copysign(torch.sqrt(squared_norm), raw[:, 0])
    return raw / scale[:, None]
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
def random_rotations(
    n: int, dtype: Optional[torch.dtype] = None, device: Optional[Device] = None
) -> torch.Tensor:
    """
    Generate random rotations as 3x3 rotation matrices.

    Args:
        n: Number of rotation matrices in a batch to return.
        dtype: Type to return.
        device: Device of returned tensor. Default: if None,
            uses the current device for the default tensor type.

    Returns:
        Rotation matrices as tensor of shape (n, 3, 3).
    """
    # Uniformly random unit quaternions map to uniformly random rotations.
    versors = random_quaternions(n, dtype=dtype, device=device)
    return quaternion_to_matrix(versors)
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
def random_rotation(
    dtype: Optional[torch.dtype] = None, device: Optional[Device] = None
) -> torch.Tensor:
    """
    Generate a single random 3x3 rotation matrix.

    Args:
        dtype: Type to return
        device: Device of returned tensor. Default: if None,
            uses the current device for the default tensor type

    Returns:
        Rotation matrix as tensor of shape (3, 3).
    """
    # Delegate to the batched sampler and strip the batch dimension.
    batch_of_one = random_rotations(1, dtype, device)
    return batch_of_one[0]
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
def standardize_quaternion(quaternions: torch.Tensor) -> torch.Tensor:
    """
    Convert a unit quaternion to a standard form: one in which the real
    part is non negative.

    Args:
        quaternions: Quaternions with real part first,
            as tensor of shape (..., 4).

    Returns:
        Standardized quaternions as tensor of shape (..., 4).
    """
    # q and -q encode the same rotation; flip those whose real part is negative.
    negative_real = quaternions[..., 0:1] < 0
    return torch.where(negative_real, -quaternions, quaternions)
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
def quaternion_raw_multiply(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """
    Multiply two quaternions.
    Usual torch rules for broadcasting apply.

    Args:
        a: Quaternions as tensor of shape (..., 4), real part first.
        b: Quaternions as tensor of shape (..., 4), real part first.

    Returns:
        The product of a and b, a tensor of quaternions shape (..., 4).
    """
    w1, x1, y1, z1 = torch.unbind(a, -1)
    w2, x2, y2, z2 = torch.unbind(b, -1)
    # Hamilton product, components assembled directly into the stack call.
    return torch.stack(
        (
            w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
            w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
            w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
            w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
        ),
        -1,
    )
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
def quaternion_multiply(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """
    Multiply two quaternions representing rotations, returning the quaternion
    representing their composition, i.e. the versor with nonnegative real part.
    Usual torch rules for broadcasting apply.

    Args:
        a: Quaternions as tensor of shape (..., 4), real part first.
        b: Quaternions as tensor of shape (..., 4), real part first.

    Returns:
        The product of a and b, a tensor of quaternions of shape (..., 4).
    """
    # Raw Hamilton product, then canonicalize the sign of the real part.
    return standardize_quaternion(quaternion_raw_multiply(a, b))
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
def quaternion_invert(quaternion: torch.Tensor) -> torch.Tensor:
    """
    Given a quaternion representing rotation, get the quaternion representing
    its inverse.

    Args:
        quaternion: Quaternions as tensor of shape (..., 4), with real part
            first, which must be versors (unit quaternions).

    Returns:
        The inverse, a tensor of quaternions of shape (..., 4).
    """
    # For unit quaternions the inverse is the conjugate: negate the vector part.
    conjugate_signs = torch.tensor([1, -1, -1, -1], device=quaternion.device)
    return quaternion * conjugate_signs
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
def quaternion_apply(quaternion: torch.Tensor, point: torch.Tensor) -> torch.Tensor:
    """
    Apply the rotation given by a quaternion to a 3D point.
    Usual torch rules for broadcasting apply.

    Args:
        quaternion: Tensor of quaternions, real part first, of shape (..., 4).
        point: Tensor of 3D points of shape (..., 3).

    Returns:
        Tensor of rotated points of shape (..., 3).
    """
    if point.size(-1) != 3:
        raise ValueError(f"Points are not in 3D, {point.shape}.")
    # Embed the point as a pure quaternion (zero real part).
    zeros = point.new_zeros(point.shape[:-1] + (1,))
    point_quat = torch.cat((zeros, point), -1)
    # Conjugation q * p * q^-1 rotates the point.
    rotated = quaternion_raw_multiply(
        quaternion_raw_multiply(quaternion, point_quat),
        quaternion_invert(quaternion),
    )
    # Discard the (zero) real part to recover the 3D point.
    return rotated[..., 1:]
|
| 462 |
+
|
| 463 |
+
|
| 464 |
+
def axis_angle_to_matrix(axis_angle: torch.Tensor) -> torch.Tensor:
    """
    Convert rotations given as axis/angle to rotation matrices.

    Args:
        axis_angle: Rotations given as a vector in axis angle form,
            as a tensor of shape (..., 3), where the magnitude is
            the angle turned anticlockwise in radians around the
            vector's direction.

    Returns:
        Rotation matrices as tensor of shape (..., 3, 3).
    """
    # Two-step conversion via the quaternion representation.
    quaternions = axis_angle_to_quaternion(axis_angle)
    return quaternion_to_matrix(quaternions)
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
def matrix_to_axis_angle(matrix: torch.Tensor) -> torch.Tensor:
    """
    Convert rotations given as rotation matrices to axis/angle.

    Args:
        matrix: Rotation matrices as tensor of shape (..., 3, 3).

    Returns:
        Rotations given as a vector in axis angle form, as a tensor
        of shape (..., 3), where the magnitude is the angle
        turned anticlockwise in radians around the vector's
        direction.
    """
    # Two-step conversion via the quaternion representation.
    quaternions = matrix_to_quaternion(matrix)
    return quaternion_to_axis_angle(quaternions)
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
def axis_angle_to_quaternion(axis_angle: torch.Tensor) -> torch.Tensor:
    """
    Convert rotations given as axis/angle to quaternions.

    Args:
        axis_angle: Rotations given as a vector in axis angle form,
            as a tensor of shape (..., 3), where the magnitude is
            the angle turned anticlockwise in radians around the
            vector's direction.

    Returns:
        quaternions with real part first, as tensor of shape (..., 4).
    """
    angles = torch.norm(axis_angle, p=2, dim=-1, keepdim=True)
    half_angles = angles * 0.5
    # The vector part is axis_angle * sin(angle/2) / angle; near zero the
    # quotient is ill-conditioned, so switch to its Taylor expansion there.
    small = angles.abs() < 1e-6
    regular = ~small
    ratio = torch.empty_like(angles)
    ratio[regular] = torch.sin(half_angles[regular]) / angles[regular]
    # sin(x/2) ~= x/2 - (x/2)^3/6  =>  sin(x/2)/x ~= 1/2 - x^2/48
    ratio[small] = 0.5 - (angles[small] * angles[small]) / 48
    return torch.cat([torch.cos(half_angles), axis_angle * ratio], dim=-1)
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
def quaternion_to_axis_angle(quaternions: torch.Tensor) -> torch.Tensor:
    """
    Convert rotations given as quaternions to axis/angle.

    Args:
        quaternions: quaternions with real part first,
            as tensor of shape (..., 4).

    Returns:
        Rotations given as a vector in axis angle form, as a tensor
        of shape (..., 3), where the magnitude is the angle
        turned anticlockwise in radians around the vector's
        direction.
    """
    # Angle from the (norm of the vector part, real part) pair via atan2.
    norms = torch.norm(quaternions[..., 1:], p=2, dim=-1, keepdim=True)
    half_angles = torch.atan2(norms, quaternions[..., :1])
    angles = 2 * half_angles
    # Dividing by sin(angle/2)/angle rescales the vector part to length
    # "angle"; near zero use the Taylor expansion of the quotient.
    small = angles.abs() < 1e-6
    regular = ~small
    ratio = torch.empty_like(angles)
    ratio[regular] = torch.sin(half_angles[regular]) / angles[regular]
    # sin(x/2) ~= x/2 - (x/2)^3/6  =>  sin(x/2)/x ~= 1/2 - x^2/48
    ratio[small] = 0.5 - (angles[small] * angles[small]) / 48
    return quaternions[..., 1:] / ratio
|
| 557 |
+
|
| 558 |
+
|
| 559 |
+
def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor:
    """
    Converts 6D rotation representation by Zhou et al. [1] to rotation matrix
    using Gram--Schmidt orthogonalization per Section B of [1].
    Args:
        d6: 6D rotation representation, of size (*, 6)

    Returns:
        batch of rotation matrices of size (*, 3, 3)

    [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
    On the Continuity of Rotation Representations in Neural Networks.
    IEEE Conference on Computer Vision and Pattern Recognition, 2019.
    Retrieved from http://arxiv.org/abs/1812.07035
    """
    a1 = d6[..., :3]
    a2 = d6[..., 3:]
    # First basis vector: normalize the first half of the representation.
    b1 = F.normalize(a1, dim=-1)
    # Second: remove the component of a2 along b1, then normalize.
    projection = (b1 * a2).sum(-1, keepdim=True)
    b2 = F.normalize(a2 - projection * b1, dim=-1)
    # Third: completes the right-handed orthonormal frame.
    b3 = torch.cross(b1, b2, dim=-1)
    # The basis vectors become the rows of the rotation matrix.
    return torch.stack((b1, b2, b3), dim=-2)
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor:
    """
    Converts rotation matrices to 6D rotation representation by Zhou et al. [1]
    by dropping the last row. Note that 6D representation is not unique.
    Args:
        matrix: batch of rotation matrices of size (*, 3, 3)

    Returns:
        6D rotation representation, of size (*, 6)

    [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
    On the Continuity of Rotation Representations in Neural Networks.
    IEEE Conference on Computer Vision and Pattern Recognition, 2019.
    Retrieved from http://arxiv.org/abs/1812.07035
    """
    batch_dim = matrix.size()[:-2]
    # Keep only the first two rows; clone so reshape never aliases the input.
    top_rows = matrix[..., :2, :].clone()
    return top_rows.reshape(batch_dim + (6,))
|
| 600 |
+
|
| 601 |
+
def euler_to_quaternion(euler_angles: torch.Tensor, convention: str = "ZYX") -> torch.Tensor:
    """
    Convert rotations given as Euler angles in radians to quaternions.

    Args:
        euler_angles: Euler angles in radians as a tensor of shape (..., 3).
        convention: Convention string of three uppercase letters from {"X", "Y", "Z"}.
            Specifies the order of rotations (e.g., "ZYX" for yaw-pitch-roll).
            Defaults to "ZYX".

    Returns:
        Quaternions with real part first, as a tensor of shape (..., 4).
    """
    # Go through the rotation-matrix representation.
    return matrix_to_quaternion(euler_angles_to_matrix(euler_angles, convention))
|
| 616 |
+
|
| 617 |
+
def quaternion_to_euler(quaternions: torch.Tensor, convention: str = "ZYX") -> torch.Tensor:
    """
    Convert rotations given as quaternions to Euler angles in radians.

    Args:
        quaternions: Quaternions with real part first, as tensor of shape (..., 4).
        convention: Convention string of three uppercase letters from {"X", "Y", "Z"}.
            Specifies the order of rotations (e.g., "ZYX" for yaw-pitch-roll).
            Defaults to "ZYX".

    Returns:
        Euler angles in radians as tensor of shape (..., 3).
    """
    # Go through the rotation-matrix representation.
    return matrix_to_euler_angles(quaternion_to_matrix(quaternions), convention)
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/geometry/trimesh_utils.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List
|
| 2 |
+
|
| 3 |
+
## TODO clean up the code here, too many functions that are plurals of one or the other and confusing naming
|
| 4 |
+
import numpy as np
|
| 5 |
+
import sapien
|
| 6 |
+
import sapien.physx as physx
|
| 7 |
+
import sapien.render
|
| 8 |
+
import trimesh
|
| 9 |
+
import trimesh.creation
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def get_component_meshes(component: physx.PhysxRigidBaseComponent):
    """Get component (collision) meshes in the component's frame.

    Builds one trimesh per collision shape attached to the component,
    transformed by each shape's local pose. Plane shapes are skipped (they
    have no finite mesh). Raises TypeError for unrecognized shape types.
    """
    meshes = []
    for shape in component.get_collision_shapes():
        if isinstance(shape, physx.PhysxCollisionShapePlane):
            # Planes are infinite and produce no finite mesh.
            continue
        if isinstance(shape, physx.PhysxCollisionShapeBox):
            mesh = trimesh.creation.box(extents=2 * shape.half_size)
        elif isinstance(shape, physx.PhysxCollisionShapeCapsule):
            mesh = trimesh.creation.capsule(
                height=2 * shape.half_length, radius=shape.radius
            )
        elif isinstance(shape, physx.PhysxCollisionShapeCylinder):
            mesh = trimesh.creation.cylinder(
                radius=shape.radius, height=2 * shape.half_length
            )
        elif isinstance(shape, physx.PhysxCollisionShapeSphere):
            mesh = trimesh.creation.icosphere(radius=shape.radius)
        elif isinstance(
            shape,
            (
                physx.PhysxCollisionShapeConvexMesh,
                physx.PhysxCollisionShapeTriangleMesh,
            ),
        ):
            # Convex and triangle meshes share identical construction:
            # scaled vertices plus the shape's triangle indices.
            scaled_vertices = shape.vertices * shape.scale  # [n, 3]
            mesh = trimesh.Trimesh(vertices=scaled_vertices, faces=shape.get_triangles())
        else:
            raise TypeError(type(shape))
        mesh.apply_transform(shape.get_local_pose().to_transformation_matrix())
        meshes.append(mesh)
    return meshes
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def get_render_body_meshes(visual_body: sapien.render.RenderBodyComponent):
    """Collect the trimesh meshes of every render shape on a render body."""
    meshes = []
    for shape in visual_body.render_shapes:
        meshes.extend(get_render_shape_meshes(shape))
    return meshes
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def get_render_shape_meshes(render_shape: sapien.render.RenderShape):
    """Extract trimesh meshes from a render shape, in the shape's parent frame.

    Only triangle-mesh render shapes yield meshes; any other render shape
    kind produces an empty list. Each part's vertices are scaled by the
    shape's scale and transformed by the shape's local pose.

    Args:
        render_shape: The sapien render shape to convert.

    Returns:
        A list of trimesh.Trimesh, one per mesh part (possibly empty).
    """
    meshes = []
    # isinstance rather than an exact type() comparison, so subclasses of
    # RenderShapeTriangleMesh are handled as well.
    if isinstance(render_shape, sapien.render.RenderShapeTriangleMesh):
        for part in render_shape.parts:
            vertices = part.vertices * render_shape.scale  # [n, 3]
            faces = part.triangles
            mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
            mesh.apply_transform(render_shape.local_pose.to_transformation_matrix())
            meshes.append(mesh)
    return meshes
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def get_actor_visual_meshes(actor: sapien.Entity):
    """Get actor (visual) meshes in the actor frame."""
    body = actor.find_component_by_type(sapien.render.RenderBodyComponent)
    # Actors without a render body component simply have no visual meshes.
    if body is None:
        return []
    return list(get_render_body_meshes(body))
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def merge_meshes(meshes: List[trimesh.Trimesh]):
    """Concatenate meshes into a single Trimesh.

    Face indices of each mesh are offset by the running vertex count so they
    keep pointing at their own vertices. Returns None when the input contains
    no vertices at all.
    """
    vertex_blocks = []
    face_blocks = []
    offset = 0
    for mesh in meshes:
        vertex_blocks.append(mesh.vertices)
        face_blocks.append(mesh.faces + offset)
        offset += mesh.vertices.shape[0]
    if not offset:
        return None
    return trimesh.Trimesh(np.vstack(vertex_blocks), np.vstack(face_blocks))
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def get_component_mesh(component: physx.PhysxRigidBaseComponent, to_world_frame=True):
    """Merge a component's collision meshes into one mesh.

    Returns None when the component has no (finite) collision geometry.
    When to_world_frame is True the merged mesh is moved from the
    component's frame into the world frame using the component's pose.
    """
    merged = merge_meshes(get_component_meshes(component))
    if merged is None:
        return None
    if to_world_frame:
        merged.apply_transform(component.pose.to_transformation_matrix())
    return merged
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def get_actor_visual_mesh(actor: sapien.Entity):
    """Merge all of an actor's visual meshes into one mesh (actor frame).

    Returns None when the actor has no visual geometry.
    """
    # merge_meshes already returns None for an empty mesh list, so the
    # result can be returned directly without a redundant None check.
    return merge_meshes(get_actor_visual_meshes(actor))
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def get_articulation_meshes(
    articulation: physx.PhysxArticulation, exclude_link_names=()
):
    """Get link meshes in the world frame.

    Links whose names appear in exclude_link_names, and links without any
    collision geometry, are omitted from the result.
    """
    meshes = []
    for link in articulation.get_links():
        if link.name in exclude_link_names:
            continue
        link_mesh = get_component_mesh(link, True)
        if link_mesh is not None:
            meshes.append(link_mesh)
    return meshes
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/scene_builder/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from .scene_builder import SceneBuilder
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/scene_builder/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (267 Bytes). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/scene_builder/__pycache__/registration.cpython-310.pyc
ADDED
|
Binary file (1.82 kB). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/scene_builder/__pycache__/scene_builder.cpython-310.pyc
ADDED
|
Binary file (4.07 kB). View file
|
|
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/scene_builder/ai2thor/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .variants import (
|
| 2 |
+
ArchitecTHORSceneBuilder,
|
| 3 |
+
ProcTHORSceneBuilder,
|
| 4 |
+
RoboTHORSceneBuilder,
|
| 5 |
+
iTHORSceneBuilder,
|
| 6 |
+
)
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/utils/scene_builder/ai2thor/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (372 Bytes). View file
|
|
|