Add files using upload-large-folder tool
Browse files- tactile_tasks/collect_data.py +89 -24
- tactile_tasks/collect_keyboard.py +478 -0
- tactile_tasks/envs/cup_retrieval.py +277 -0
- tactile_tasks/envs/fragile_transport.py +285 -0
- tactile_tasks/envs/peg_insertion.py +1 -7
- tactile_tasks/envs/pen_insertion.py +309 -0
- tactile_tasks/eval_act.py +208 -0
- tactile_tasks/eval_dp.py +226 -0
tactile_tasks/collect_data.py
CHANGED
|
@@ -8,7 +8,7 @@ import os
|
|
| 8 |
import sys
|
| 9 |
import argparse
|
| 10 |
from datetime import datetime
|
| 11 |
-
|
| 12 |
import numpy as np
|
| 13 |
import h5py
|
| 14 |
|
|
@@ -198,7 +198,7 @@ def run_peg_insertion(env, sensor, recorder):
|
|
| 198 |
grasp_steps=30,
|
| 199 |
retry_dz=-0.012,
|
| 200 |
)
|
| 201 |
-
|
| 202 |
# ── Phase 2: Lift high ──────────────────────────────────────────────────
|
| 203 |
lift_pos = get_eef_pos(env).copy(); lift_pos[2] += 0.22
|
| 204 |
recorder.run_until(env, sensor, lambda: move_action(env, lift_pos, gain=5.0, gripper=1.0),
|
|
@@ -207,29 +207,29 @@ def run_peg_insertion(env, sensor, recorder):
|
|
| 207 |
# ── Phase 3: Move to above hole ────────────────────────────────────────
|
| 208 |
hole_pos = env.sim.data.body_xpos[env.hole_body_id].copy()
|
| 209 |
hole_xy = hole_pos[:2].copy()
|
| 210 |
-
|
|
|
|
| 211 |
above_hole = np.array([hole_xy[0], hole_xy[1], get_eef_pos(env)[2]])
|
| 212 |
recorder.run_until(env, sensor,
|
| 213 |
lambda: move_action(env, above_hole, gain=4.0, gripper=1.0),
|
| 214 |
done_fn=lambda: at_target(env, above_hole, 0.01), max_steps=120)
|
| 215 |
|
| 216 |
# ── Phase 4: Position align + gradual orientation correction ────────────
|
| 217 |
-
# First: precise xy alignment (no ori)
|
| 218 |
align_object_to_xy(
|
| 219 |
env, sensor, recorder,
|
| 220 |
get_obj_xy_fn=lambda: env.sim.data.body_xpos[env.peg_body_id][:2].copy(),
|
| 221 |
-
target_xy=
|
| 222 |
-
gripper=1.0, xy_tol=0.
|
| 223 |
)
|
| 224 |
|
| 225 |
-
# Gradual ori ramp
|
| 226 |
for step in range(200):
|
| 227 |
if recorder.done:
|
| 228 |
break
|
| 229 |
peg_xy = env.sim.data.body_xpos[env.peg_body_id][:2].copy()
|
| 230 |
eef = get_eef_pos(env)
|
| 231 |
target = eef.copy()
|
| 232 |
-
target[:2] += (
|
| 233 |
og = min(1.5, step / 150.0 * 1.5)
|
| 234 |
action = move_action(env, target, gain=10.0, gripper=1.0, ori_gain=og)
|
| 235 |
recorder.step(env, sensor, action)
|
|
@@ -238,7 +238,6 @@ def run_peg_insertion(env, sensor, recorder):
|
|
| 238 |
eef_settled = get_eef_pos(env).copy()
|
| 239 |
peg_settled = env.sim.data.body_xpos[env.peg_body_id].copy()
|
| 240 |
eef_to_peg = eef_settled - peg_settled
|
| 241 |
-
|
| 242 |
# ── Phase 5: Descend to just above hole ─────────────────────────────────
|
| 243 |
hole_top_z = hole_pos[2] + env.hole_height * 0.5
|
| 244 |
target_eef_z = hole_top_z + 0.005 + eef_to_peg[2] + env.peg_height
|
|
@@ -252,28 +251,63 @@ def run_peg_insertion(env, sensor, recorder):
|
|
| 252 |
max_steps=120, ori_gain=0.5,
|
| 253 |
)
|
| 254 |
|
| 255 |
-
|
| 256 |
-
|
|
|
|
|
|
|
|
|
|
| 257 |
if recorder.done:
|
| 258 |
break
|
|
|
|
|
|
|
|
|
|
|
|
|
| 259 |
peg_xy = env.sim.data.body_xpos[env.peg_body_id][:2].copy()
|
|
|
|
|
|
|
| 260 |
xy_err = hole_xy - peg_xy
|
| 261 |
ori = get_ori_correction(env, ori_gain=0.5)
|
|
|
|
| 262 |
action = np.zeros(7)
|
| 263 |
-
action[:2] = np.clip(xy_err * 15.0, -1, 1)
|
| 264 |
-
action[2] = -0.15
|
| 265 |
action[3:6] = ori
|
| 266 |
-
action[6] = 1.0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 267 |
recorder.step(env, sensor, action)
|
| 268 |
-
|
|
|
|
|
|
|
| 269 |
break
|
| 270 |
|
| 271 |
-
# ── Phase 7:
|
| 272 |
-
recorder.run_for(env, sensor, lambda: grip_action(env, -1.0), steps=
|
| 273 |
-
retreat = get_eef_pos(env).copy(); retreat[2] += 0.10
|
| 274 |
-
recorder.run_until(env, sensor, lambda: move_action(env, retreat, gain=4.0, gripper=-1),
|
| 275 |
-
done_fn=lambda: at_target(env, retreat, 0.01), max_steps=50)
|
| 276 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 277 |
|
| 278 |
def run_gentle_stack(env, sensor, recorder):
|
| 279 |
# ── Phase 1: Grasp box, measure real EEF→box offset ──────────────────────
|
|
@@ -343,7 +377,13 @@ def run_gentle_stack(env, sensor, recorder):
|
|
| 343 |
z_gain=3.0,
|
| 344 |
max_steps=120,
|
| 345 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 346 |
|
|
|
|
|
|
|
| 347 |
# ── Phase 5: Gentle final push until contact ──────────────────────────────
|
| 348 |
for _ in range(80):
|
| 349 |
if recorder.done:
|
|
@@ -474,33 +514,57 @@ TASK_CONFIGS = {
|
|
| 474 |
"precision_grasp": {
|
| 475 |
"env_class": "PrecisionGrasp",
|
| 476 |
"run_fn": run_precision_grasp,
|
| 477 |
-
"horizon":
|
| 478 |
"controller": "OSC_POSE",
|
| 479 |
},
|
| 480 |
"peg_insertion": {
|
| 481 |
"env_class": "PegInsertion",
|
| 482 |
"run_fn": run_peg_insertion,
|
| 483 |
-
"horizon":
|
| 484 |
"controller": "OSC_POSE",
|
| 485 |
},
|
| 486 |
"gentle_stack": {
|
| 487 |
"env_class": "GentleStack",
|
| 488 |
"run_fn": run_gentle_stack,
|
| 489 |
-
"horizon":
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 490 |
"controller": "OSC_POSE",
|
| 491 |
},
|
| 492 |
}
|
| 493 |
|
| 494 |
|
| 495 |
-
def create_env(task_name, has_renderer=False, camera_names=None):
|
| 496 |
from tactile_tasks.envs.precision_grasp import PrecisionGrasp
|
| 497 |
from tactile_tasks.envs.peg_insertion import PegInsertion
|
| 498 |
from tactile_tasks.envs.gentle_stack import GentleStack
|
|
|
|
|
|
|
|
|
|
| 499 |
|
| 500 |
env_classes = {
|
| 501 |
"PrecisionGrasp": PrecisionGrasp,
|
| 502 |
"PegInsertion": PegInsertion,
|
| 503 |
"GentleStack": GentleStack,
|
|
|
|
|
|
|
|
|
|
| 504 |
}
|
| 505 |
|
| 506 |
config = TASK_CONFIGS[task_name]
|
|
@@ -540,6 +604,7 @@ def create_env(task_name, has_renderer=False, camera_names=None):
|
|
| 540 |
controller_configs=controller_configs,
|
| 541 |
has_renderer=has_renderer,
|
| 542 |
has_offscreen_renderer=True,
|
|
|
|
| 543 |
use_camera_obs=True,
|
| 544 |
use_object_obs=True,
|
| 545 |
control_freq=20,
|
|
|
|
| 8 |
import sys
|
| 9 |
import argparse
|
| 10 |
from datetime import datetime
|
| 11 |
+
import math
|
| 12 |
import numpy as np
|
| 13 |
import h5py
|
| 14 |
|
|
|
|
| 198 |
grasp_steps=30,
|
| 199 |
retry_dz=-0.012,
|
| 200 |
)
|
| 201 |
+
# grasp_object结束后
|
| 202 |
# ── Phase 2: Lift high ──────────────────────────────────────────────────
|
| 203 |
lift_pos = get_eef_pos(env).copy(); lift_pos[2] += 0.22
|
| 204 |
recorder.run_until(env, sensor, lambda: move_action(env, lift_pos, gain=5.0, gripper=1.0),
|
|
|
|
| 207 |
# ── Phase 3: Move to above hole ────────────────────────────────────────
|
| 208 |
hole_pos = env.sim.data.body_xpos[env.hole_body_id].copy()
|
| 209 |
hole_xy = hole_pos[:2].copy()
|
| 210 |
+
hole_xy_target = hole_xy.copy()
|
| 211 |
+
hole_xy_target[1] += eef_to_peg[1]
|
| 212 |
above_hole = np.array([hole_xy[0], hole_xy[1], get_eef_pos(env)[2]])
|
| 213 |
recorder.run_until(env, sensor,
|
| 214 |
lambda: move_action(env, above_hole, gain=4.0, gripper=1.0),
|
| 215 |
done_fn=lambda: at_target(env, above_hole, 0.01), max_steps=120)
|
| 216 |
|
| 217 |
# ── Phase 4: Position align + gradual orientation correction ────────────
|
|
|
|
| 218 |
align_object_to_xy(
|
| 219 |
env, sensor, recorder,
|
| 220 |
get_obj_xy_fn=lambda: env.sim.data.body_xpos[env.peg_body_id][:2].copy(),
|
| 221 |
+
target_xy=hole_xy_target,
|
| 222 |
+
gripper=1.0, xy_tol=0.001, max_steps=500, gain=15.0, ori_gain=0.0,
|
| 223 |
)
|
| 224 |
|
| 225 |
+
# Gradual ori ramp — use hole_xy_target throughout for consistency
|
| 226 |
for step in range(200):
|
| 227 |
if recorder.done:
|
| 228 |
break
|
| 229 |
peg_xy = env.sim.data.body_xpos[env.peg_body_id][:2].copy()
|
| 230 |
eef = get_eef_pos(env)
|
| 231 |
target = eef.copy()
|
| 232 |
+
target[:2] += (hole_xy_target - peg_xy) * 3.0
|
| 233 |
og = min(1.5, step / 150.0 * 1.5)
|
| 234 |
action = move_action(env, target, gain=10.0, gripper=1.0, ori_gain=og)
|
| 235 |
recorder.step(env, sensor, action)
|
|
|
|
| 238 |
eef_settled = get_eef_pos(env).copy()
|
| 239 |
peg_settled = env.sim.data.body_xpos[env.peg_body_id].copy()
|
| 240 |
eef_to_peg = eef_settled - peg_settled
|
|
|
|
| 241 |
# ── Phase 5: Descend to just above hole ─────────────────────────────────
|
| 242 |
hole_top_z = hole_pos[2] + env.hole_height * 0.5
|
| 243 |
target_eef_z = hole_top_z + 0.005 + eef_to_peg[2] + env.peg_height
|
|
|
|
| 251 |
max_steps=120, ori_gain=0.5,
|
| 252 |
)
|
| 253 |
|
| 254 |
+
search_radius = 0.002
|
| 255 |
+
search_angle = 0.0
|
| 256 |
+
stuck_steps = 0
|
| 257 |
+
|
| 258 |
+
for step in range(400):
|
| 259 |
if recorder.done:
|
| 260 |
break
|
| 261 |
+
|
| 262 |
+
mags = sensor.get_force_magnitudes()
|
| 263 |
+
total_force = mags["left_finger"].mean() + mags["right_finger"].mean()
|
| 264 |
+
|
| 265 |
peg_xy = env.sim.data.body_xpos[env.peg_body_id][:2].copy()
|
| 266 |
+
peg_z = env.sim.data.body_xpos[env.peg_body_id][2]
|
| 267 |
+
hole_z = env.sim.data.body_xpos[env.hole_body_id][2]
|
| 268 |
xy_err = hole_xy - peg_xy
|
| 269 |
ori = get_ori_correction(env, ori_gain=0.5)
|
| 270 |
+
|
| 271 |
action = np.zeros(7)
|
|
|
|
|
|
|
| 272 |
action[3:6] = ori
|
| 273 |
+
action[6] = 1.0 # 保持抓住
|
| 274 |
+
|
| 275 |
+
if total_force > 0.8:
|
| 276 |
+
stuck_steps += 1
|
| 277 |
+
search_angle += 0.4
|
| 278 |
+
sx = search_radius * math.cos(search_angle)
|
| 279 |
+
sy = search_radius * math.sin(search_angle)
|
| 280 |
+
action[0] = np.clip(xy_err[0] * 15.0 + sx * 20.0, -1, 1)
|
| 281 |
+
action[1] = np.clip(xy_err[1] * 15.0 + sy * 20.0, -1, 1)
|
| 282 |
+
action[2] = +0.05
|
| 283 |
+
if stuck_steps > 20:
|
| 284 |
+
search_radius = min(0.003, search_radius + 0.0002)
|
| 285 |
+
else:
|
| 286 |
+
stuck_steps = 0
|
| 287 |
+
action[0] = np.clip(xy_err[0] * 15.0, -1, 1)
|
| 288 |
+
action[1] = np.clip(xy_err[1] * 15.0, -1, 1)
|
| 289 |
+
action[2] = -0.08
|
| 290 |
+
|
| 291 |
recorder.step(env, sensor, action)
|
| 292 |
+
|
| 293 |
+
# peg插入足够深再停,不依赖_check_success
|
| 294 |
+
if peg_z < hole_z - env.peg_height * 0.3:
|
| 295 |
break
|
| 296 |
|
| 297 |
+
# ── Phase 7: 松手,等peg稳定,再退出 ────────────────────────────────────
|
| 298 |
+
recorder.run_for(env, sensor, lambda: grip_action(env, -1.0), steps=20)
|
|
|
|
|
|
|
|
|
|
| 299 |
|
| 300 |
+
# 等peg自然稳定
|
| 301 |
+
eef_hold = get_eef_pos(env).copy()
|
| 302 |
+
recorder.run_for(env, sensor,
|
| 303 |
+
lambda: move_action(env, eef_hold, gain=5.0, gripper=-1),
|
| 304 |
+
steps=30)
|
| 305 |
+
|
| 306 |
+
retreat = get_eef_pos(env).copy(); retreat[2] += 0.10
|
| 307 |
+
recorder.run_until(env, sensor,
|
| 308 |
+
lambda: move_action(env, retreat, gain=4.0, gripper=-1),
|
| 309 |
+
done_fn=lambda: at_target(env, retreat, 0.01),
|
| 310 |
+
max_steps=50)
|
| 311 |
|
| 312 |
def run_gentle_stack(env, sensor, recorder):
|
| 313 |
# ── Phase 1: Grasp box, measure real EEF→box offset ──────────────────────
|
|
|
|
| 377 |
z_gain=3.0,
|
| 378 |
max_steps=120,
|
| 379 |
)
|
| 380 |
+
# Re-measure offset with final orientation
|
| 381 |
+
eef_settled = get_eef_pos(env).copy()
|
| 382 |
+
peg_settled = env.sim.data.body_xpos[env.peg_body_id].copy()
|
| 383 |
+
eef_to_peg = eef_settled - peg_settled
|
| 384 |
|
| 385 |
+
# 加这行
|
| 386 |
+
print(f"对齐后误差: {hole_xy - env.sim.data.body_xpos[env.peg_body_id][:2]}")
|
| 387 |
# ── Phase 5: Gentle final push until contact ──────────────────────────────
|
| 388 |
for _ in range(80):
|
| 389 |
if recorder.done:
|
|
|
|
| 514 |
"precision_grasp": {
|
| 515 |
"env_class": "PrecisionGrasp",
|
| 516 |
"run_fn": run_precision_grasp,
|
| 517 |
+
"horizon": 3000,
|
| 518 |
"controller": "OSC_POSE",
|
| 519 |
},
|
| 520 |
"peg_insertion": {
|
| 521 |
"env_class": "PegInsertion",
|
| 522 |
"run_fn": run_peg_insertion,
|
| 523 |
+
"horizon": 5000,
|
| 524 |
"controller": "OSC_POSE",
|
| 525 |
},
|
| 526 |
"gentle_stack": {
|
| 527 |
"env_class": "GentleStack",
|
| 528 |
"run_fn": run_gentle_stack,
|
| 529 |
+
"horizon": 5000,
|
| 530 |
+
"controller": "OSC_POSE",
|
| 531 |
+
},
|
| 532 |
+
"fragile_transport": {
|
| 533 |
+
"env_class": "FragileTransport",
|
| 534 |
+
"run_fn": None, # keyboard-only collection
|
| 535 |
+
"horizon": 5000,
|
| 536 |
+
"controller": "OSC_POSE",
|
| 537 |
+
},
|
| 538 |
+
"cup_retrieval": {
|
| 539 |
+
"env_class": "CupRetrieval",
|
| 540 |
+
"run_fn": None,
|
| 541 |
+
"horizon": 5000,
|
| 542 |
+
"controller": "OSC_POSE",
|
| 543 |
+
},
|
| 544 |
+
"pen_insertion": {
|
| 545 |
+
"env_class": "PenInsertion",
|
| 546 |
+
"run_fn": None,
|
| 547 |
+
"horizon": 5000,
|
| 548 |
"controller": "OSC_POSE",
|
| 549 |
},
|
| 550 |
}
|
| 551 |
|
| 552 |
|
| 553 |
+
def create_env(task_name, has_renderer=False, camera_names=None, render_camera="agentview"):
|
| 554 |
from tactile_tasks.envs.precision_grasp import PrecisionGrasp
|
| 555 |
from tactile_tasks.envs.peg_insertion import PegInsertion
|
| 556 |
from tactile_tasks.envs.gentle_stack import GentleStack
|
| 557 |
+
from tactile_tasks.envs.fragile_transport import FragileTransport
|
| 558 |
+
from tactile_tasks.envs.cup_retrieval import CupRetrieval
|
| 559 |
+
from tactile_tasks.envs.pen_insertion import PenInsertion
|
| 560 |
|
| 561 |
env_classes = {
|
| 562 |
"PrecisionGrasp": PrecisionGrasp,
|
| 563 |
"PegInsertion": PegInsertion,
|
| 564 |
"GentleStack": GentleStack,
|
| 565 |
+
"FragileTransport": FragileTransport,
|
| 566 |
+
"CupRetrieval": CupRetrieval,
|
| 567 |
+
"PenInsertion": PenInsertion,
|
| 568 |
}
|
| 569 |
|
| 570 |
config = TASK_CONFIGS[task_name]
|
|
|
|
| 604 |
controller_configs=controller_configs,
|
| 605 |
has_renderer=has_renderer,
|
| 606 |
has_offscreen_renderer=True,
|
| 607 |
+
render_camera=render_camera,
|
| 608 |
use_camera_obs=True,
|
| 609 |
use_object_obs=True,
|
| 610 |
control_freq=20,
|
tactile_tasks/collect_keyboard.py
ADDED
|
@@ -0,0 +1,478 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Keyboard teleoperation with smooth target-tracking control.
|
| 4 |
+
|
| 5 |
+
Instead of raw delta commands (jerky, hard to control), this maintains a
|
| 6 |
+
"target EEF position" that the robot smoothly tracks via proportional control.
|
| 7 |
+
Keyboard inputs move the target, not the robot directly.
|
| 8 |
+
|
| 9 |
+
Controls:
|
| 10 |
+
Arrow keys : move target in x-y plane (3mm per tap)
|
| 11 |
+
. / ; : move target down / up
|
| 12 |
+
o / p : rotate yaw
|
| 13 |
+
y / h : rotate pitch
|
| 14 |
+
e / r : rotate roll
|
| 15 |
+
Space : toggle gripper open/close
|
| 16 |
+
1 : snap target to object (nearest graspable body)
|
| 17 |
+
2 : lift target +10cm
|
| 18 |
+
3 : descend target 3cm
|
| 19 |
+
Enter : finish episode (save)
|
| 20 |
+
Ctrl+q / Esc : discard episode and reset
|
| 21 |
+
+/- : increase/decrease step size
|
| 22 |
+
|
| 23 |
+
Usage:
|
| 24 |
+
python tactile_tasks/collect_keyboard.py --task fragile_transport --num_episodes 1 --save_all
|
| 25 |
+
"""
|
| 26 |
+
|
| 27 |
+
import os
|
| 28 |
+
import sys
|
| 29 |
+
import argparse
|
| 30 |
+
import time
|
| 31 |
+
import threading
|
| 32 |
+
|
| 33 |
+
import numpy as np
|
| 34 |
+
import cv2
|
| 35 |
+
import mujoco
|
| 36 |
+
from pynput.keyboard import Key, Listener
|
| 37 |
+
|
| 38 |
+
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
| 39 |
+
|
| 40 |
+
from tactile_tasks.collect_data import (
|
| 41 |
+
create_env, TASK_CONFIGS, EpisodeRecorder, save_episode_hdf5,
|
| 42 |
+
)
|
| 43 |
+
from tactile_tasks.uskin_sensor import USkinSensor
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class SmoothKeyboardController:
|
| 47 |
+
"""
|
| 48 |
+
Target-tracking keyboard controller.
|
| 49 |
+
|
| 50 |
+
Maintains a target EEF pose. Keyboard inputs move the target;
|
| 51 |
+
a proportional controller generates smooth OSC_POSE actions.
|
| 52 |
+
"""
|
| 53 |
+
|
| 54 |
+
def __init__(self, env, pos_step=0.015, rot_step=0.08, pos_gain=15.0, ori_gain=3.0):
|
| 55 |
+
self.env = env
|
| 56 |
+
self.pos_step = pos_step # meters per keypress
|
| 57 |
+
self.rot_step = rot_step # radians per keypress
|
| 58 |
+
self.pos_gain = pos_gain
|
| 59 |
+
self.ori_gain = ori_gain
|
| 60 |
+
|
| 61 |
+
# State
|
| 62 |
+
self.target_pos = self._get_eef_pos()
|
| 63 |
+
self.gripper_action = -1.0 # -1=open, 1=closed
|
| 64 |
+
self.ori_delta = np.zeros(3) # accumulated orientation delta
|
| 65 |
+
self._reset_flag = False
|
| 66 |
+
self._save_flag = False
|
| 67 |
+
self._quit_flag = False
|
| 68 |
+
self._started = False # first movement detected
|
| 69 |
+
|
| 70 |
+
# Key state tracking for held keys
|
| 71 |
+
self._held_keys = set()
|
| 72 |
+
self._lock = threading.Lock()
|
| 73 |
+
|
| 74 |
+
# Start listener
|
| 75 |
+
self.listener = Listener(on_press=self._on_press, on_release=self._on_release)
|
| 76 |
+
self.listener.start()
|
| 77 |
+
|
| 78 |
+
def _get_eef_pos(self):
|
| 79 |
+
return self.env.sim.data.site_xpos[
|
| 80 |
+
self.env.robots[0].eef_site_id["right"]
|
| 81 |
+
].copy()
|
| 82 |
+
|
| 83 |
+
def _get_eef_mat(self):
|
| 84 |
+
return self.env.sim.data.site_xmat[
|
| 85 |
+
self.env.robots[0].eef_site_id["right"]
|
| 86 |
+
].reshape(3, 3).copy()
|
| 87 |
+
|
| 88 |
+
def _get_ori_correction(self):
|
| 89 |
+
"""Compute orientation delta to maintain vertical (z down)."""
|
| 90 |
+
z_axis = self._get_eef_mat()[:, 2]
|
| 91 |
+
target_z = np.array([0.0, 0.0, -1.0])
|
| 92 |
+
cross = np.cross(z_axis, target_z)
|
| 93 |
+
sin_a = np.linalg.norm(cross)
|
| 94 |
+
if sin_a < 1e-6:
|
| 95 |
+
return np.zeros(3)
|
| 96 |
+
axis = cross / sin_a
|
| 97 |
+
cos_a = np.dot(z_axis, target_z)
|
| 98 |
+
angle = np.arctan2(sin_a, cos_a)
|
| 99 |
+
return axis * angle
|
| 100 |
+
|
| 101 |
+
def _find_nearest_object(self):
|
| 102 |
+
"""Find nearest object body position for snap-to functionality."""
|
| 103 |
+
eef = self._get_eef_pos()
|
| 104 |
+
best_pos = None
|
| 105 |
+
best_dist = float('inf')
|
| 106 |
+
|
| 107 |
+
# Look for known object body IDs on the env
|
| 108 |
+
for attr in dir(self.env):
|
| 109 |
+
if attr.endswith('_body_id') and attr not in ('_body_id',):
|
| 110 |
+
try:
|
| 111 |
+
bid = getattr(self.env, attr)
|
| 112 |
+
if isinstance(bid, (int, np.integer)):
|
| 113 |
+
pos = self.env.sim.data.body_xpos[bid].copy()
|
| 114 |
+
d = np.linalg.norm(pos - eef)
|
| 115 |
+
if d < best_dist:
|
| 116 |
+
best_dist = d
|
| 117 |
+
best_pos = pos
|
| 118 |
+
except:
|
| 119 |
+
pass
|
| 120 |
+
|
| 121 |
+
return best_pos
|
| 122 |
+
|
| 123 |
+
def _on_press(self, key):
|
| 124 |
+
with self._lock:
|
| 125 |
+
try:
|
| 126 |
+
if key == Key.up:
|
| 127 |
+
self._held_keys.add('up')
|
| 128 |
+
elif key == Key.down:
|
| 129 |
+
self._held_keys.add('down')
|
| 130 |
+
elif key == Key.left:
|
| 131 |
+
self._held_keys.add('left')
|
| 132 |
+
elif key == Key.right:
|
| 133 |
+
self._held_keys.add('right')
|
| 134 |
+
elif hasattr(key, 'char'):
|
| 135 |
+
if key.char == '.':
|
| 136 |
+
self._held_keys.add('z_down')
|
| 137 |
+
elif key.char == ';':
|
| 138 |
+
self._held_keys.add('z_up')
|
| 139 |
+
elif key.char == 'o':
|
| 140 |
+
self._held_keys.add('yaw_left')
|
| 141 |
+
elif key.char == 'p':
|
| 142 |
+
self._held_keys.add('yaw_right')
|
| 143 |
+
elif key.char == 'y':
|
| 144 |
+
self._held_keys.add('pitch_up')
|
| 145 |
+
elif key.char == 'h':
|
| 146 |
+
self._held_keys.add('pitch_down')
|
| 147 |
+
elif key.char == 'e':
|
| 148 |
+
self._held_keys.add('roll_left')
|
| 149 |
+
elif key.char == 'r':
|
| 150 |
+
self._held_keys.add('roll_right')
|
| 151 |
+
except:
|
| 152 |
+
pass
|
| 153 |
+
|
| 154 |
+
def _on_release(self, key):
|
| 155 |
+
with self._lock:
|
| 156 |
+
try:
|
| 157 |
+
if key == Key.up:
|
| 158 |
+
self._held_keys.discard('up')
|
| 159 |
+
elif key == Key.down:
|
| 160 |
+
self._held_keys.discard('down')
|
| 161 |
+
elif key == Key.left:
|
| 162 |
+
self._held_keys.discard('left')
|
| 163 |
+
elif key == Key.right:
|
| 164 |
+
self._held_keys.discard('right')
|
| 165 |
+
elif key == Key.space:
|
| 166 |
+
self.gripper_action *= -1.0 # toggle
|
| 167 |
+
self._started = True
|
| 168 |
+
elif key == Key.enter:
|
| 169 |
+
self._save_flag = True
|
| 170 |
+
elif key == Key.esc:
|
| 171 |
+
self._quit_flag = True
|
| 172 |
+
elif hasattr(key, 'char'):
|
| 173 |
+
if key.char == '.':
|
| 174 |
+
self._held_keys.discard('z_down')
|
| 175 |
+
elif key.char == ';':
|
| 176 |
+
self._held_keys.discard('z_up')
|
| 177 |
+
elif key.char == 'o':
|
| 178 |
+
self._held_keys.discard('yaw_left')
|
| 179 |
+
elif key.char == 'p':
|
| 180 |
+
self._held_keys.discard('yaw_right')
|
| 181 |
+
elif key.char == 'y':
|
| 182 |
+
self._held_keys.discard('pitch_up')
|
| 183 |
+
elif key.char == 'h':
|
| 184 |
+
self._held_keys.discard('pitch_down')
|
| 185 |
+
elif key.char == 'e':
|
| 186 |
+
self._held_keys.discard('roll_left')
|
| 187 |
+
elif key.char == 'r':
|
| 188 |
+
self._held_keys.discard('roll_right')
|
| 189 |
+
elif key.char == 'q':
|
| 190 |
+
self._reset_flag = True
|
| 191 |
+
elif key.char == '1':
|
| 192 |
+
# Snap to nearest object
|
| 193 |
+
obj_pos = self._find_nearest_object()
|
| 194 |
+
if obj_pos is not None:
|
| 195 |
+
above = obj_pos.copy()
|
| 196 |
+
above[2] += 0.06 # 6cm above object
|
| 197 |
+
self.target_pos = above
|
| 198 |
+
self._started = True
|
| 199 |
+
print(f" -> Snap to object at {obj_pos[:2].round(3)}")
|
| 200 |
+
elif key.char == '2':
|
| 201 |
+
# Lift
|
| 202 |
+
self.target_pos[2] += 0.10
|
| 203 |
+
self._started = True
|
| 204 |
+
print(" -> Lift +10cm")
|
| 205 |
+
elif key.char == '3':
|
| 206 |
+
# Descend
|
| 207 |
+
self.target_pos[2] -= 0.03
|
| 208 |
+
self._started = True
|
| 209 |
+
print(" -> Descend -3cm")
|
| 210 |
+
elif key.char == '+' or key.char == '=':
|
| 211 |
+
self.pos_step = min(0.01, self.pos_step * 1.5)
|
| 212 |
+
print(f" -> Step size: {self.pos_step*1000:.1f}mm")
|
| 213 |
+
elif key.char == '-':
|
| 214 |
+
self.pos_step = max(0.001, self.pos_step / 1.5)
|
| 215 |
+
print(f" -> Step size: {self.pos_step*1000:.1f}mm")
|
| 216 |
+
except:
|
| 217 |
+
pass
|
| 218 |
+
|
| 219 |
+
def process_held_keys(self):
|
| 220 |
+
"""Process continuously held keys - called each control step."""
|
| 221 |
+
with self._lock:
|
| 222 |
+
moved = False
|
| 223 |
+
for k in self._held_keys:
|
| 224 |
+
if k == 'up':
|
| 225 |
+
self.target_pos[0] -= self.pos_step
|
| 226 |
+
moved = True
|
| 227 |
+
elif k == 'down':
|
| 228 |
+
self.target_pos[0] += self.pos_step
|
| 229 |
+
moved = True
|
| 230 |
+
elif k == 'left':
|
| 231 |
+
self.target_pos[1] -= self.pos_step
|
| 232 |
+
moved = True
|
| 233 |
+
elif k == 'right':
|
| 234 |
+
self.target_pos[1] += self.pos_step
|
| 235 |
+
moved = True
|
| 236 |
+
elif k == 'z_down':
|
| 237 |
+
self.target_pos[2] -= self.pos_step
|
| 238 |
+
moved = True
|
| 239 |
+
elif k == 'z_up':
|
| 240 |
+
self.target_pos[2] += self.pos_step
|
| 241 |
+
moved = True
|
| 242 |
+
elif k == 'yaw_left':
|
| 243 |
+
self.ori_delta[2] -= self.rot_step
|
| 244 |
+
moved = True
|
| 245 |
+
elif k == 'yaw_right':
|
| 246 |
+
self.ori_delta[2] += self.rot_step
|
| 247 |
+
moved = True
|
| 248 |
+
elif k == 'pitch_up':
|
| 249 |
+
self.ori_delta[0] += self.rot_step
|
| 250 |
+
moved = True
|
| 251 |
+
elif k == 'pitch_down':
|
| 252 |
+
self.ori_delta[0] -= self.rot_step
|
| 253 |
+
moved = True
|
| 254 |
+
elif k == 'roll_left':
|
| 255 |
+
self.ori_delta[1] -= self.rot_step
|
| 256 |
+
moved = True
|
| 257 |
+
elif k == 'roll_right':
|
| 258 |
+
self.ori_delta[1] += self.rot_step
|
| 259 |
+
moved = True
|
| 260 |
+
|
| 261 |
+
if moved and not self._started:
|
| 262 |
+
self._started = True
|
| 263 |
+
|
| 264 |
+
def get_action(self):
|
| 265 |
+
"""Generate smooth OSC_POSE action tracking target pose."""
|
| 266 |
+
self.process_held_keys()
|
| 267 |
+
|
| 268 |
+
eef_pos = self._get_eef_pos()
|
| 269 |
+
pos_error = self.target_pos - eef_pos
|
| 270 |
+
|
| 271 |
+
# Proportional control with capping to avoid overshoot
|
| 272 |
+
pos_action = pos_error * self.pos_gain
|
| 273 |
+
output_max = 0.15
|
| 274 |
+
error_norm = np.linalg.norm(pos_error)
|
| 275 |
+
if error_norm > 0.001:
|
| 276 |
+
max_mag = 0.7 * error_norm / output_max
|
| 277 |
+
mag = np.linalg.norm(pos_action)
|
| 278 |
+
if mag > max_mag:
|
| 279 |
+
pos_action = pos_action * (max_mag / mag)
|
| 280 |
+
pos_action = np.clip(pos_action, -1.0, 1.0)
|
| 281 |
+
|
| 282 |
+
# Orientation: maintain vertical + apply user's rotation delta
|
| 283 |
+
ori_action = self._get_ori_correction() * self.ori_gain
|
| 284 |
+
ori_action += self.ori_delta
|
| 285 |
+
self.ori_delta *= 0.85 # decay rotation commands
|
| 286 |
+
ori_action = np.clip(ori_action, -1.0, 1.0)
|
| 287 |
+
|
| 288 |
+
return np.concatenate([pos_action, ori_action, [self.gripper_action]])
|
| 289 |
+
|
| 290 |
+
def reset_target(self):
|
| 291 |
+
"""Reset target to current EEF position."""
|
| 292 |
+
self.target_pos = self._get_eef_pos()
|
| 293 |
+
self.ori_delta = np.zeros(3)
|
| 294 |
+
self._reset_flag = False
|
| 295 |
+
self._save_flag = False
|
| 296 |
+
self._started = False
|
| 297 |
+
|
| 298 |
+
def stop(self):
|
| 299 |
+
self.listener.stop()
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
def render_multi_view(env, cam_size=256):
|
| 303 |
+
"""Render multiple camera views tiled in one OpenCV window."""
|
| 304 |
+
cameras = ["agentview", "sideview", "robot0_eye_in_hand"]
|
| 305 |
+
imgs = []
|
| 306 |
+
for cam in cameras:
|
| 307 |
+
try:
|
| 308 |
+
img = env.sim.render(
|
| 309 |
+
camera_name=cam, height=cam_size, width=cam_size,
|
| 310 |
+
)[::-1] # flip vertically (MuJoCo convention)
|
| 311 |
+
img = np.ascontiguousarray(img[..., ::-1]) # RGB -> BGR for OpenCV
|
| 312 |
+
except Exception:
|
| 313 |
+
img = np.zeros((cam_size, cam_size, 3), dtype=np.uint8)
|
| 314 |
+
# Add camera label
|
| 315 |
+
cv2.putText(img, cam, (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
|
| 316 |
+
imgs.append(img)
|
| 317 |
+
|
| 318 |
+
# Tile horizontally
|
| 319 |
+
tiled = np.concatenate(imgs, axis=1)
|
| 320 |
+
cv2.imshow("Multi-View", tiled)
|
| 321 |
+
cv2.waitKey(1)
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
def collect_episode(env, controller, sensor, max_steps, max_fr=20):
|
| 325 |
+
"""Collect one episode with smooth keyboard control."""
|
| 326 |
+
env.reset()
|
| 327 |
+
env.render()
|
| 328 |
+
|
| 329 |
+
# Let objects settle
|
| 330 |
+
for _ in range(50):
|
| 331 |
+
env.step(np.zeros(env.action_dim))
|
| 332 |
+
|
| 333 |
+
controller.reset_target()
|
| 334 |
+
recorder = EpisodeRecorder(env, sensor)
|
| 335 |
+
|
| 336 |
+
print(f"\n Controls: arrows=xy ;/.=z space=gripper 1=snap-to-obj 2=lift 3=descend")
|
| 337 |
+
print(f" Enter=save Ctrl+q=discard +/-=step size ({controller.pos_step*1000:.0f}mm)")
|
| 338 |
+
print(f" Waiting for first input... (max {max_steps} steps after start)")
|
| 339 |
+
|
| 340 |
+
status = "discard"
|
| 341 |
+
active_steps = 0
|
| 342 |
+
view_counter = 0
|
| 343 |
+
|
| 344 |
+
while True:
|
| 345 |
+
start = time.time()
|
| 346 |
+
|
| 347 |
+
if controller._reset_flag:
|
| 348 |
+
status = "discard"
|
| 349 |
+
break
|
| 350 |
+
|
| 351 |
+
if controller._quit_flag:
|
| 352 |
+
status = "quit"
|
| 353 |
+
break
|
| 354 |
+
|
| 355 |
+
if controller._save_flag:
|
| 356 |
+
status = "done"
|
| 357 |
+
break
|
| 358 |
+
|
| 359 |
+
action = controller.get_action()
|
| 360 |
+
recorder.step(env, sensor, action)
|
| 361 |
+
env.render()
|
| 362 |
+
|
| 363 |
+
# Multi-view every 3 frames (save GPU)
|
| 364 |
+
view_counter += 1
|
| 365 |
+
if view_counter % 3 == 0:
|
| 366 |
+
render_multi_view(env)
|
| 367 |
+
|
| 368 |
+
if controller._started:
|
| 369 |
+
active_steps += 1
|
| 370 |
+
|
| 371 |
+
if env._check_success():
|
| 372 |
+
if active_steps % 50 == 1:
|
| 373 |
+
print(f" Task success detected at step {active_steps}!")
|
| 374 |
+
|
| 375 |
+
if recorder.done:
|
| 376 |
+
status = "done"
|
| 377 |
+
break
|
| 378 |
+
|
| 379 |
+
if controller._started and active_steps >= max_steps:
|
| 380 |
+
print(f" Max steps ({max_steps}) reached.")
|
| 381 |
+
status = "done"
|
| 382 |
+
break
|
| 383 |
+
|
| 384 |
+
if controller._started and active_steps % 100 == 0:
|
| 385 |
+
eef = controller._get_eef_pos()
|
| 386 |
+
tgt = controller.target_pos
|
| 387 |
+
dist = np.linalg.norm(eef - tgt)
|
| 388 |
+
grip = "CLOSED" if controller.gripper_action > 0 else "open"
|
| 389 |
+
print(f" step {active_steps}/{max_steps} "
|
| 390 |
+
f"eef=[{eef[0]:.3f},{eef[1]:.3f},{eef[2]:.3f}] "
|
| 391 |
+
f"err={dist:.3f}m grip={grip}")
|
| 392 |
+
|
| 393 |
+
if max_fr is not None:
|
| 394 |
+
elapsed = time.time() - start
|
| 395 |
+
diff = 1.0 / max_fr - elapsed
|
| 396 |
+
if diff > 0:
|
| 397 |
+
time.sleep(diff)
|
| 398 |
+
|
| 399 |
+
cv2.destroyAllWindows()
|
| 400 |
+
recorder.finalize()
|
| 401 |
+
return recorder, status
|
| 402 |
+
|
| 403 |
+
|
| 404 |
+
def main():
    """Entry point: collect keyboard-teleoperated episodes and save them as HDF5.

    Loops until ``--num_episodes`` episodes have been written to ``--save_dir``.
    By default only successful episodes are kept; pass ``--save_all`` to keep
    failed-but-completed ones too. Discarded/quit episodes never count.
    """
    parser = argparse.ArgumentParser(description="Smooth keyboard teleoperation")
    parser.add_argument("--task", type=str, required=True,
                        choices=list(TASK_CONFIGS.keys()))
    parser.add_argument("--num_episodes", type=int, default=10)
    parser.add_argument("--save_dir", type=str, default=None)
    parser.add_argument("--save_all", action="store_true",
                        help="save all episodes, not just successful ones")
    parser.add_argument("--max_fr", type=int, default=20, help="max frame rate")
    parser.add_argument("--max_steps", type=int, default=None,
                        help="override max steps (default: from task config)")
    parser.add_argument("--step_size", type=float, default=10.0,
                        help="movement step size in mm (default: 10)")
    parser.add_argument("--render_camera", type=str, default="agentview",
                        help="initial camera view (agentview/sideview/frontview)")
    args = parser.parse_args()

    task = args.task
    save_dir = args.save_dir or os.path.join("tactile_data", f"{task}_keyboard")
    os.makedirs(save_dir, exist_ok=True)

    # Count pre-existing episodes so new files continue the numbering.
    existing = len([f for f in os.listdir(save_dir) if f.endswith(".hdf5")])
    print(f"Saving to: {save_dir} (existing: {existing} episodes)")

    env = create_env(task, has_renderer=True, render_camera=args.render_camera)
    env.reset()
    sensor = USkinSensor(env.sim)
    controller = SmoothKeyboardController(
        env, pos_step=args.step_size / 1000.0,  # mm -> m
    )

    max_steps = args.max_steps or TASK_CONFIGS[task]["horizon"]
    saved = 0
    attempted = 0

    try:
        while saved < args.num_episodes:
            attempted += 1
            print(f"\n{'='*50}")
            print(f" Episode {attempted} (saved: {saved}/{args.num_episodes})")
            print(f"{'='*50}")

            recorder, status = collect_episode(
                env, controller, sensor, max_steps, args.max_fr,
            )

            if status == "quit":
                print(" Quit.")
                break

            if status == "discard":
                print(" Discarded.")
                continue

            success = recorder.data.get("task_success", False)

            if not success and not args.save_all:
                print(" Task failed. Discarding (use --save_all to keep).")
                continue

            # Counting files undershoots the highest index when existing
            # episode numbers are non-contiguous — never overwrite a file.
            ep_idx = existing + saved
            filepath = os.path.join(save_dir, f"episode_{ep_idx:02d}.hdf5")
            while os.path.exists(filepath):
                ep_idx += 1
                filepath = os.path.join(save_dir, f"episode_{ep_idx:02d}.hdf5")
            save_episode_hdf5(filepath, recorder.data, task)
            saved += 1
            tag = "SUCCESS" if success else "saved"
            print(f" {tag} -> {filepath} ({recorder.step_count} steps)")

    finally:
        # Always release the keyboard listener and the sim, even on Ctrl-C.
        controller.stop()
        print(f"\nDone! Saved {saved} episodes to {save_dir}")
        env.close()


if __name__ == "__main__":
    main()
|
tactile_tasks/envs/cup_retrieval.py
ADDED
|
@@ -0,0 +1,277 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Task: Cup Retrieval (Upright)
|
| 3 |
+
|
| 4 |
+
Contact-rich task: Grasp an upside-down cup and flip it right-side up.
|
| 5 |
+
Requires careful reorientation while maintaining grip — tactile feedback prevents slip.
|
| 6 |
+
|
| 7 |
+
Objects: Cup (021_cup) — placed upside-down on its rim
|
| 8 |
+
Success: Cup standing upright on table, gripper released, cup stable
|
| 9 |
+
Tactile role: Detect slip during rotation, adjust grip force for reorientation
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
from collections import OrderedDict
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
import mujoco
|
| 16 |
+
|
| 17 |
+
from robosuite.environments.manipulation.manipulation_env import ManipulationEnv
|
| 18 |
+
from robosuite.models.arenas import TableArena
|
| 19 |
+
from robosuite.models.objects.objects import MujocoXMLObject
|
| 20 |
+
from robosuite.models.tasks import ManipulationTask
|
| 21 |
+
from robosuite.utils.observables import Observable, sensor
|
| 22 |
+
from robosuite.utils.mjcf_utils import xml_path_completion
|
| 23 |
+
from robosuite.utils.placement_samplers import UniformRandomSampler
|
| 24 |
+
from robosuite.utils.transform_utils import convert_quat
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class CupRetrieval(ManipulationEnv):
    """
    Cup retrieval task: grasp an upside-down cup and flip it right-side up.

    The cup starts upside-down on its rim. The robot must grasp it,
    flip it 180°, and place it standing upright on the table.
    This is contact-rich because:
    - Grasping an inverted cup requires careful approach angle
    - 180° flip requires maintaining grip while rotating
    - Placement requires detecting table contact via tactile
    """

    def __init__(
        self,
        robots,
        env_configuration="default",
        controller_configs=None,
        gripper_types="Robotiq85Gripper",
        base_types="default",
        initialization_noise="default",
        table_full_size=(0.8, 0.8, 0.05),
        table_friction=(1.0, 5e-3, 1e-4),
        use_camera_obs=True,
        use_object_obs=True,
        reward_scale=1.0,
        reward_shaping=True,
        placement_initializer=None,
        has_renderer=False,
        has_offscreen_renderer=True,
        render_camera="frontview",
        render_collision_mesh=False,
        render_visual_mesh=True,
        render_gpu_device_id=-1,
        control_freq=20,
        lite_physics=True,
        horizon=600,
        ignore_done=False,
        hard_reset=True,
        camera_names="agentview",
        camera_heights=256,
        camera_widths=256,
        camera_depths=False,
        camera_segmentations=None,
        renderer="mjviewer",
        renderer_config=None,
        seed=None,
    ):
        # Table geometry/physics; table top sits at z = 0.8 (world frame).
        self.table_full_size = table_full_size
        self.table_friction = table_friction
        self.table_offset = np.array((0, 0, 0.8))
        # Reward configuration: dense shaping toggled by reward_shaping,
        # final reward normalized to reward_scale (max un-scaled reward is 3.0).
        self.reward_scale = reward_scale
        self.reward_shaping = reward_shaping
        self.use_object_obs = use_object_obs
        # NOTE(review): this attribute is overwritten unconditionally in
        # _load_model, so a caller-supplied placement_initializer is ignored
        # — confirm intended.
        self.placement_initializer = placement_initializer

        super().__init__(
            robots=robots,
            env_configuration=env_configuration,
            controller_configs=controller_configs,
            base_types=base_types,
            gripper_types=gripper_types,
            initialization_noise=initialization_noise,
            use_camera_obs=use_camera_obs,
            has_renderer=has_renderer,
            has_offscreen_renderer=has_offscreen_renderer,
            render_camera=render_camera,
            render_collision_mesh=render_collision_mesh,
            render_visual_mesh=render_visual_mesh,
            render_gpu_device_id=render_gpu_device_id,
            control_freq=control_freq,
            lite_physics=lite_physics,
            horizon=horizon,
            ignore_done=ignore_done,
            hard_reset=hard_reset,
            camera_names=camera_names,
            camera_heights=camera_heights,
            camera_widths=camera_widths,
            camera_depths=camera_depths,
            camera_segmentations=camera_segmentations,
            renderer=renderer,
            renderer_config=renderer_config,
            seed=seed,
        )

    def reward(self, action=None):
        """Return the task reward.

        3.0 on success; otherwise (when reward_shaping is enabled) a dense sum
        of reach (max 0.5), grasp (0.5), uprightness (max 1.0) and lift
        (max 0.5) terms. Scaled to reward_scale / 3.0 when reward_scale is set.
        """
        reward = 0.0
        if self._check_success():
            reward = 3.0
        elif self.reward_shaping:
            # Reaching: tanh-shaped distance from gripper to cup body.
            dist = self._gripper_to_target(
                gripper=self.robots[0].gripper,
                target=self.cup.root_body,
                target_type="body",
                return_distance=True,
            )
            reward += 0.5 * (1 - np.tanh(10.0 * dist))

            # Grasping bonus.
            grasped = self._check_grasp(gripper=self.robots[0].gripper, object_geoms=self.cup)
            if grasped:
                reward += 0.5

            # Uprightness reward: 0 (sideways/inverted) .. 1 (upright).
            uprightness = self._get_cup_uprightness()
            reward += 1.0 * uprightness

            # Lifting reward: height above table (2 cm dead-band), capped at 0.5.
            cup_pos = self.sim.data.body_xpos[self.cup_body_id]
            lift = max(0, cup_pos[2] - self.table_offset[2] - 0.02)
            reward += min(0.5, lift * 5)

        if self.reward_scale is not None:
            reward *= self.reward_scale / 3.0
        return reward

    def _get_cup_uprightness(self):
        """Return how upright the cup is (0 = sideways, 1 = perfectly upright)."""
        # body_xquat is MuJoCo (w, x, y, z); convert to a rotation matrix and
        # take the z-component of the body z-axis.
        quat = self.sim.data.body_xquat[self.cup_body_id]
        mat = np.zeros(9)
        mujoco.mju_quat2Mat(mat, quat)
        z_axis = mat.reshape(3, 3)[:, 2]
        # Cup's z-axis should point up (z_axis[2] close to 1)
        return max(0, z_axis[2])

    def _load_model(self):
        """Build the MJCF model: table arena + free-jointed cup object."""
        super()._load_model()

        # Position the robot base relative to the table.
        xpos = self.robots[0].robot_model.base_xpos_offset["table"](self.table_full_size[0])
        self.robots[0].robot_model.set_base_xpos(xpos)

        mujoco_arena = TableArena(
            table_full_size=self.table_full_size,
            table_friction=self.table_friction,
            table_offset=self.table_offset,
        )
        mujoco_arena.set_origin([0, 0, 0])

        self.cup = MujocoXMLObject(
            fname=xml_path_completion("objects/custom/021_cup/021_cup.xml"),
            name="cup",
            joints=[dict(type="free", damping="0.0005")],
            obj_type="all",
            duplicate_collision_geoms=False,
        )

        # Placement — cup will be tipped over in _reset_internal
        # (NOTE: _reset_internal places the cup manually via self.rng and
        # never calls this sampler's sample()).
        self.placement_initializer = UniformRandomSampler(
            name="CupSampler",
            mujoco_objects=self.cup,
            x_range=[-0.03, 0.03],
            y_range=[-0.03, 0.03],
            rotation=None,
            ensure_object_boundary_in_range=False,
            ensure_valid_placement=True,
            reference_pos=self.table_offset,
            z_offset=0.05,
            rng=self.rng,
        )

        self.model = ManipulationTask(
            mujoco_arena=mujoco_arena,
            mujoco_robots=[robot.robot_model for robot in self.robots],
            mujoco_objects=self.cup,
        )

    def _setup_references(self):
        """Cache the cup's MuJoCo body id after model compilation."""
        super()._setup_references()
        self.cup_body_id = self.sim.model.body_name2id(self.cup.root_body)

    def _setup_observables(self):
        """Add cup pose observables (cup_pos, cup_quat, gripper-to-cup delta)."""
        observables = super()._setup_observables()

        if self.use_object_obs:
            modality = "object"

            @sensor(modality=modality)
            def cup_pos(obs_cache):
                return np.array(self.sim.data.body_xpos[self.cup_body_id])

            @sensor(modality=modality)
            def cup_quat(obs_cache):
                # Convert MuJoCo (w,x,y,z) to robosuite (x,y,z,w) convention.
                return convert_quat(np.array(self.sim.data.body_xquat[self.cup_body_id]), to="xyzw")

            sensors = [cup_pos, cup_quat]
            arm_prefixes = self._get_arm_prefixes(self.robots[0], include_robot_name=False)
            full_prefixes = self._get_arm_prefixes(self.robots[0])

            # Per-arm relative gripper-to-cup position sensors.
            sensors += [
                self._get_obj_eef_sensor(full_pf, "cup_pos", f"{arm_pf}gripper_to_cup_pos", modality)
                for arm_pf, full_pf in zip(arm_prefixes, full_prefixes)
            ]
            names = [s.__name__ for s in sensors]

            for name, s in zip(names, sensors):
                observables[name] = Observable(
                    name=name, sensor=s, sampling_rate=self.control_freq,
                )

        return observables

    def _reset_internal(self):
        """Reset sim state and place the cup upside-down near the table center."""
        super()._reset_internal()
        if not self.deterministic_reset:
            # Place cup upside-down (180° rotation around x-axis)
            # This is a stable configuration — cup rests on its rim
            # Task: robot must flip it right-side up
            obj_x = self.table_offset[0] + self.rng.uniform(-0.03, 0.03)
            obj_y = self.table_offset[1] + self.rng.uniform(-0.03, 0.03)
            # 180° around x: quat = [cos(90°), sin(90°), 0, 0] = [0, 1, 0, 0]
            # Add slight random tilt for variety
            angle = np.pi + self.rng.uniform(-0.15, 0.15)
            quat = np.array([np.cos(angle/2), np.sin(angle/2), 0, 0])

            obj_z = self.table_offset[2] + 0.06  # slightly above to settle
            if self.cup.joints:
                # Free joint qpos = [x, y, z, qw, qx, qy, qz].
                self.sim.data.set_joint_qpos(
                    self.cup.joints[0],
                    np.concatenate([[obj_x, obj_y, obj_z], quat]),
                )

    def _check_success(self):
        """True when the cup is upright on the table, released, and at rest."""
        cup_pos = self.sim.data.body_xpos[self.cup_body_id]

        # Cup must be upright
        uprightness = self._get_cup_uprightness()
        is_upright = uprightness > 0.9

        # Cup must be on table
        on_table = cup_pos[2] < self.table_offset[2] + 0.12

        # Gripper released
        not_grasped = not self._check_grasp(
            gripper=self.robots[0].gripper, object_geoms=self.cup
        )

        # Cup stable: translational velocity of the free joint below 0.1 m/s.
        if self.cup.joints:
            jid = self.sim.model.joint_name2id(self.cup.joints[0])
            qvel_addr = self.sim.model.jnt_dofadr[jid]
            vel = np.linalg.norm(self.sim.data.qvel[qvel_addr:qvel_addr + 3])
        else:
            vel = 0.0
        stable = vel < 0.1

        return is_upright and on_table and not_grasped and stable

    def visualize(self, vis_settings):
        """Optionally draw the gripper-to-cup visual indicator."""
        super().visualize(vis_settings=vis_settings)
        if vis_settings["grippers"]:
            self._visualize_gripper_to_target(gripper=self.robots[0].gripper, target=self.cup)
|
tactile_tasks/envs/fragile_transport.py
ADDED
|
@@ -0,0 +1,285 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Task: Fragile Transport
|
| 3 |
+
|
| 4 |
+
Contact-rich task: Grasp a fragile object (apple/egg-sized) and transport it to a
|
| 5 |
+
marked target zone on the table without dropping or crushing it.
|
| 6 |
+
|
| 7 |
+
Objects: Apple (035_apple) or Rubik's cube (073_rubikscube) — randomly selected
|
| 8 |
+
Success: Object placed in target zone (within tolerance), gripper released, object stable
|
| 9 |
+
Tactile role: Monitor grip force during transport — too little = drop, too much = crush
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
from collections import OrderedDict
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
|
| 16 |
+
from robosuite.environments.manipulation.manipulation_env import ManipulationEnv
|
| 17 |
+
from robosuite.models.arenas import TableArena
|
| 18 |
+
from robosuite.models.objects import BoxObject
|
| 19 |
+
from robosuite.models.objects.objects import MujocoXMLObject
|
| 20 |
+
from robosuite.models.tasks import ManipulationTask
|
| 21 |
+
from robosuite.utils.observables import Observable, sensor
|
| 22 |
+
from robosuite.utils.placement_samplers import UniformRandomSampler
|
| 23 |
+
from robosuite.utils.transform_utils import convert_quat
|
| 24 |
+
|
| 25 |
+
from robosuite.utils.mjcf_utils import xml_path_completion
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class FragileTransport(ManipulationEnv):
    """
    Transport a fragile object from one side of the table to a target zone.

    The target zone is visualized as a flat marker on the table.
    The robot must pick up the object, carry it across the table, and
    gently place it in the target zone.
    """

    def __init__(
        self,
        robots,
        env_configuration="default",
        controller_configs=None,
        gripper_types="Robotiq85Gripper",
        base_types="default",
        initialization_noise="default",
        table_full_size=(0.8, 0.8, 0.05),
        table_friction=(1.0, 5e-3, 1e-4),
        use_camera_obs=True,
        use_object_obs=True,
        reward_scale=1.0,
        reward_shaping=True,
        placement_initializer=None,
        has_renderer=False,
        has_offscreen_renderer=True,
        render_camera="frontview",
        render_collision_mesh=False,
        render_visual_mesh=True,
        render_gpu_device_id=-1,
        control_freq=20,
        lite_physics=True,
        horizon=600,
        ignore_done=False,
        hard_reset=True,
        camera_names="agentview",
        camera_heights=256,
        camera_widths=256,
        camera_depths=False,
        camera_segmentations=None,
        renderer="mjviewer",
        renderer_config=None,
        seed=None,
    ):
        # Table geometry/physics; table top sits at z = 0.8 (world frame).
        self.table_full_size = table_full_size
        self.table_friction = table_friction
        self.table_offset = np.array((0, 0, 0.8))
        # Reward configuration (max un-scaled reward is 3.0).
        self.reward_scale = reward_scale
        self.reward_shaping = reward_shaping
        self.use_object_obs = use_object_obs
        # NOTE(review): overwritten unconditionally in _load_model, so a
        # caller-supplied placement_initializer is ignored — confirm intended.
        self.placement_initializer = placement_initializer

        # Target zone position (set in _reset_internal)
        self.target_pos = None

        super().__init__(
            robots=robots,
            env_configuration=env_configuration,
            controller_configs=controller_configs,
            base_types=base_types,
            gripper_types=gripper_types,
            initialization_noise=initialization_noise,
            use_camera_obs=use_camera_obs,
            has_renderer=has_renderer,
            has_offscreen_renderer=has_offscreen_renderer,
            render_camera=render_camera,
            render_collision_mesh=render_collision_mesh,
            render_visual_mesh=render_visual_mesh,
            render_gpu_device_id=render_gpu_device_id,
            control_freq=control_freq,
            lite_physics=lite_physics,
            horizon=horizon,
            ignore_done=ignore_done,
            hard_reset=hard_reset,
            camera_names=camera_names,
            camera_heights=camera_heights,
            camera_widths=camera_widths,
            camera_depths=camera_depths,
            camera_segmentations=camera_segmentations,
            renderer=renderer,
            renderer_config=renderer_config,
            seed=seed,
        )

    def reward(self, action=None):
        """Return the task reward.

        3.0 on success; otherwise (when reward_shaping is enabled) a dense sum
        of reach (max 0.5) plus, while grasped, grasp (0.5), transport progress
        (max 1.0) and lift (0.5) terms. Scaled to reward_scale / 3.0 when
        reward_scale is set.
        """
        reward = 0.0
        if self._check_success():
            reward = 3.0
        elif self.reward_shaping:
            obj_pos = self.sim.data.body_xpos[self.obj_body_id]
            # Reaching: tanh-shaped distance from gripper to object body.
            dist = self._gripper_to_target(
                gripper=self.robots[0].gripper,
                target=self.obj.root_body,
                target_type="body",
                return_distance=True,
            )
            reward += 0.5 * (1 - np.tanh(10.0 * dist))

            # Grasping (transport terms only accrue while the object is held).
            if self._check_grasp(gripper=self.robots[0].gripper, object_geoms=self.obj):
                reward += 0.5
                # Transport progress: distance from obj to target
                transport_dist = np.linalg.norm(obj_pos[:2] - self.target_pos[:2])
                reward += 1.0 * (1 - np.tanh(5.0 * transport_dist))

                # Height bonus (keep object lifted during transport)
                if obj_pos[2] > self.table_offset[2] + 0.05:
                    reward += 0.5

        if self.reward_scale is not None:
            reward *= self.reward_scale / 3.0
        return reward

    def _load_model(self):
        """Build the MJCF model: table arena + apple object + target-zone marker."""
        super()._load_model()

        # Position the robot base relative to the table.
        xpos = self.robots[0].robot_model.base_xpos_offset["table"](self.table_full_size[0])
        self.robots[0].robot_model.set_base_xpos(xpos)

        mujoco_arena = TableArena(
            table_full_size=self.table_full_size,
            table_friction=self.table_friction,
            table_offset=self.table_offset,
        )
        mujoco_arena.set_origin([0, 0, 0])

        # Use apple from converted objects
        self.obj = MujocoXMLObject(
            fname=xml_path_completion("objects/custom/035_apple/035_apple.xml"),
            name="fragile_obj",
            joints=[dict(type="free", damping="0.0005")],
            obj_type="all",
            duplicate_collision_geoms=False,
        )

        # Target zone marker (flat box on table — needs mass for free joint)
        # NOTE(review): the marker is a free body, so the transported object
        # can touch/displace it on placement — confirm intended.
        self.target_marker = BoxObject(
            name="target_zone",
            size=[0.04, 0.04, 0.002],
            rgba=[0.2, 0.8, 0.2, 0.5],
            density=500,
        )

        # Samples only the fragile object; the marker is positioned manually
        # in _reset_internal.
        self.placement_initializer = UniformRandomSampler(
            name="ObjectSampler",
            mujoco_objects=self.obj,
            x_range=[-0.05, -0.02],
            y_range=[-0.02, 0.02],
            rotation=None,
            ensure_object_boundary_in_range=False,
            ensure_valid_placement=True,
            reference_pos=self.table_offset,
            z_offset=0.01,
            rng=self.rng,
        )

        self.model = ManipulationTask(
            mujoco_arena=mujoco_arena,
            mujoco_robots=[robot.robot_model for robot in self.robots],
            mujoco_objects=[self.obj, self.target_marker],
        )

    def _setup_references(self):
        """Cache MuJoCo body ids for the object and the target marker."""
        super()._setup_references()
        self.obj_body_id = self.sim.model.body_name2id(self.obj.root_body)
        self.target_body_id = self.sim.model.body_name2id(self.target_marker.root_body)

    def _setup_observables(self):
        """Add object pose + target position observables."""
        observables = super()._setup_observables()

        if self.use_object_obs:
            modality = "object"

            @sensor(modality=modality)
            def obj_pos(obs_cache):
                return np.array(self.sim.data.body_xpos[self.obj_body_id])

            @sensor(modality=modality)
            def obj_quat(obs_cache):
                # Convert MuJoCo (w,x,y,z) to robosuite (x,y,z,w) convention.
                return convert_quat(np.array(self.sim.data.body_xquat[self.obj_body_id]), to="xyzw")

            @sensor(modality=modality)
            def target_pos(obs_cache):
                # Zeros until _reset_internal has set the target.
                return self.target_pos.copy() if self.target_pos is not None else np.zeros(3)

            sensors = [obj_pos, obj_quat, target_pos]
            arm_prefixes = self._get_arm_prefixes(self.robots[0], include_robot_name=False)
            full_prefixes = self._get_arm_prefixes(self.robots[0])

            # Per-arm relative gripper-to-object position sensors.
            sensors += [
                self._get_obj_eef_sensor(full_pf, "obj_pos", f"{arm_pf}gripper_to_obj_pos", modality)
                for arm_pf, full_pf in zip(arm_prefixes, full_prefixes)
            ]
            names = [s.__name__ for s in sensors]

            for name, s in zip(names, sensors):
                observables[name] = Observable(
                    name=name, sensor=s, sampling_rate=self.control_freq,
                )

        return observables

    def _reset_internal(self):
        """Reset sim state: sample the object pose, then place the target zone."""
        super()._reset_internal()

        # Place object on left side
        if not self.deterministic_reset:
            object_placements = self.placement_initializer.sample()
            for obj_pos, obj_quat, obj in object_placements.values():
                self.sim.data.set_joint_qpos(
                    obj.joints[0], np.concatenate([np.array(obj_pos), np.array(obj_quat)])
                )

        # Place target zone on right side
        target_x = self.rng.uniform(0.03, 0.06)
        target_y = self.rng.uniform(-0.02, 0.02)
        self.target_pos = np.array([
            target_x + self.table_offset[0],
            target_y + self.table_offset[1],
            self.table_offset[2] + 0.002,
        ])

        # Move target marker (free joint qpos = [x, y, z, qw, qx, qy, qz]).
        if self.target_marker.joints:
            self.sim.data.set_joint_qpos(
                self.target_marker.joints[0],
                np.concatenate([self.target_pos, [1, 0, 0, 0]]),
            )

    def _check_success(self):
        """True when the object rests in the target zone, released and stable."""
        obj_pos = self.sim.data.body_xpos[self.obj_body_id]

        # Object must be in target zone
        xy_dist = np.linalg.norm(obj_pos[:2] - self.target_pos[:2])
        on_table = obj_pos[2] < self.table_offset[2] + 0.08
        in_zone = xy_dist < 0.04 and on_table

        # Gripper must be released
        not_grasped = not self._check_grasp(
            gripper=self.robots[0].gripper, object_geoms=self.obj
        )

        # Object must be stable: translational velocity below 0.1 m/s.
        if self.obj.joints:
            jid = self.sim.model.joint_name2id(self.obj.joints[0])
            qvel_addr = self.sim.model.jnt_dofadr[jid]
            vel = np.linalg.norm(self.sim.data.qvel[qvel_addr:qvel_addr + 3])
        else:
            vel = 0.0
        stable = vel < 0.1

        return in_zone and not_grasped and stable

    def visualize(self, vis_settings):
        """Optionally draw the gripper-to-object visual indicator."""
        super().visualize(vis_settings=vis_settings)
        if vis_settings["grippers"]:
            self._visualize_gripper_to_target(gripper=self.robots[0].gripper, target=self.obj)
|
tactile_tasks/envs/peg_insertion.py
CHANGED
|
@@ -291,13 +291,7 @@ class PegInsertion(ManipulationEnv):
|
|
| 291 |
self._success_steps = 0
|
| 292 |
return False
|
| 293 |
|
| 294 |
-
|
| 295 |
-
if self.peg_joint_id is not None:
|
| 296 |
-
qvel_addr = self.sim.model.jnt_dofadr[self.peg_joint_id]
|
| 297 |
-
peg_vel = self.sim.data.qvel[qvel_addr: qvel_addr + 3] # translational only
|
| 298 |
-
if np.linalg.norm(peg_vel) > 0.1:
|
| 299 |
-
self._success_steps = 0
|
| 300 |
-
return False
|
| 301 |
|
| 302 |
self._success_steps += 1
|
| 303 |
return self._success_steps >= self._required_success_steps
|
|
|
|
| 291 |
self._success_steps = 0
|
| 292 |
return False
|
| 293 |
|
| 294 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 295 |
|
| 296 |
self._success_steps += 1
|
| 297 |
return self._success_steps >= self._required_success_steps
|
tactile_tasks/envs/pen_insertion.py
ADDED
|
@@ -0,0 +1,309 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Task: Pen Insertion
|
| 3 |
+
|
| 4 |
+
Contact-rich task: Grasp a pen/marker and insert it into a cup (pencup).
|
| 5 |
+
Requires precise alignment and force-controlled insertion.
|
| 6 |
+
|
| 7 |
+
Objects: Marker pen (058_markpen) + Pen cup (059_pencup)
|
| 8 |
+
Success: Pen inserted into cup (pen tip below cup rim, pen roughly vertical)
|
| 9 |
+
Tactile role: Detect contact with cup rim for alignment, control insertion force
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
from collections import OrderedDict
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
import mujoco
|
| 16 |
+
|
| 17 |
+
from robosuite.environments.manipulation.manipulation_env import ManipulationEnv
|
| 18 |
+
from robosuite.models.arenas import TableArena
|
| 19 |
+
from robosuite.models.objects.objects import MujocoXMLObject
|
| 20 |
+
from robosuite.models.tasks import ManipulationTask
|
| 21 |
+
from robosuite.utils.observables import Observable, sensor
|
| 22 |
+
from robosuite.utils.mjcf_utils import xml_path_completion
|
| 23 |
+
from robosuite.utils.placement_samplers import UniformRandomSampler, SequentialCompositeSampler
|
| 24 |
+
from robosuite.utils.transform_utils import convert_quat
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class PenInsertion(ManipulationEnv):
    """
    Pen insertion task: grasp a pen and insert it into a cup.

    The pen starts lying on the table. The robot must:
    1. Grasp the pen
    2. Lift and reorient to vertical
    3. Align with cup opening
    4. Insert with controlled force

    Contact-rich because:
    - Grasping a thin cylindrical object requires precise contact
    - Alignment with cup opening needs tactile feedback at rim
    - Insertion force must be controlled to avoid tipping the cup
    """

    def __init__(
        self,
        robots,
        env_configuration="default",
        controller_configs=None,
        gripper_types="Robotiq85Gripper",
        base_types="default",
        initialization_noise="default",
        table_full_size=(0.8, 0.8, 0.05),
        table_friction=(1.0, 5e-3, 1e-4),
        use_camera_obs=True,
        use_object_obs=True,
        reward_scale=1.0,
        reward_shaping=True,
        placement_initializer=None,
        has_renderer=False,
        has_offscreen_renderer=True,
        render_camera="frontview",
        render_collision_mesh=False,
        render_visual_mesh=True,
        render_gpu_device_id=-1,
        control_freq=20,
        lite_physics=True,
        horizon=800,
        ignore_done=False,
        hard_reset=True,
        camera_names="agentview",
        camera_heights=256,
        camera_widths=256,
        camera_depths=False,
        camera_segmentations=None,
        renderer="mjviewer",
        renderer_config=None,
        seed=None,
    ):
        """Store table/reward settings locally, pass everything else to ManipulationEnv.

        Only the table geometry, reward options, and placement initializer are
        consumed here; the remaining arguments are forwarded unchanged to the
        robosuite ``ManipulationEnv`` base class.
        """
        # Table geometry (consumed by _load_model, not forwarded to super()).
        self.table_full_size = table_full_size
        self.table_friction = table_friction
        self.table_offset = np.array((0, 0, 0.8))
        # Reward configuration.
        self.reward_scale = reward_scale
        self.reward_shaping = reward_shaping
        self.use_object_obs = use_object_obs
        # NOTE(review): a caller-supplied placement_initializer is stored here
        # but unconditionally overwritten in _load_model — confirm intended.
        self.placement_initializer = placement_initializer

        super().__init__(
            robots=robots,
            env_configuration=env_configuration,
            controller_configs=controller_configs,
            base_types=base_types,
            gripper_types=gripper_types,
            initialization_noise=initialization_noise,
            use_camera_obs=use_camera_obs,
            has_renderer=has_renderer,
            has_offscreen_renderer=has_offscreen_renderer,
            render_camera=render_camera,
            render_collision_mesh=render_collision_mesh,
            render_visual_mesh=render_visual_mesh,
            render_gpu_device_id=render_gpu_device_id,
            control_freq=control_freq,
            lite_physics=lite_physics,
            horizon=horizon,
            ignore_done=ignore_done,
            hard_reset=hard_reset,
            camera_names=camera_names,
            camera_heights=camera_heights,
            camera_widths=camera_widths,
            camera_depths=camera_depths,
            camera_segmentations=camera_segmentations,
            renderer=renderer,
            renderer_config=renderer_config,
            seed=seed,
        )

    def _get_pen_verticality(self):
        """Return how vertical the pen is (0 = horizontal, 1 = vertical)."""
        # Convert the pen body's world quaternion into a rotation matrix and
        # read its local z-axis; |z_world_z| is 1 when the pen is upright.
        quat = self.sim.data.body_xquat[self.pen_body_id]
        mat = np.zeros(9)
        mujoco.mju_quat2Mat(mat, quat)
        z_axis = mat.reshape(3, 3)[:, 2]
        return abs(z_axis[2])

    def reward(self, action=None):
        """Sparse success reward (4.0) or, with reward_shaping, a staged dense
        reward: reach (0.5) + grasp (0.5) + verticality (0.5) + xy alignment
        (1.0) + insertion depth (up to 1.5). Scaled so the max is reward_scale.
        """
        reward = 0.0
        if self._check_success():
            reward = 4.0
        elif self.reward_shaping:
            pen_pos = self.sim.data.body_xpos[self.pen_body_id]
            cup_pos = self.sim.data.body_xpos[self.cup_body_id]

            # Reaching pen
            dist = self._gripper_to_target(
                gripper=self.robots[0].gripper,
                target=self.pen.root_body,
                target_type="body",
                return_distance=True,
            )
            reward += 0.5 * (1 - np.tanh(10.0 * dist))

            # Grasping pen
            grasped = self._check_grasp(gripper=self.robots[0].gripper, object_geoms=self.pen)
            if grasped:
                reward += 0.5

            # Verticality bonus
            vert = self._get_pen_verticality()
            reward += 0.5 * vert

            # Alignment: pen xy near cup xy
            xy_dist = np.linalg.norm(pen_pos[:2] - cup_pos[:2])
            reward += 1.0 * (1 - np.tanh(10.0 * xy_dist))

            # Insertion: pen z going down toward cup
            # Only rewarded once the pen is aligned and roughly upright.
            if xy_dist < 0.03 and vert > 0.7:
                # Cup top ~ cup_pos[2] + cup_half_height
                insertion_depth = cup_pos[2] + 0.035 - pen_pos[2]
                if insertion_depth > 0:
                    reward += min(1.5, insertion_depth * 20)

        # Normalize so the maximum attainable reward equals reward_scale.
        if self.reward_scale is not None:
            reward *= self.reward_scale / 4.0
        return reward

    def _load_model(self):
        """Build the MJCF model: table arena, pen + cup objects, and samplers."""
        super()._load_model()

        # Position the robot base relative to the table size.
        xpos = self.robots[0].robot_model.base_xpos_offset["table"](self.table_full_size[0])
        self.robots[0].robot_model.set_base_xpos(xpos)

        mujoco_arena = TableArena(
            table_full_size=self.table_full_size,
            table_friction=self.table_friction,
            table_offset=self.table_offset,
        )
        mujoco_arena.set_origin([0, 0, 0])

        # Pen (marker)
        self.pen = MujocoXMLObject(
            fname=xml_path_completion("objects/custom/058_markpen/058_markpen.xml"),
            name="pen",
            joints=[dict(type="free", damping="0.0005")],
            obj_type="all",
            duplicate_collision_geoms=False,
        )

        # Cup (pencup)
        self.cup = MujocoXMLObject(
            fname=xml_path_completion("objects/custom/059_pencup/059_pencup.xml"),
            name="pencup",
            joints=[dict(type="free", damping="0.0005")],
            obj_type="all",
            duplicate_collision_geoms=False,
        )

        # Pen spawns slightly left of table center, cup slightly right, so the
        # two never overlap at reset.
        self.placement_initializer = SequentialCompositeSampler(name="ObjectSampler")
        self.placement_initializer.append_sampler(
            UniformRandomSampler(
                name="PenSampler",
                mujoco_objects=self.pen,
                x_range=[-0.08, -0.03],
                y_range=[-0.02, 0.02],
                rotation=None,
                ensure_object_boundary_in_range=False,
                ensure_valid_placement=False,
                reference_pos=self.table_offset,
                z_offset=0.01,
                rng=self.rng,
            )
        )
        self.placement_initializer.append_sampler(
            UniformRandomSampler(
                name="CupSampler",
                mujoco_objects=self.cup,
                x_range=[0.03, 0.06],
                y_range=[-0.02, 0.02],
                rotation=None,
                ensure_object_boundary_in_range=False,
                ensure_valid_placement=False,
                reference_pos=self.table_offset,
                z_offset=0.01,
                rng=self.rng,
            )
        )

        self.model = ManipulationTask(
            mujoco_arena=mujoco_arena,
            mujoco_robots=[robot.robot_model for robot in self.robots],
            mujoco_objects=[self.pen, self.cup],
        )

    def _setup_references(self):
        """Cache MuJoCo body ids for the pen and cup."""
        super()._setup_references()
        self.pen_body_id = self.sim.model.body_name2id(self.pen.root_body)
        self.cup_body_id = self.sim.model.body_name2id(self.cup.root_body)

    def _setup_observables(self):
        """Add object-state observables (pen/cup pose, gripper-to-pen vector)."""
        observables = super()._setup_observables()

        if self.use_object_obs:
            modality = "object"

            @sensor(modality=modality)
            def pen_pos(obs_cache):
                return np.array(self.sim.data.body_xpos[self.pen_body_id])

            @sensor(modality=modality)
            def pen_quat(obs_cache):
                # robosuite convention: quaternions exposed as xyzw.
                return convert_quat(np.array(self.sim.data.body_xquat[self.pen_body_id]), to="xyzw")

            @sensor(modality=modality)
            def cup_pos(obs_cache):
                return np.array(self.sim.data.body_xpos[self.cup_body_id])

            @sensor(modality=modality)
            def cup_quat(obs_cache):
                return convert_quat(np.array(self.sim.data.body_xquat[self.cup_body_id]), to="xyzw")

            sensors = [pen_pos, pen_quat, cup_pos, cup_quat]
            arm_prefixes = self._get_arm_prefixes(self.robots[0], include_robot_name=False)
            full_prefixes = self._get_arm_prefixes(self.robots[0])

            # One gripper-to-pen displacement sensor per arm.
            sensors += [
                self._get_obj_eef_sensor(full_pf, "pen_pos", f"{arm_pf}gripper_to_pen_pos", modality)
                for arm_pf, full_pf in zip(arm_prefixes, full_prefixes)
            ]
            names = [s.__name__ for s in sensors]

            for name, s in zip(names, sensors):
                observables[name] = Observable(
                    name=name, sensor=s, sampling_rate=self.control_freq,
                )

        return observables

    def _reset_internal(self):
        """Re-sample object placements on reset (unless deterministic_reset)."""
        super()._reset_internal()
        if not self.deterministic_reset:
            object_placements = self.placement_initializer.sample()
            for obj_pos, obj_quat, obj in object_placements.values():
                # Free joint qpos is [x, y, z, qw, qx, qy, qz].
                self.sim.data.set_joint_qpos(
                    obj.joints[0], np.concatenate([np.array(obj_pos), np.array(obj_quat)])
                )

    def _check_success(self):
        """Success = pen vertical, xy-aligned with cup, inserted, and released."""
        pen_pos = self.sim.data.body_xpos[self.pen_body_id]
        cup_pos = self.sim.data.body_xpos[self.cup_body_id]

        # Pen must be roughly vertical
        vertical = self._get_pen_verticality() > 0.7

        # Pen xy must be near cup xy (inside the cup opening)
        xy_dist = np.linalg.norm(pen_pos[:2] - cup_pos[:2])
        aligned = xy_dist < 0.03

        # Pen center must be below cup top (inserted)
        # Cup half-height ~ 0.035m, so cup top is cup_pos[2] + 0.035
        # (threshold uses 0.04 to allow a small margin — TODO confirm geometry)
        inserted = pen_pos[2] < cup_pos[2] + 0.04

        # Gripper released
        not_grasped = not self._check_grasp(
            gripper=self.robots[0].gripper, object_geoms=self.pen
        )

        return vertical and aligned and inserted and not_grasped

    def visualize(self, vis_settings):
        """Visualize gripper-to-pen guidance markers when enabled."""
        super().visualize(vis_settings=vis_settings)
        if vis_settings["grippers"]:
            self._visualize_gripper_to_target(gripper=self.robots[0].gripper, target=self.pen)
tactile_tasks/eval_act.py
ADDED
|
@@ -0,0 +1,208 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Evaluate trained ACT policy in robosuite environment.
|
| 4 |
+
|
| 5 |
+
ACT outputs action chunks (chunk_size steps at once).
|
| 6 |
+
Two execution modes:
|
| 7 |
+
- chunk: execute full chunk, then re-query (default)
|
| 8 |
+
- temporal_agg: query every step, exponentially-weighted average of overlapping chunks
|
| 9 |
+
|
| 10 |
+
Usage:
|
| 11 |
+
cd policy/ACT
|
| 12 |
+
python ../../tactile_tasks/eval_act.py --task precision_grasp
|
| 13 |
+
python ../../tactile_tasks/eval_act.py --task precision_grasp --temporal_agg
|
| 14 |
+
python ../../tactile_tasks/eval_act.py --task precision_grasp --ckpt policy_last.ckpt
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
import os
|
| 18 |
+
import sys
|
| 19 |
+
import argparse
|
| 20 |
+
import pickle
|
| 21 |
+
|
| 22 |
+
import numpy as np
|
| 23 |
+
import torch
|
| 24 |
+
from einops import rearrange
|
| 25 |
+
|
| 26 |
+
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "policy", "ACT"))
|
| 27 |
+
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
| 28 |
+
|
| 29 |
+
from act_policy import ACTPolicy
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def load_policy(ckpt_dir, ckpt_name, policy_config):
    """Restore a trained ACT policy and its dataset normalization stats.

    Builds an ``ACTPolicy`` from ``policy_config``, loads the checkpoint at
    ``ckpt_dir/ckpt_name``, moves the model to GPU in eval mode, and unpickles
    the ``dataset_stats.pkl`` saved alongside the checkpoint at train time.

    Returns:
        (policy, stats) tuple.
    """
    ckpt_path = os.path.join(ckpt_dir, ckpt_name)
    net = ACTPolicy(policy_config)
    status = net.load_state_dict(torch.load(ckpt_path, map_location="cpu"))
    print(f"Loaded: {ckpt_path} ({status})")
    net.cuda()
    net.eval()

    # Normalization statistics are required to (de)normalize qpos/actions.
    with open(os.path.join(ckpt_dir, "dataset_stats.pkl"), "rb") as fh:
        dataset_stats = pickle.load(fh)

    return net, dataset_stats
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def get_obs(env, camera_names, stats):
    """Read proprioception and camera frames from a robosuite env for ACT.

    Returns:
        (qpos_tensor, images, qpos) where qpos_tensor is the normalized
        (1, 8) CUDA tensor, images is a (1, num_cams, 3, H, W) float CUDA
        tensor in [0, 1], and qpos is the raw 8D numpy state.
    """
    robot = env.robots[0]

    # qpos: 7 arm joints + one gripper value normalized by its 0.8 travel.
    arm_qpos = np.array(env.sim.data.qpos[robot._ref_joint_pos_indexes])
    grip_idx = robot._ref_gripper_joint_pos_indexes.get("right", [])
    grip_val = env.sim.data.qpos[grip_idx][0] / 0.8 if len(grip_idx) else 0.0
    qpos = np.concatenate([arm_qpos, [grip_val]]).astype(np.float32)

    # Normalize with the training-set statistics.
    normalized = (qpos - stats["qpos_mean"]) / stats["qpos_std"]
    qpos_tensor = torch.from_numpy(normalized).float().cuda().unsqueeze(0)

    # Cameras: HWC uint8 → CHW, stacked along a leading camera axis.
    obs = env._get_observations()
    cam_map = {"agentview": "agentview_image", "eye_in_hand": "robot0_eye_in_hand_image"}
    frames = [
        rearrange(obs[cam_map.get(cam, cam + "_image")], "h w c -> c h w")
        for cam in camera_names
    ]
    stacked = np.stack(frames, axis=0)  # (num_cams, 3, H, W)
    images = torch.from_numpy(stacked / 255.0).float().cuda().unsqueeze(0)

    return qpos_tensor, images, qpos
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def run_eval(task, ckpt_dir, ckpt_name, num_rollouts, temporal_agg, camera_names,
             render=False):
    """Run evaluation rollouts of a trained ACT policy.

    Args:
        task: task name (key into TASK_CONFIGS).
        ckpt_dir: directory holding the checkpoint and dataset_stats.pkl.
        ckpt_name: checkpoint filename inside ckpt_dir.
        num_rollouts: number of evaluation episodes.
        temporal_agg: if True, query the policy every step and blend the
            overlapping chunk predictions with exponential weights; otherwise
            execute each chunk to completion before re-querying.
        camera_names: camera list fed to the policy (must match training).
        render: show the on-screen renderer.

    Returns:
        Success rate in [0, 1].
    """
    import yaml

    # Load ACT config — must match the settings the checkpoint was trained with.
    config_path = os.path.join(os.path.dirname(__file__), "..", "policy", "ACT", "train_config.yaml")
    with open(config_path, "r") as f:
        cfg = yaml.safe_load(f)

    state_dim = cfg["state_dim"]
    chunk_size = cfg["chunk_size"]

    # Build policy config (same structure as training)
    policy_config = {
        "lr": cfg["lr"],
        "num_queries": chunk_size,
        "kl_weight": cfg["kl_weight"],
        "hidden_dim": cfg["hidden_dim"],
        "dim_feedforward": cfg["dim_feedforward"],
        "lr_backbone": 1e-5,
        "backbone": "resnet18",
        "enc_layers": 4,
        "dec_layers": 7,
        "nheads": 8,
        "camera_names": camera_names,
        "state_dim": state_dim,
        "chunk_size": chunk_size,
    }

    policy, stats = load_policy(ckpt_dir, ckpt_name, policy_config)
    # Denormalize policy output back into action space.
    post_process = lambda a: a * stats["action_std"] + stats["action_mean"]

    # Create environment with extended horizon (settling steps + eval steps)
    from tactile_tasks.collect_data import create_env, TASK_CONFIGS
    settle_steps = 50
    max_timesteps = max(TASK_CONFIGS[task]["horizon"], 900)
    env = create_env(task, has_renderer=render)
    env.horizon = max_timesteps + settle_steps  # override so env doesn't cut us short

    # temporal_agg needs a fresh chunk every step; chunk mode re-queries only
    # after the previous chunk has been executed.
    query_frequency = 1 if temporal_agg else chunk_size

    successes = []
    for ep in range(num_rollouts):
        env.reset()
        # Let objects settle before handing control to the policy.
        # FIX: was a hard-coded range(50) that could drift out of sync with
        # settle_steps (also used for the horizon override above).
        for _ in range(settle_steps):
            env.step(np.zeros(7))

        if temporal_agg:
            # Row t holds the chunk predicted at step t; column t collects all
            # predictions that target step t, to be blended below.
            all_time_actions = torch.zeros(
                [max_timesteps, max_timesteps + chunk_size, state_dim]
            ).cuda()

        all_actions = None
        episode_success = False

        with torch.inference_mode():
            for t in range(max_timesteps):
                qpos, images, _ = get_obs(env, camera_names, stats)

                # Query policy
                if t % query_frequency == 0:
                    all_actions = policy(qpos, images)  # (1, chunk_size, state_dim)

                if temporal_agg:
                    all_time_actions[[t], t:t + chunk_size] = all_actions
                    actions_for_curr_step = all_time_actions[:, t]
                    # Keep only rows that actually predicted this timestep.
                    actions_populated = torch.all(actions_for_curr_step != 0, axis=1)
                    actions_for_curr_step = actions_for_curr_step[actions_populated]
                    # Exponentially down-weight older predictions.
                    k = 0.01
                    exp_weights = np.exp(-k * np.arange(len(actions_for_curr_step)))
                    exp_weights = exp_weights / exp_weights.sum()
                    exp_weights = torch.from_numpy(exp_weights).cuda().unsqueeze(dim=1)
                    raw_action = (actions_for_curr_step * exp_weights).sum(dim=0, keepdim=True)
                else:
                    raw_action = all_actions[:, t % query_frequency]

                # Post-process: denormalize, take first 7D (ignore padding dim)
                action = post_process(raw_action.squeeze(0).cpu().numpy())
                action = action[:7]  # 8D → 7D OSC_POSE

                env.step(action)
                if render:
                    env.render()

                if env._check_success():
                    episode_success = True

        successes.append(episode_success)
        status = "SUCCESS" if episode_success else "FAIL"
        print(f" [{ep+1}/{num_rollouts}] {status} (t={t+1})")

    success_rate = sum(successes) / len(successes)
    print(f"\n{'='*40}")
    print(f"Task: {task}")
    print(f"Checkpoint: {ckpt_dir}/{ckpt_name}")
    print(f"Mode: {'temporal_agg' if temporal_agg else 'chunk'}")
    print(f"Success: {sum(successes)}/{num_rollouts} ({success_rate*100:.1f}%)")
    print(f"{'='*40}")

    env.close()
    return success_rate
| 180 |
+
|
| 181 |
+
|
| 182 |
+
if __name__ == "__main__":
    # CLI entry point: evaluate a trained ACT checkpoint on one task.
    parser = argparse.ArgumentParser()
    parser.add_argument("--task", type=str, required=True,
                        choices=["precision_grasp", "peg_insertion", "gentle_stack"])
    parser.add_argument("--ckpt_dir", type=str, default=None,
                        help="checkpoint dir (default: policy/ACT/checkpoints/{task}_act)")
    parser.add_argument("--ckpt", type=str, default="policy_best.ckpt")
    parser.add_argument("--num_rollouts", type=int, default=50)
    parser.add_argument("--temporal_agg", action="store_true")
    parser.add_argument("--render", action="store_true", help="visualize with on-screen renderer")
    parser.add_argument("--cameras", nargs="+", default=["agentview", "eye_in_hand"])
    args = parser.parse_args()

    # Default checkpoint directory is keyed by task name.
    if args.ckpt_dir is None:
        args.ckpt_dir = os.path.join(
            os.path.dirname(__file__), "..", "policy", "ACT", "checkpoints", f"{args.task}_act"
        )

    run_eval(
        task=args.task,
        ckpt_dir=args.ckpt_dir,
        ckpt_name=args.ckpt,
        num_rollouts=args.num_rollouts,
        temporal_agg=args.temporal_agg,
        camera_names=args.cameras,
        render=args.render,
    )
tactile_tasks/eval_dp.py
ADDED
|
@@ -0,0 +1,226 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Evaluate trained Diffusion Policy in robosuite environment.
|
| 4 |
+
|
| 5 |
+
DP output characteristics:
|
| 6 |
+
- Predicts `horizon` (8) steps of actions conditioned on `n_obs_steps` (3) frames
|
| 7 |
+
- Executes `n_action_steps` (6) of them, then re-queries with fresh observations
|
| 8 |
+
- This "receding horizon" approach naturally handles drift and disturbances
|
| 9 |
+
|
| 10 |
+
Planner design:
|
| 11 |
+
- Maintain an observation history buffer of length n_obs_steps
|
| 12 |
+
- Query policy → get n_action_steps actions → execute all → repeat
|
| 13 |
+
- No reliance on env.done signal (we control the episode length)
|
| 14 |
+
- Observations include images (head_cam) and state (agent_pos)
|
| 15 |
+
|
| 16 |
+
Usage:
|
| 17 |
+
python tactile_tasks/eval_dp.py --task precision_grasp
|
| 18 |
+
python tactile_tasks/eval_dp.py --task precision_grasp --render
|
| 19 |
+
python tactile_tasks/eval_dp.py --task gentle_stack --ckpt /path/to/checkpoint.ckpt
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
import os
|
| 23 |
+
import sys
|
| 24 |
+
import argparse
|
| 25 |
+
from collections import deque
|
| 26 |
+
|
| 27 |
+
import numpy as np
|
| 28 |
+
import torch
|
| 29 |
+
import dill
|
| 30 |
+
|
| 31 |
+
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def get_obs(env):
    """Collect one raw observation dict from the robosuite env.

    No normalization is applied here — the DP policy normalizes internally.

    Returns:
        dict with "head_camera" (3, H, W uint8 image) and "state" (8D float32:
        7 arm joint positions + gripper opening scaled by its 0.8 travel).
    """
    robot = env.robots[0]
    raw = env._get_observations()

    # 8D proprioceptive state.
    arm = np.array(env.sim.data.qpos[robot._ref_joint_pos_indexes])
    g_idx = robot._ref_gripper_joint_pos_indexes.get("right", [])
    g_val = env.sim.data.qpos[g_idx][0] / 0.8 if len(g_idx) else 0.0
    state = np.concatenate([arm, [g_val]]).astype(np.float32)

    # HWC uint8 → CHW for the vision encoder.
    head_cam = np.transpose(raw["agentview_image"], (2, 0, 1))

    return {"head_camera": head_cam, "state": state}
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def load_dp_policy(ckpt_path, device="cuda:0"):
    """Load a Diffusion Policy checkpoint, return (policy, cfg).

    Rebuilds the training workspace from the pickled Hydra cfg inside the
    checkpoint and prefers the EMA weights when available.

    Args:
        ckpt_path: path to the .ckpt file (dill-pickled payload with "cfg").
        device: torch device string to place the model on.
    """
    # FIX: open the checkpoint with a context manager — the original
    # torch.load(open(ckpt_path, "rb"), ...) leaked the file handle.
    with open(ckpt_path, "rb") as f:
        payload = torch.load(f, pickle_module=dill, map_location=device)
    cfg = payload["cfg"]

    # Get n_obs_steps, n_action_steps from config
    n_obs_steps = cfg.n_obs_steps
    n_action_steps = cfg.n_action_steps

    # Reconstruct workspace to load ema_model.
    # NOTE(review): `hydra` looks unused here but may be needed as an import
    # side effect for cfg resolution — confirm before removing.
    import hydra
    from diffusion_policy.workspace.robotworkspace import RobotWorkspace

    workspace = RobotWorkspace(cfg)
    workspace.load_payload(payload)

    # Use EMA model if available (better performance)
    if workspace.ema_model is not None:
        policy = workspace.ema_model
        print("Using EMA model")
    else:
        policy = workspace.model
        print("Using base model")

    policy.to(device)
    policy.eval()

    print(f"Loaded: {ckpt_path}")
    print(f" horizon={cfg.horizon}, n_obs_steps={n_obs_steps}, n_action_steps={n_action_steps}")

    return policy, cfg
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def run_eval(task, ckpt_path, num_rollouts, render=False, device="cuda:0"):
    """Run evaluation rollouts with DP policy.

    Receding-horizon execution: keep the last n_obs_steps observations in a
    buffer, query the policy for a chunk of n_action_steps actions, execute
    the whole chunk while refreshing the buffer each step, then re-query.

    Args:
        task: task name (key into TASK_CONFIGS).
        ckpt_path: path to the DP checkpoint (.ckpt).
        num_rollouts: number of evaluation episodes.
        render: show the on-screen renderer.
        device: torch device for inference.

    Returns:
        Success rate in [0, 1].
    """

    # Add DP to path for imports
    # NOTE(review): repeated calls prepend dp_dir to sys.path each time.
    dp_dir = os.path.join(os.path.dirname(__file__), "..", "policy", "DP")
    sys.path.insert(0, dp_dir)

    policy, cfg = load_dp_policy(ckpt_path, device)
    n_obs_steps = cfg.n_obs_steps
    n_action_steps = cfg.n_action_steps

    # Create environment
    from tactile_tasks.collect_data import create_env, TASK_CONFIGS
    settle_steps = 50
    max_timesteps = max(TASK_CONFIGS[task]["horizon"], 900)
    env = create_env(task, has_renderer=render)
    # Extend the env horizon so it never terminates before our own budget.
    env.horizon = max_timesteps + settle_steps

    successes = []
    for ep in range(num_rollouts):
        env.reset()
        # Let objects settle
        for _ in range(settle_steps):
            env.step(np.zeros(7))

        # Initialize obs history buffer with current observation repeated
        obs = get_obs(env)
        obs_history = deque(maxlen=n_obs_steps)
        for _ in range(n_obs_steps):
            obs_history.append(obs)

        episode_success = False
        t = 0

        with torch.inference_mode():
            while t < max_timesteps:
                # Build observation batch: (1, n_obs_steps, ...)
                head_cams = np.stack([o["head_camera"] for o in obs_history])  # (T_o, 3, H, W)
                states = np.stack([o["state"] for o in obs_history])  # (T_o, 8)

                # Images scaled to [0, 1]; state left raw (policy normalizes).
                obs_dict = {
                    "head_cam": torch.from_numpy(head_cams / 255.0).float().unsqueeze(0).to(device),
                    "agent_pos": torch.from_numpy(states).float().unsqueeze(0).to(device),
                }

                # Query policy → get n_action_steps actions
                result = policy.predict_action(obs_dict)
                actions = result["action"].squeeze(0).cpu().numpy()  # (n_action_steps, 7)

                # Execute action chunk
                for i in range(len(actions)):
                    if t >= max_timesteps:
                        break

                    action = actions[i]  # 7D OSC_POSE
                    env.step(action)
                    if render:
                        env.render()

                    # Update obs history
                    obs = get_obs(env)
                    obs_history.append(obs)

                    # Success is latched: once achieved, the episode counts.
                    if env._check_success():
                        episode_success = True

                    t += 1

        successes.append(episode_success)
        status = "SUCCESS" if episode_success else "FAIL"
        print(f" [{ep+1}/{num_rollouts}] {status} (t={t})")

    success_rate = sum(successes) / len(successes)
    print(f"\n{'='*40}")
    print(f"Task: {task}")
    print(f"Checkpoint: {ckpt_path}")
    print(f"n_obs_steps={n_obs_steps}, n_action_steps={n_action_steps}")
    print(f"Success: {sum(successes)}/{num_rollouts} ({success_rate*100:.1f}%)")
    print(f"{'='*40}")

    env.close()
    return success_rate
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def find_best_ckpt(task):
    """Locate the most suitable DP checkpoint for *task*.

    Walks the policy/DP checkpoint directories looking for ``*.ckpt`` files,
    preferring paths whose directory contains the task name and, among those,
    the most recently modified file.

    Args:
        task: Task name used to prioritize matching checkpoint paths.

    Returns:
        Path to the best checkpoint as a string, or ``None`` if no ``.ckpt``
        file is found.
    """
    dp_dir = os.path.join(os.path.dirname(__file__), "..", "policy", "DP")

    # Search in checkpoints/ (where train.py saves them) and data/outputs/.
    search_dirs = [
        os.path.join(dp_dir, "checkpoints"),
        os.path.join(dp_dir, "data", "outputs"),
    ]

    candidates = []
    for search_dir in search_dirs:
        if not os.path.isdir(search_dir):
            continue
        for root, _dirs, files in os.walk(search_dir):
            for f in files:
                if not f.endswith(".ckpt"):
                    continue
                path = os.path.join(root, f)
                # Task-matching directories outrank everything else;
                # modification time breaks ties (newest wins).
                score = 1 if task in root else 0
                try:
                    mtime = os.path.getmtime(path)
                except OSError:
                    # File vanished between walk() and stat(); skip it
                    # instead of crashing checkpoint discovery.
                    continue
                candidates.append((score, mtime, path))

    if not candidates:
        return None
    # max() over (score, mtime) tuples replaces sort+index: O(n), same result.
    return max(candidates)[2]
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def _main():
    """CLI entry point: parse arguments, resolve a checkpoint, run evaluation."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--task", type=str, required=True,
                        choices=["precision_grasp", "peg_insertion", "gentle_stack"])
    parser.add_argument("--ckpt", type=str, default=None,
                        help="path to DP checkpoint (.ckpt)")
    parser.add_argument("--num_rollouts", type=int, default=50)
    parser.add_argument("--render", action="store_true", help="visualize with on-screen renderer")
    parser.add_argument("--device", type=str, default="cuda:0")
    args = parser.parse_args()

    # Resolve the checkpoint: honor an explicit --ckpt, otherwise auto-detect.
    ckpt = args.ckpt
    if ckpt is None:
        ckpt = find_best_ckpt(args.task)
        if ckpt is None:
            print("No checkpoint found. Specify with --ckpt /path/to/checkpoint.ckpt")
            sys.exit(1)
        print(f"Auto-detected checkpoint: {ckpt}")

    run_eval(
        task=args.task,
        ckpt_path=ckpt,
        num_rollouts=args.num_rollouts,
        render=args.render,
        device=args.device,
    )


if __name__ == "__main__":
    _main()