# RoboTwin / tactile_tasks / collect_data.py
# (listing header: added via upload-large-folder tool, commit 1ac176e verified)
#!/usr/bin/env python3
"""
Data collection for contact-rich manipulation with tactile sensing.
Fixed version: improved alignment loops, better z calculation, stable success check.
"""
import os
import sys
import argparse
from datetime import datetime
import math
import numpy as np
import h5py
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import robosuite
from tactile_tasks.uskin_sensor import USkinSensor
# ---- OSC_POSE helpers (7D action: [Δx, Δy, Δz, Δrx, Δry, Δrz, gripper]) ----
def get_eef_pos(env):
    """Return a copy of the right EEF site's world-frame position (3,)."""
    site = env.robots[0].eef_site_id["right"]
    return env.sim.data.site_xpos[site].copy()
def get_eef_z_axis(env):
    """Return the EEF z-axis expressed in the world frame (3,)."""
    site = env.robots[0].eef_site_id["right"]
    rot = env.sim.data.site_xmat[site].reshape(3, 3)
    return np.array(rot[:, 2])
def get_ori_correction(env, ori_gain=1.0):
    """
    Compute an axis-angle delta that rotates the EEF z-axis toward
    [0, 0, -1] (pointing straight down).

    Returns a 3D orientation delta suitable for an OSC_POSE action,
    clipped elementwise to [-1, 1] and scaled by ori_gain.  The delta
    is relative to the current orientation (goal_update_mode='achieved').
    """
    current_z = get_eef_z_axis(env)
    desired_z = np.array([0.0, 0.0, -1.0])
    rot_vec = np.cross(current_z, desired_z)
    sin_theta = np.linalg.norm(rot_vec)
    # Already vertical (within numerical tolerance): nothing to correct.
    if sin_theta < 1e-6:
        return np.zeros(3)
    cos_theta = np.dot(current_z, desired_z)
    theta = np.arctan2(sin_theta, cos_theta)
    correction = (rot_vec / sin_theta) * theta * ori_gain
    return np.clip(correction, -1, 1)
def move_action(env, target_pos, gain=5.0, gripper=0.0, ori_gain=0.0):
    """Build a 7D OSC_POSE action: clipped position delta toward
    target_pos, optional vertical orientation correction, and a
    gripper command."""
    pos_delta = np.clip((target_pos - get_eef_pos(env)) * gain, -1, 1)
    if ori_gain > 0:
        ori_delta = get_ori_correction(env, ori_gain)
    else:
        ori_delta = np.zeros(3)
    return np.concatenate([pos_delta, ori_delta, [gripper]])
def at_target(env, target_pos, threshold=0.01):
    """True when the EEF is within `threshold` (m) of target_pos."""
    dist = np.linalg.norm(get_eef_pos(env) - target_pos)
    return dist < threshold
def grip_action(env, gripper=1.0, ori_gain=0.0):
    """7D action holding position (zero translation) while commanding the
    gripper; applies vertical orientation correction when ori_gain > 0."""
    if ori_gain > 0:
        ori_delta = get_ori_correction(env, ori_gain)
    else:
        ori_delta = np.zeros(3)
    return np.concatenate([[0, 0, 0], ori_delta, [gripper]])
def is_upright(env, body_id, tol=0.15):
    """True if the body's local z-axis is within `tol` of vertical
    (either up or down, since |z| is used)."""
    import mujoco
    rot9 = np.zeros(9)
    mujoco.mju_quat2Mat(rot9, env.sim.data.body_xquat[body_id].copy())
    body_z = rot9.reshape(3, 3)[:, 2]
    return abs(body_z[2]) > (1.0 - tol)
# ---- Core alignment primitive ----
# Key fix: continuous feedback loop that drives OBJECT xy to TARGET xy
# instead of one-shot correction. Runs until converged or max_steps.
def align_object_to_xy(env, sensor, recorder, get_obj_xy_fn, target_xy,
                       gripper=1.0, xy_tol=0.003, max_steps=80, gain=8.0,
                       ori_gain=0.0):
    """
    Continuously servo the EEF so the held object's xy matches target_xy.

    Runs a closed feedback loop (up to max_steps) that moves the EEF by a
    clipped fraction of the object's xy error, then holds the final pose
    for a few steps so the grasp settles.  ori_gain > 0 additionally keeps
    the EEF vertical via OSC_POSE orientation correction.
    """
    for _ in range(max_steps):
        if recorder.done:
            break
        err = target_xy - get_obj_xy_fn()
        eef_now = get_eef_pos(env)
        if np.linalg.norm(err) < xy_tol:
            break
        # Move the EEF by a bounded step in the direction of the error.
        goal = eef_now.copy()
        goal[:2] += np.clip(err * gain, -0.05, 0.05)
        recorder.step(env, sensor,
                      move_action(env, goal, gain=5.0, gripper=gripper,
                                  ori_gain=ori_gain))
    # Hold in place briefly so the object stops swinging before the caller
    # proceeds.
    hold = get_eef_pos(env).copy()
    recorder.run_for(env, sensor,
                     lambda: move_action(env, hold, gain=5.0, gripper=gripper,
                                         ori_gain=ori_gain),
                     steps=8)
def descend_with_alignment(env, sensor, recorder, get_obj_xy_fn, target_xy,
                           target_z, gripper=1.0, z_tol=0.005,
                           xy_gain=8.0, z_gain=5.0, max_steps=80,
                           ori_gain=0.0):
    """
    Lower the EEF toward target_z while keeping the held object's xy
    servoed onto target_xy.

    Converged when |z error| < z_tol AND the xy error is below 5 mm,
    checked after each step using the pre-step errors.  ori_gain > 0
    applies vertical orientation correction.
    """
    for _ in range(max_steps):
        if recorder.done:
            break
        xy_err = target_xy - get_obj_xy_fn()
        eef_now = get_eef_pos(env)
        z_err = target_z - eef_now[2]
        # Bounded corrective step in xy plus a bounded descent step in z.
        goal = eef_now.copy()
        goal[:2] += np.clip(xy_err * xy_gain, -0.05, 0.05)
        goal[2] += np.clip(z_err * z_gain, -0.15, 0.15)
        recorder.step(env, sensor,
                      move_action(env, goal, gain=5.0, gripper=gripper,
                                  ori_gain=ori_gain))
        if abs(z_err) < z_tol and np.linalg.norm(xy_err) < 0.005:
            break
# ---- Task policies ----
def run_precision_grasp(env, sensor, recorder):
    """Scripted precision-grasp policy: hover above the object, descend to
    its center, close the gripper, then lift straight up."""
    obj_pos = env.sim.data.body_xpos[env.obj_body_id].copy()
    hover = obj_pos.copy()
    hover[2] += 0.08
    recorder.run_until(env, sensor,
                       lambda: move_action(env, hover, gain=5.0, gripper=-1),
                       done_fn=lambda: at_target(env, hover, 0.01),
                       max_steps=80)
    recorder.run_until(env, sensor,
                       lambda: move_action(env, obj_pos, gain=5.0, gripper=-1),
                       done_fn=lambda: at_target(env, obj_pos, 0.01),
                       max_steps=60)
    # Close the gripper and give it time to settle on the object.
    recorder.run_for(env, sensor, lambda: grip_action(env, 1.0), steps=15)
    lift_goal = get_eef_pos(env).copy()
    lift_goal[2] += 0.12
    recorder.run_until(env, sensor,
                       lambda: move_action(env, lift_goal, gain=5.0, gripper=1.0),
                       done_fn=lambda: at_target(env, lift_goal, 0.01),
                       max_steps=60)
def grasp_object(env, sensor, recorder, obj_body_id, obj_geoms,
                 descend_gain=3.0, grasp_steps=30, retry_dz=-0.012):
    """
    General-purpose grasp primitive.
    Returns measured (eef_pos - obj_pos) offset after successful grasp,
    so callers can use it for accurate downstream z calculations.
    Tries multiple z heights if first attempt fails.

    Parameters
    ----------
    obj_body_id : int
        MuJoCo body id of the object to grasp.
    obj_geoms
        Geoms passed to env._check_grasp for the grasp-contact check.
    descend_gain : float
        Gain used for the descent move_action.
    grasp_steps : int
        Number of steps to hold the close-gripper command.
    retry_dz : float
        z offset (m) per retry attempt; negative means grasp lower.
    """
    obj_pos = env.sim.data.body_xpos[obj_body_id].copy()
    # Move above object
    above = obj_pos.copy(); above[2] += 0.12
    recorder.run_until(env, sensor, lambda: move_action(env, above, gain=5.0, gripper=-1),
                       done_fn=lambda: at_target(env, above, 0.01), max_steps=80)
    # Try grasping at progressively lower z positions
    # Start at obj center, try up to 3 heights spaced 12mm apart
    for dz in [0.0, retry_dz, retry_dz * 2]:
        grasp_target = obj_pos.copy(); grasp_target[2] += dz
        # Lambda default (t=grasp_target) binds the CURRENT target, avoiding
        # the late-binding closure pitfall across loop iterations.
        recorder.run_until(env, sensor,
                           lambda t=grasp_target: move_action(env, t, gain=descend_gain, gripper=-1),
                           done_fn=lambda t=grasp_target: at_target(env, t, 0.006),
                           max_steps=80)
        recorder.run_for(env, sensor, lambda: grip_action(env, 1.0), steps=grasp_steps)
        if env._check_grasp(gripper=env.robots[0].gripper, object_geoms=obj_geoms):
            break
        # Re-open for next retry
        # NOTE(review): this also runs after the last failed attempt, so on
        # total failure the gripper is left open — presumably intended.
        recorder.run_for(env, sensor, lambda: grip_action(env, -1.0), steps=10)
    # Measure actual offset EEF→obj immediately after grasp
    eef_after = get_eef_pos(env).copy()
    obj_after = env.sim.data.body_xpos[obj_body_id].copy()
    eef_to_obj_offset = eef_after - obj_after  # typically [~0, ~0, +z] (EEF above obj center)
    return eef_to_obj_offset
def run_peg_insertion(env, sensor, recorder):
    """Scripted peg-in-hole policy.

    Phases: grasp the peg, lift, move above the hole, align xy with a
    gradual orientation ramp, descend to just above the hole, then run a
    force-guided spiral search until the peg is inserted, and finally
    release and retreat.

    NOTE(review): Phase 4 aligns the peg onto hole_xy_target (hole xy
    shifted by the measured grasp y-offset) while Phases 5-6 servo toward
    plain hole_xy — confirm this asymmetry is intentional.
    """
    # ── Phase 1: Grasp peg ──────────────────────────────────────────────────
    eef_to_peg = grasp_object(
        env, sensor, recorder,
        obj_body_id=env.peg_body_id,
        obj_geoms=env.peg,
        descend_gain=3.0,
        grasp_steps=30,
        retry_dz=-0.012,
    )
    # (after grasp_object completes)
    # ── Phase 2: Lift high ──────────────────────────────────────────────────
    lift_pos = get_eef_pos(env).copy(); lift_pos[2] += 0.22
    recorder.run_until(env, sensor, lambda: move_action(env, lift_pos, gain=5.0, gripper=1.0),
                       done_fn=lambda: at_target(env, lift_pos, 0.01), max_steps=100)
    # ── Phase 3: Move to above hole ────────────────────────────────────────
    hole_pos = env.sim.data.body_xpos[env.hole_body_id].copy()
    hole_xy = hole_pos[:2].copy()
    hole_xy_target = hole_xy.copy()
    # Shift the alignment target by the measured grasp y-offset so the PEG
    # (not the EEF) ends up over the hole.
    hole_xy_target[1] += eef_to_peg[1]
    above_hole = np.array([hole_xy[0], hole_xy[1], get_eef_pos(env)[2]])
    recorder.run_until(env, sensor,
                       lambda: move_action(env, above_hole, gain=4.0, gripper=1.0),
                       done_fn=lambda: at_target(env, above_hole, 0.01), max_steps=120)
    # ── Phase 4: Position align + gradual orientation correction ────────────
    align_object_to_xy(
        env, sensor, recorder,
        get_obj_xy_fn=lambda: env.sim.data.body_xpos[env.peg_body_id][:2].copy(),
        target_xy=hole_xy_target,
        gripper=1.0, xy_tol=0.001, max_steps=500, gain=15.0, ori_gain=0.0,
    )
    # Gradual ori ramp — use hole_xy_target throughout for consistency
    for step in range(200):
        if recorder.done:
            break
        peg_xy = env.sim.data.body_xpos[env.peg_body_id][:2].copy()
        eef = get_eef_pos(env)
        target = eef.copy()
        target[:2] += (hole_xy_target - peg_xy) * 3.0
        # Ramp orientation gain linearly from 0 to 1.5 over the first 150 steps.
        og = min(1.5, step / 150.0 * 1.5)
        action = move_action(env, target, gain=10.0, gripper=1.0, ori_gain=og)
        recorder.step(env, sensor, action)
    # Re-measure offset with final orientation
    eef_settled = get_eef_pos(env).copy()
    peg_settled = env.sim.data.body_xpos[env.peg_body_id].copy()
    eef_to_peg = eef_settled - peg_settled
    # ── Phase 5: Descend to just above hole ─────────────────────────────────
    hole_top_z = hole_pos[2] + env.hole_height * 0.5
    # EEF z such that the peg bottom sits ~5 mm above the hole top:
    # eef_z = hole_top + clearance + (eef_z - peg_center_z) + peg_height
    target_eef_z = hole_top_z + 0.005 + eef_to_peg[2] + env.peg_height
    descend_with_alignment(
        env, sensor, recorder,
        get_obj_xy_fn=lambda: env.sim.data.body_xpos[env.peg_body_id][:2].copy(),
        target_xy=hole_xy,
        target_z=target_eef_z,
        gripper=1.0, z_tol=0.005, xy_gain=10.0, z_gain=4.0,
        max_steps=120, ori_gain=0.5,
    )
    # ── Phase 6: Force-guided spiral search + insertion ────────────────────
    search_radius = 0.002
    search_angle = 0.0
    stuck_steps = 0
    for step in range(400):
        if recorder.done:
            break
        mags = sensor.get_force_magnitudes()
        total_force = mags["left_finger"].mean() + mags["right_finger"].mean()
        peg_xy = env.sim.data.body_xpos[env.peg_body_id][:2].copy()
        peg_z = env.sim.data.body_xpos[env.peg_body_id][2]
        hole_z = env.sim.data.body_xpos[env.hole_body_id][2]
        xy_err = hole_xy - peg_xy
        ori = get_ori_correction(env, ori_gain=0.5)
        action = np.zeros(7)
        action[3:6] = ori
        action[6] = 1.0  # keep the gripper closed
        if total_force > 0.8:
            # In contact: lift slightly while spiralling in xy to find the hole.
            stuck_steps += 1
            search_angle += 0.4
            sx = search_radius * math.cos(search_angle)
            sy = search_radius * math.sin(search_angle)
            action[0] = np.clip(xy_err[0] * 15.0 + sx * 20.0, -1, 1)
            action[1] = np.clip(xy_err[1] * 15.0 + sy * 20.0, -1, 1)
            action[2] = +0.05
            if stuck_steps > 20:
                # Stuck too long: widen the spiral (capped at 3 mm).
                search_radius = min(0.003, search_radius + 0.0002)
        else:
            # No contact: keep descending while correcting xy.
            stuck_steps = 0
            action[0] = np.clip(xy_err[0] * 15.0, -1, 1)
            action[1] = np.clip(xy_err[1] * 15.0, -1, 1)
            action[2] = -0.08
        recorder.step(env, sensor, action)
        # Stop once the peg is inserted deep enough; don't rely on _check_success.
        if peg_z < hole_z - env.peg_height * 0.3:
            break
    # ── Phase 7: Release, wait for the peg to settle, then retreat ─────────
    recorder.run_for(env, sensor, lambda: grip_action(env, -1.0), steps=20)
    # Hold in place so the peg settles naturally.
    eef_hold = get_eef_pos(env).copy()
    recorder.run_for(env, sensor,
                     lambda: move_action(env, eef_hold, gain=5.0, gripper=-1),
                     steps=30)
    retreat = get_eef_pos(env).copy(); retreat[2] += 0.10
    recorder.run_until(env, sensor,
                       lambda: move_action(env, retreat, gain=4.0, gripper=-1),
                       done_fn=lambda: at_target(env, retreat, 0.01),
                       max_steps=50)
def run_gentle_stack(env, sensor, recorder):
    """Scripted gentle-stack policy: grasp the box, lift, align its xy over
    the can, descend to just above the can top, push down very slowly until
    tactile contact, then release and retreat straight up.

    Fix: removed stray lines copy-pasted from the peg-insertion task that
    referenced undefined names (``hole_xy``, ``eef_to_peg``) and
    ``env.peg_body_id`` — they raised NameError mid-episode in this task.
    """
    # ── Phase 1: Grasp box, measure real EEF→box offset ──────────────────────
    # box is flat (half-h ~0.032), grasp at center height
    eef_to_box = grasp_object(
        env, sensor, recorder,
        obj_body_id=env.box_body_id,
        obj_geoms=env.stack_box,
        descend_gain=3.0,
        grasp_steps=30,
        retry_dz=-0.010,
    )
    # ── Phase 2: Lift ─────────────────────────────────────────────────────────
    lift_pos = get_eef_pos(env).copy(); lift_pos[2] += 0.14
    recorder.run_until(env, sensor, lambda: move_action(env, lift_pos, gain=5.0, gripper=1.0),
                       done_fn=lambda: at_target(env, lift_pos, 0.01), max_steps=80)
    eef_hold = get_eef_pos(env).copy()
    recorder.run_for(env, sensor,
                     lambda: move_action(env, eef_hold, gain=8.0, gripper=1.0),
                     steps=12)
    # Re-measure offset after settling (box may shift in grip)
    eef_settled = get_eef_pos(env).copy()
    box_settled = env.sim.data.body_xpos[env.box_body_id].copy()
    eef_to_box = eef_settled - box_settled  # [~0, ~0, +z]: EEF is above box center
    # ── Phase 3: Align box xy over can ───────────────────────────────────────
    can_pos = env.sim.data.body_xpos[env.can_body_id].copy()
    can_xy = can_pos[:2].copy()
    align_object_to_xy(
        env, sensor, recorder,
        get_obj_xy_fn=lambda: env.sim.data.body_xpos[env.box_body_id][:2].copy(),
        target_xy=can_xy,
        gripper=1.0,
        xy_tol=0.005,
        max_steps=100,
        gain=8.0,
    )
    # ── Phase 4: Descend to just above can top ────────────────────────────────
    # Key formula:
    #   box_bottom_z = eef_z - eef_to_box[2] - box_half_h
    #   want box_bottom_z = can_top_z + small_clearance
    #   → target_eef_z = can_top_z + clearance + eef_to_box[2] + box_half_h
    can_top_z = can_pos[2] + 0.04  # CylinderObject size=[0.022, 0.04] → half_h=0.04
    # Read actual box half-height from sim geom (BoxObject is randomized, .size unreliable)
    box_geom_names = [n for n in env.sim.model.geom_names
                      if env.stack_box.root_body.lower() in n.lower()]
    if box_geom_names:
        gid = env.sim.model.geom_name2id(box_geom_names[0])
        box_half_h = env.sim.model.geom_size[gid][2]  # z half-extent
    else:
        box_half_h = 0.032  # fallback: midpoint of size_min/size_max z
    target_eef_z = can_top_z + 0.010 + eef_to_box[2] + box_half_h
    descend_with_alignment(
        env, sensor, recorder,
        get_obj_xy_fn=lambda: env.sim.data.body_xpos[env.box_body_id][:2].copy(),
        target_xy=can_xy,
        target_z=target_eef_z,
        gripper=1.0,
        z_tol=0.006,
        xy_gain=6.0,
        z_gain=3.0,
        max_steps=120,
    )
    # ── Phase 5: Gentle final push until contact ──────────────────────────────
    for _ in range(80):
        if recorder.done:
            break
        box_xy = env.sim.data.body_xpos[env.box_body_id][:2].copy()
        xy_err = can_xy - box_xy
        action = np.zeros(7)
        action[:2] = np.clip(xy_err * 8.0, -0.3, 0.3)
        action[2] = -0.004  # very slow descent
        action[6] = 1.0  # gripper closed
        recorder.step(env, sensor, action)
        # Stop on tactile contact
        mags = sensor.get_force_magnitudes()
        avg_force = (mags["left_finger"].mean() + mags["right_finger"].mean()) / 2.0
        if avg_force > 1.0:
            break
    # ── Phase 6: Hold, release, retreat ──────────────────────────────────────
    eef_hold = get_eef_pos(env).copy()
    recorder.run_for(env, sensor,
                     lambda: move_action(env, eef_hold, gain=8.0, gripper=1.0),
                     steps=20)
    recorder.run_for(env, sensor, lambda: grip_action(env, -1.0), steps=15)
    # Straight-up retreat, slow to avoid disturbing box
    retreat = eef_hold.copy(); retreat[2] += 0.10
    recorder.run_until(env, sensor,
                       lambda: move_action(env, retreat, gain=2.0, gripper=-1),
                       done_fn=lambda: at_target(env, retreat, 0.015), max_steps=60)
    # Wait for box to fully settle (success check happens here)
    recorder.run_for(env, sensor,
                     lambda: move_action(env, retreat, gain=2.0, gripper=-1),
                     steps=35)
# ---- Data recording ----
class EpisodeRecorder:
    """Accumulates per-step observations, tactile frames, robot state and
    actions for one episode, then packs everything into numpy arrays via
    finalize()."""

    def __init__(self, env, sensor):
        self.env = env
        self.sensor = sensor
        # Per-key lists, appended on every control step.  The tactile keys
        # receive FREQ_MULTIPLIER entries per control step (higher rate).
        self.data = {
            "agentview_image": [], "eye_in_hand_image": [],
            "agentview_depth": [], "eye_in_hand_depth": [],
            "tactile_left": [], "tactile_right": [],
            "joint_pos": [], "joint_vel": [],
            "eef_pos": [], "eef_quat": [],
            "gripper_qpos": [],
            "actions": [], "rewards": [], "success": [],
        }
        self.step_count = 0
        self.done = False

    def step(self, env, sensor, action):
        """Step the env with `action`, record all modalities, and return the
        (obs, reward, done, info) tuple.  No-op after the episode is done."""
        if self.done:
            return None, 0, True, {}
        obs, reward, done, info = env.step(action)
        self.done = done
        # Tactile sensor runs FREQ_MULTIPLIER times faster than control:
        # sample it multiple times per control step.
        for _ in range(USkinSensor.FREQ_MULTIPLIER):
            td = sensor.update()
            self.data["tactile_left"].append(td["left_finger"].copy())
            self.data["tactile_right"].append(td["right_finger"].copy())
        # Camera streams are optional — record only the ones present in obs.
        if "agentview_image" in obs:
            self.data["agentview_image"].append(obs["agentview_image"])
        if "robot0_eye_in_hand_image" in obs:
            self.data["eye_in_hand_image"].append(obs["robot0_eye_in_hand_image"])
        if "agentview_depth" in obs:
            self.data["agentview_depth"].append(obs["agentview_depth"])
        if "robot0_eye_in_hand_depth" in obs:
            self.data["eye_in_hand_depth"].append(obs["robot0_eye_in_hand_depth"])
        robot = env.robots[0]
        self.data["joint_pos"].append(np.array(env.sim.data.qpos[robot._ref_joint_pos_indexes]))
        self.data["joint_vel"].append(np.array(env.sim.data.qvel[robot._ref_joint_vel_indexes]))
        self.data["eef_pos"].append(get_eef_pos(env))
        # EEF orientation recorded as a quaternion converted from the site
        # rotation matrix.
        eef_mat = env.sim.data.site_xmat[robot.eef_site_id["right"]].reshape(3, 3)
        import mujoco
        quat = np.zeros(4)
        mujoco.mju_mat2Quat(quat, eef_mat.flatten())
        self.data["eef_quat"].append(quat)
        # presumably a dict keyed by arm side — TODO confirm against robosuite
        gripper_idx = robot._ref_gripper_joint_pos_indexes.get("right", [])
        if gripper_idx:
            self.data["gripper_qpos"].append(np.array(env.sim.data.qpos[gripper_idx]))
        else:
            # No gripper joints found: fall back to the commanded gripper value.
            self.data["gripper_qpos"].append(np.array([action[-1]]))
        self.data["actions"].append(action)
        self.data["rewards"].append(reward)
        self.data["success"].append(env._check_success())
        self.step_count += 1
        return obs, reward, done, info

    def run_for(self, env, sensor, action_fn, steps):
        """Apply action_fn() for a fixed number of steps (or until done)."""
        for _ in range(steps):
            if self.done:
                break
            self.step(env, sensor, action_fn())

    def run_until(self, env, sensor, action_fn, done_fn, max_steps):
        """Apply action_fn() until done_fn() is True, the episode ends, or
        max_steps is reached."""
        for _ in range(max_steps):
            if self.done:
                break
            self.step(env, sensor, action_fn())
            if done_fn():
                break

    def finalize(self):
        """Convert accumulated lists to numpy arrays, attach episode-level
        summary fields, and return the data dict."""
        for key in self.data:
            if len(self.data[key]) > 0:
                self.data[key] = np.array(self.data[key])
            else:
                self.data[key] = np.array([])
        # Episode counts as successful if ANY recorded step reported success.
        self.data["task_success"] = bool(any(self.data["success"])) if len(self.data["success"]) > 0 else False
        self.data["n_steps"] = self.step_count
        return self.data
# ---- Task configs ----
# Per-task configuration:
#   env_class  — key into create_env's class map
#   run_fn     — scripted collection policy, or None for keyboard-only tasks
#   horizon    — max env steps per episode
#   controller — arm controller type recorded in episode metadata
TASK_CONFIGS = {
    "precision_grasp": {
        "env_class": "PrecisionGrasp",
        "run_fn": run_precision_grasp,
        "horizon": 3000,
        "controller": "OSC_POSE",
    },
    "peg_insertion": {
        "env_class": "PegInsertion",
        "run_fn": run_peg_insertion,
        "horizon": 5000,
        "controller": "OSC_POSE",
    },
    "gentle_stack": {
        "env_class": "GentleStack",
        "run_fn": run_gentle_stack,
        "horizon": 5000,
        "controller": "OSC_POSE",
    },
    "fragile_transport": {
        "env_class": "FragileTransport",
        "run_fn": None,  # keyboard-only collection
        "horizon": 5000,
        "controller": "OSC_POSE",
    },
    "cup_retrieval": {
        "env_class": "CupRetrieval",
        "run_fn": None,
        "horizon": 5000,
        "controller": "OSC_POSE",
    },
    "pen_insertion": {
        "env_class": "PenInsertion",
        "run_fn": None,
        "horizon": 5000,
        "controller": "OSC_POSE",
    },
}
def create_env(task_name, has_renderer=False, camera_names=None, render_camera="agentview"):
    """Instantiate the robosuite environment for `task_name`.

    Builds a Sawyer + Robotiq85 environment with an OSC_POSE arm controller
    and a GRIP gripper controller, offscreen rendering enabled, 256x256 RGB-D
    cameras, and the horizon taken from TASK_CONFIGS.

    Parameters
    ----------
    task_name : str
        Key into TASK_CONFIGS.
    has_renderer : bool
        Open an on-screen viewer in addition to offscreen rendering.
    camera_names : list[str] | None
        Cameras to render; defaults to agentview + eye-in-hand.
    render_camera : str
        Camera used by the on-screen viewer.
    """
    # Imports are local so that merely importing this module doesn't pull in
    # every task environment.
    from tactile_tasks.envs.precision_grasp import PrecisionGrasp
    from tactile_tasks.envs.peg_insertion import PegInsertion
    from tactile_tasks.envs.gentle_stack import GentleStack
    from tactile_tasks.envs.fragile_transport import FragileTransport
    from tactile_tasks.envs.cup_retrieval import CupRetrieval
    from tactile_tasks.envs.pen_insertion import PenInsertion
    env_classes = {
        "PrecisionGrasp": PrecisionGrasp,
        "PegInsertion": PegInsertion,
        "GentleStack": GentleStack,
        "FragileTransport": FragileTransport,
        "CupRetrieval": CupRetrieval,
        "PenInsertion": PenInsertion,
    }
    config = TASK_CONFIGS[task_name]
    EnvClass = env_classes[config["env_class"]]
    if camera_names is None:
        camera_names = ["agentview", "robot0_eye_in_hand"]
    # Composite controller config: OSC_POSE for the right arm (delta inputs in
    # the base frame) plus a GRIP gripper controller.
    controller_configs = {
        "type": "BASIC",
        "body_parts": {
            "right": {
                "type": "OSC_POSE",
                "input_max": 1,
                "input_min": -1,
                "output_max": [0.15, 0.15, 0.15, 0.5, 0.5, 0.5],
                "output_min": [-0.15, -0.15, -0.15, -0.5, -0.5, -0.5],
                "kp": 200,
                "damping_ratio": 1,
                "impedance_mode": "fixed",
                "kp_limits": [0, 400],
                "damping_ratio_limits": [0, 10],
                "position_limits": None,
                "uncouple_pos_ori": True,
                "input_type": "delta",
                "input_ref_frame": "base",
                "interpolation": None,
                "ramp_ratio": 0.2,
                "gripper": {"type": "GRIP"},
            }
        },
    }
    env = EnvClass(
        robots="Sawyer",
        gripper_types="Robotiq85Gripper",
        controller_configs=controller_configs,
        has_renderer=has_renderer,
        has_offscreen_renderer=True,
        render_camera=render_camera,
        use_camera_obs=True,
        use_object_obs=True,
        control_freq=20,
        horizon=config["horizon"],
        camera_names=camera_names,
        camera_heights=256,
        camera_widths=256,
        camera_depths=True,
        reward_shaping=True,
        renderer="mjviewer",
    )
    return env
def save_episode_hdf5(filepath, episode_data, task_name):
    """Write one collected episode to its own HDF5 file.

    Non-empty numpy arrays become datasets (image/depth streams are
    gzip-compressed); scalar bool/int/float entries become file attributes;
    static collection metadata goes into a 'metadata' group.
    """
    os.makedirs(os.path.dirname(filepath), exist_ok=True)
    with h5py.File(filepath, "w") as f:
        meta = f.create_group("metadata")
        static_meta = {
            "task": task_name,
            "robot": "Sawyer",
            "gripper": "Robotiq85",
            "tactile_sensor": "uSkin_4x4",
            "controller": TASK_CONFIGS.get(task_name, {}).get("controller", "OSC_POSE"),
            "control_freq": 20,
            "tactile_freq": 100,
            "camera_freq": 20,
            "created": datetime.now().isoformat(),
        }
        for attr_name, attr_val in static_meta.items():
            meta.attrs[attr_name] = attr_val
        f.attrs["success"] = bool(episode_data["task_success"])
        f.attrs["n_steps"] = int(episode_data["n_steps"])
        for key, value in episode_data.items():
            if isinstance(value, np.ndarray) and value.size > 0:
                # Only the high-volume visual streams are worth compressing.
                if "image" in key or "depth" in key:
                    f.create_dataset(key, data=value,
                                     compression="gzip", compression_opts=4)
                else:
                    f.create_dataset(key, data=value)
            elif isinstance(value, (bool, int, float)):
                f.attrs[key] = value
def collect_task_data(task_name, n_episodes=1, save_dir="./tactile_data", visualize=False,
                      max_attempts=500):
    """Collect n_episodes SUCCESSFUL episodes. Failed episodes are discarded.

    Parameters
    ----------
    task_name : str      Key into TASK_CONFIGS.
    n_episodes : int     Number of successful episodes to keep.
    save_dir : str       Output root; episodes go in save_dir/task_name/.
    visualize : bool     Open an on-screen renderer.
    max_attempts : int   Hard cap on rollout attempts.

    Returns
    -------
    str
        The directory episodes were written to.
    """
    print(f"\n{'='*60}")
    print(f"Task: {task_name} | Target: {n_episodes} successful episodes")
    print(f"{'='*60}\n")
    task_dir = os.path.join(save_dir, task_name)
    os.makedirs(task_dir, exist_ok=True)
    config = TASK_CONFIGS[task_name]
    # Fix: tasks with run_fn=None (keyboard-only collection) previously
    # crashed here with "TypeError: 'NoneType' object is not callable"
    # (e.g. with --task all). Skip them gracefully instead.
    if config["run_fn"] is None:
        print(f"Task '{task_name}' has no scripted policy (run_fn=None); skipping.")
        return task_dir
    env = create_env(task_name, has_renderer=visualize)
    successes = 0
    attempts = 0
    try:
        while successes < n_episodes and attempts < max_attempts:
            attempts += 1
            print(f"Attempt {attempts} | Saved: {successes}/{n_episodes}")
            obs = env.reset()
            # Let the scene settle before the policy starts.
            for _ in range(80):
                obs, _, _, _ = env.step(np.zeros(env.action_dim))
            sensor = USkinSensor(env.sim, gripper_prefix="gripper0_right_", noise_std=0.005)
            recorder = EpisodeRecorder(env, sensor)
            config["run_fn"](env, sensor, recorder)
            episode_data = recorder.finalize()
            success = episode_data["task_success"]
            print(f"  Steps: {episode_data['n_steps']}, "
                  f"Success: {success}, "
                  f"Reward: {episode_data['rewards'].sum():.1f}")
            if success:
                filepath = os.path.join(task_dir, f"episode_{successes:02d}.hdf5")
                save_episode_hdf5(filepath, episode_data, task_name)
                successes += 1
                print(f"  -> Saved as episode_{successes-1:02d}.hdf5")
            else:
                print(f"  -> Discarded (failed)")
        print(f"\nDone: {successes}/{n_episodes} successful episodes in {attempts} attempts")
    finally:
        # Robustness: always release the sim/renderer, even if a rollout raises.
        env.close()
    return task_dir
def main():
    """CLI entry point: parse arguments and collect data for the selected
    task (or every task when --task all is given)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--task", type=str, default="precision_grasp",
                        choices=list(TASK_CONFIGS.keys()) + ["all"])
    parser.add_argument("--n_episodes", type=int, default=1)
    parser.add_argument("--save_dir", type=str, default="./tactile_data")
    parser.add_argument("--visualize", action="store_true")
    args = parser.parse_args()
    if args.task == "all":
        selected = list(TASK_CONFIGS.keys())
    else:
        selected = [args.task]
    for task_name in selected:
        collect_task_data(task_name, args.n_episodes, args.save_dir, args.visualize)
if __name__ == "__main__":
    main()