# DynaTraj / bb_collect.py
# Bouncing-ball trajectory collection: simulates a MuJoCo board-and-ball
# environment and records (obs, action, reward, done) trajectories to disk.
import mujoco
import numpy as np
import argparse
from typing import Optional, Tuple, Dict, Any
import time
import os
from datetime import datetime
from dataclasses import dataclass
from scipy.spatial.transform import Rotation as R
from tqdm import tqdm
import pickle
from dataset import TrajectoryBuffer
@dataclass
class Config:
    """Hyperparameter bundle for the bouncing-ball environment and controllers.

    XML model parameters (geometry, actuator gains, ctrlrange) are defined
    separately inside the environment's model string and are not covered here.
    """

    # --- video recording ---
    video_width: int = 640      # frame width in pixels
    video_height: int = 480     # frame height in pixels
    video_fps: int = 30         # playback frame rate
    video_codec: str = 'XVID'   # Better compatibility than 'mp4v'

    # --- episode initialization ---
    ball_init_height_min: float = 1.0   # lowest ball spawn height (m)
    ball_init_height_max: float = 1.5   # highest ball spawn height (m)
    ball_init_xy_range: float = 0.2     # ball x/y sampled uniformly in [-r, r]
    board_init_height: float = 0.5      # fixed board spawn height (m)
    board_init_xy_range: float = 0.2    # board x/y sampled uniformly in [-r, r]

    # --- PD controller gains ---
    kp_pos_default: float = 1000.0      # proportional gain, position error
    kd_pos_default: float = 1.0         # derivative gain, linear velocity
    kp_rot_default: float = 100.0       # proportional gain, orientation error
    kd_rot_default: float = 1.0         # derivative gain, angular velocity

    # --- ball-response controller target randomization ---
    target_z_min: float = 0.4           # lower bound of random board height target
    target_z_max: float = 0.7           # upper bound of random board height target
    target_angle_min: float = -0.1      # lower bound of random roll/pitch/yaw target (rad)
    target_angle_max: float = 0.1       # upper bound of random roll/pitch/yaw target (rad)
    randomization_interval: float = 0.5 # seconds between target re-draws

    # --- action clipping (should match XML ctrlrange) ---
    linear_vel_limit: float = 10.0      # |vx|, |vy|, |vz| clip
    angular_vel_limit: float = 1.0      # |wx|, |wy|, |wz| clip
class PingPongEnv:
    """MuJoCo environment with a velocity-actuated free-floating board and a
    falling ping-pong ball.

    The class builds its own model XML, steps the physics, and exposes a 9-D
    board-relative observation via get_obs().  Rendering is either offscreen
    (headless, via mujoco.Renderer) or interactive (passive viewer).
    """

    def __init__(self, headless: bool = False, config: Config = None):
        """
        Initialize MuJoCo ping pong environment with board and ball.
        Args:
            headless: If True, record video; if False, render directly
            config: Configuration object with hyperparameters
        """
        self.headless = headless
        # Fall back to default hyperparameters when no config is supplied
        self.config = config if config is not None else Config()
        self.model_xml = self._create_model_xml()
        # Initialize MuJoCo model and data
        self.model = mujoco.MjModel.from_xml_string(self.model_xml)
        self.data = mujoco.MjData(self.model)
        # Rendering setup (renderer vs. viewer chosen by the headless flag)
        self.renderer = None
        self._setup_rendering()
        # Ball tracking for camera
        self.ball_body_id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "ball")
        # Actuator setup for board control: map short axis names -> actuator ids
        self.act_ids = {
            "vx": mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_ACTUATOR, "vx_motor"),
            "vy": mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_ACTUATOR, "vy_motor"),
            "vz": mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_ACTUATOR, "vz_motor"),
            "wx": mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_ACTUATOR, "wx_motor"),
            "wy": mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_ACTUATOR, "wy_motor"),
            "wz": mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_ACTUATOR, "wz_motor"),
        }
        # Fixed ordering used when writing a 6-D action vector into data.ctrl
        self.act_order = ["vx", "vy", "vz", "wx", "wy", "wz"]

    def _create_model_xml(self) -> str:
        """Create MuJoCo XML model definition.

        Scene: checkered ground plane, a 0.3 m x 0.3 m free-floating board,
        a 40 mm / ~2.7 g ball, a ball-tracking camera, and six velocity
        actuators (3 translational + 3 rotational) on the board's free joint.
        """
        # NOTE(review): the vz_motor ctrlrange is [-3, 3] while
        # Config.linear_vel_limit clips all linear commands at +/-10 --
        # confirm which limit is intended for the z axis.
        xml = """
        <mujoco model="pingpong_env">
            <compiler angle="radian" coordinate="local"/>
            <option timestep="0.001" gravity="0 0 -9.81"/>
            <visual>
                <headlight diffuse="0.4 0.4 0.4" ambient="0.5 0.5 0.5" specular="0.1 0.1 0.1"/>
                <global offwidth="640" offheight="480"/>
            </visual>
            <asset>
                <texture name="grid" type="2d" builtin="checker" rgb1="0.1 0.2 0.3" rgb2="0.2 0.3 0.4" width="300" height="300"/>
                <material name="grid" texture="grid" texrepeat="1 1" reflectance="0.2"/>
                <material name="ball_mat" rgba="1 0.5 0 1" shininess="0.8"/>
                <material name="board_mat" rgba="0.2 0.8 0.2 1" shininess="0.3"/>
            </asset>
            <default>
                <geom solref="0.02 0.05" solimp="0.9 0.99 0.001"/>
            </default>
            <worldbody>
                <!-- Soft directional light -->
                <light name="main_light" pos="0.8 0.8 1.5" dir="-0.5 -0.5 -1" diffuse="0.6 0.6 0.6" specular="0.2 0.2 0.2" directional="true"/>
                <!-- Fill light for shadows -->
                <light name="fill_light" pos="-0.5 -0.5 1" dir="0.3 0.3 -0.8" diffuse="0.3 0.3 0.3" specular="0.1 0.1 0.1" directional="true"/>
                <!-- Ground plane -->
                <geom name="ground" type="plane" size="100 100 0.1" material="grid"/>
                <!-- Board (0.3m x 0.3m) -->
                <body name="board" pos="0 0 0.01">
                    <joint name="board_free" type="free"/>
                    <geom name="board" type="box" size="0.15 0.15 0.01" material="board_mat" mass="1.0"/>
                </body>
                <!-- Ping pong ball (40mm diameter, ~2.7g) -->
                <body name="ball" pos="0 0 0.1">
                    <joint name="ball_free" type="free"/>
                    <geom name="ball" type="sphere" size="0.02" material="ball_mat" mass="0.0027" friction="0.7 0.005 0.0001"/>
                    <inertial pos="0 0 0" mass="0.0027" diaginertia="2.16e-7 2.16e-7 2.16e-7"/>
                </body>
            </worldbody>
            <!-- Camera for ball tracking -->
            <worldbody>
                <camera name="ball_cam" pos="0.5 0.5 0.3" xyaxes="1 0 0 0 1 0" fovy="60" mode="trackcom" target="ball"/>
            </worldbody>
            <actuator>
                <!-- translational velocities -->
                <velocity name="vx_motor" joint="board_free" gear="1 0 0 0 0 0" kv="40" ctrllimited="true" ctrlrange="-10 10"/>
                <velocity name="vy_motor" joint="board_free" gear="0 1 0 0 0 0" kv="40" ctrllimited="true" ctrlrange="-10 10"/>
                <velocity name="vz_motor" joint="board_free" gear="0 0 1 0 0 0" kv="40" ctrllimited="true" ctrlrange="-3 3"/>
                <!-- angular velocities -->
                <velocity name="wx_motor" joint="board_free" gear="0 0 0 1 0 0" kv="6" ctrllimited="true" ctrlrange="-1 1"/>
                <velocity name="wy_motor" joint="board_free" gear="0 0 0 0 1 0" kv="6" ctrllimited="true" ctrlrange="-1 1"/>
                <velocity name="wz_motor" joint="board_free" gear="0 0 0 0 0 1" kv="6" ctrllimited="true" ctrlrange="-1 1"/>
            </actuator>
        </mujoco>
        """
        return xml

    def _setup_rendering(self):
        """Setup rendering based on headless mode.

        Headless: offscreen mujoco.Renderer at the configured resolution.
        Interactive: passive viewer when available, otherwise fall back to
        the offscreen renderer.
        """
        if self.headless:
            # Setup for video recording with proper resolution
            self.renderer = mujoco.Renderer(self.model, height=self.config.video_height, width=self.config.video_width)
            self.cam_id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_CAMERA, "ball_cam")
        else:
            # Setup for direct rendering with viewer
            try:
                import mujoco.viewer as mj_viewer
                self.viewer = mj_viewer.launch_passive(self.model, self.data)
                self.cam_id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_CAMERA, "ball_cam")
            except ImportError:
                print("Warning: mujoco.viewer not available, using fallback renderer")
                self.viewer = None
                self.renderer = mujoco.Renderer(self.model, height=self.config.video_height, width=self.config.video_width)
                self.cam_id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_CAMERA, "ball_cam")

    def reset(self) -> np.ndarray:
        """Reset environment to initial state.

        Ball x/y/z and board x/y are re-sampled uniformly from the config
        ranges; board z is fixed at board_init_height with identity rotation.

        Returns:
            The initial 9-D observation from get_obs() (the done flag is
            discarded here).
        """
        # Reset MuJoCo simulation
        mujoco.mj_resetData(self.model, self.data)
        # Set initial ball position with some randomness
        ball_qpos_idx = self.model.jnt_qposadr[mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_JOINT, "ball_free")]
        self.data.qpos[ball_qpos_idx:ball_qpos_idx+3] = [
            np.random.uniform(-self.config.ball_init_xy_range, self.config.ball_init_xy_range),  # x
            np.random.uniform(-self.config.ball_init_xy_range, self.config.ball_init_xy_range),  # y
            np.random.uniform(self.config.ball_init_height_min, self.config.ball_init_height_max)  # z
        ]
        # Set initial board position with some randomness
        board_qpos_idx = self.model.jnt_qposadr[mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_JOINT, "board_free")]
        self.data.qpos[board_qpos_idx:board_qpos_idx+3] = [
            np.random.uniform(-self.config.board_init_xy_range, self.config.board_init_xy_range),  # x
            np.random.uniform(-self.config.board_init_xy_range, self.config.board_init_xy_range),  # y
            self.config.board_init_height  # z (slightly above the ground)
        ]
        self.data.qpos[board_qpos_idx+3:board_qpos_idx+7] = [1, 0, 0, 0]  # Quaternion for no rotation
        # Forward simulation to update state
        mujoco.mj_forward(self.model, self.data)
        obs, done = self.get_obs()
        return obs

    def step(self, action: Optional[np.ndarray] = None) -> Tuple[np.ndarray, float, bool, Dict]:
        """
        Step the environment forward.

        Args:
            action: 6-D vector [vx, vy, vz, wx, wy, wz] written into the
                board's velocity actuators; None leaves ctrl unchanged.
        Returns:
            observation, reward, done, info

        NOTE(review): when get_obs() reports termination after the physics
        step, the environment auto-resets internally and the *post-reset*
        observation is returned, while the returned `done` is always the
        placeholder False -- callers cannot observe episode boundaries from
        this return value.  Confirm this is intended.
        """
        # Apply action to control the board
        if action is not None:
            for i, key in enumerate(self.act_order):
                self.data.ctrl[self.act_ids[key]] = action[i]
        # Handcrafted task: x, y position tracking of the board and ball
        # NOTE(review): ball_pos/board_pos below are computed but never used
        # (dead code, presumably left over from a reward computation).
        ball_qpos_idx = self.model.jnt_qposadr[mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_JOINT, "ball_free")]
        board_qpos_idx = self.model.jnt_qposadr[mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_JOINT, "board_free")]
        ball_pos = self.data.qpos[ball_qpos_idx:ball_qpos_idx+2]
        board_pos = self.data.qpos[board_qpos_idx:board_qpos_idx+2]
        # Step simulation
        mujoco.mj_step(self.model, self.data)
        # Get observation
        obs, done = self.get_obs()
        # reset if done (auto-reset: obs becomes the new episode's first obs)
        if done:
            obs = self.reset()
        # Placeholder reward and done logic
        reward = 0.0
        done = False
        info = {}
        return obs, reward, done, info

    def get_obs(self) -> Tuple[np.ndarray, bool]:
        """Get current observation.

        A 33-D vector (18 global + 15 board-local) is assembled first, then
        reduced to the final 9-D observation:
            [0:3] ball position relative to the board
            [3:6] ball velocity relative to the board
            [6:9] board Euler angles (roll, pitch, yaw)

        global frame:
        - ball pos (x,y,z)
        - ball vel (x,y,z)
        - board pos (x,y,z) + euler (r,p,y)
        - board vel (x,y,z) + angvel (wx,wy,wz)
        local frame:
        - ball pos relative to board (x, y, z)
        - ball vel relative to board (x, y, z)
        - board euler(rpy)
        - board vel(xyz) + angvel(wx,wy,wz)

        Returns:
            (obs, done) where done is True when the ball leaves the
            100 m x/y workspace or drops more than 0.1 m below the board.
        """
        # Get joint indices (qpos and dof addresses differ for free joints:
        # 7 qpos entries vs 6 dof entries)
        ball_qpos_idx = self.model.jnt_qposadr[mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_JOINT, "ball_free")]
        ball_qvel_idx = self.model.jnt_dofadr[mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_JOINT, "ball_free")]
        board_qpos_idx = self.model.jnt_qposadr[mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_JOINT, "board_free")]
        board_qvel_idx = self.model.jnt_dofadr[mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_JOINT, "board_free")]
        # Get raw state data
        ball_pos = self.data.qpos[ball_qpos_idx:ball_qpos_idx+3]
        ball_quat = self.data.qpos[ball_qpos_idx+3:ball_qpos_idx+7]
        ball_vel = self.data.qvel[ball_qvel_idx:ball_qvel_idx+3]
        ball_angvel = self.data.qvel[ball_qvel_idx+3:ball_qvel_idx+6]
        board_pos = self.data.qpos[board_qpos_idx:board_qpos_idx+3]
        board_quat = self.data.qpos[board_qpos_idx+3:board_qpos_idx+7]
        board_vel = self.data.qvel[board_qvel_idx:board_qvel_idx+3]
        board_angvel = self.data.qvel[board_qvel_idx+3:board_qvel_idx+6]
        # Convert board quaternion to Euler angles using scipy
        # Note: MuJoCo quaternions are in [w, x, y, z] format, scipy expects [x, y, z, w]
        board_quat_scipy = np.array([board_quat[1], board_quat[2], board_quat[3], board_quat[0]])  # [x,y,z,w]
        board_euler = R.from_quat(board_quat_scipy).as_euler('xyz', degrees=False)
        # Global frame observations
        global_obs = np.concatenate([
            ball_pos,      # ball position (3)
            ball_vel,      # ball velocity (3)
            board_pos,     # board position (3)
            board_euler,   # board euler angles (3)
            board_vel,     # board velocity (3)
            board_angvel   # board angular velocity (3)
        ])  # Total: 18 dimensions
        # Local frame observations (relative to board)
        ball_pos_relative = ball_pos - board_pos  # ball position relative to board
        ball_vel_relative = ball_vel - board_vel  # ball velocity relative to board
        local_obs = np.concatenate([
            ball_pos_relative,  # ball position relative to board (3)
            ball_vel_relative,  # ball velocity relative to board (3)
            board_euler,        # board euler angles (3)
            board_vel,          # board velocity (3)
            board_angvel        # board angular velocity (3)
        ])  # Total: 15 dimensions
        # Combine global and local observations
        obs = np.concatenate([global_obs, local_obs])  # Total: 33 dimensions
        # NOTE(review): the local_obs block above is discarded by the slice
        # below and the board-relative quantities are recomputed in place --
        # redundant work, but kept as-is to preserve behavior.
        obs = obs[:18]  # keep only the global block
        obs[0:3] = obs[0:3] - obs[6:9]    # ball pos -> relative to board center
        obs[3:6] = obs[3:6] - obs[12:15]  # ball vel -> relative to board vel
        obs = obs[[0, 1, 2, 3, 4, 5, 9, 10, 11]]  # final 9-D: rel pos, rel vel, board euler
        # Done condition: ball(x,y) out of [100,100] or z lower than board - 0.1m
        done = (abs(ball_pos[0]) > 100.0 or
                abs(ball_pos[1]) > 100.0 or
                ball_pos[2] < (board_pos[2] - 0.1))
        return obs, done

    def render(self, mode: str = 'human') -> Optional[np.ndarray]:
        """Render the environment.

        Returns:
            An RGB image array when rendering offscreen, None when the
            interactive viewer is used (or no renderer is available).
        """
        if self.headless:
            # Render to image for video recording
            self.renderer.update_scene(self.data, camera=self.cam_id)
            image = self.renderer.render()
            return image
        else:
            # Direct rendering with viewer
            if hasattr(self, 'viewer') and self.viewer is not None:
                # Update viewer with current simulation state
                self.viewer.sync()
                return None
            elif hasattr(self, 'renderer') and self.renderer is not None:
                # Fallback to renderer if viewer not available
                self.renderer.update_scene(self.data, camera=self.cam_id)
                image = self.renderer.render()
                return image
            else:
                print(f"No rendering available at time: {self.data.time:.3f}")
                return None

    def close(self):
        """Clean up rendering resources (viewer and/or offscreen renderer)."""
        if hasattr(self, 'viewer') and self.viewer is not None:
            self.viewer.close()
        if hasattr(self, 'renderer') and self.renderer is not None:
            self.renderer.close()
class PingPongDummyController:
    """
    A simple PD controller for the board.

    Two control modes are provided:
    - get_dummy_position_action: stabilize the board at a fixed target pose.
    - get_ball_response_action: track the ball in x/y while periodically
      randomizing the target height and orientation.
    """

    def __init__(self, target_pos: np.ndarray = None, target_euler: np.ndarray = None,
                 config: Config = None):
        """
        Initialize the controller with PD gains.

        Args:
            target_pos: Target position [x, y, z]. Defaults to [0, 0, 0.5]
            target_euler: Target Euler angles [roll, pitch, yaw] in radians.
                Defaults to [0, 0.5, 0] (note the non-zero pitch).
            config: Configuration object with hyperparameters
        """
        self.config = config if config is not None else Config()
        self.target_pos = target_pos if target_pos is not None else np.array([0.0, 0.0, 0.5])
        # NOTE(review): default pitch of 0.5 rad tilts the board; confirm this
        # is the intended default rather than a flat [0, 0, 0] pose.
        self.target_euler = target_euler if target_euler is not None else np.array([0.0, 0.5, 0.0])  # [roll, pitch, yaw]
        # PD controller gains from config
        self.kp_pos = self.config.kp_pos_default
        self.kd_pos = self.config.kd_pos_default
        self.kp_rot = self.config.kp_rot_default
        self.kd_rot = self.config.kd_rot_default
        # Initialize randomized targets (re-drawn every randomization_interval
        # seconds by get_ball_response_action)
        self.target_z = 0.5
        self.target_roll = 0.0
        self.target_pitch = 0.0
        self.target_yaw = 0.0
        self.last_update_time = 0.0

    def reset_controller(self) -> None:
        """Reset controller state for new episode."""
        self.last_update_time = 0.0
        # Optionally reset targets to initial values
        self.target_z = 0.5
        self.target_roll = 0.0
        self.target_pitch = 0.0
        self.target_yaw = 0.0

    def quat_to_euler(self, quat: np.ndarray) -> np.ndarray:
        """
        Convert quaternion to Euler angles (roll, pitch, yaw).

        Args:
            quat: Quaternion [w, x, y, z] (MuJoCo convention)
        Returns:
            euler: Euler angles [roll, pitch, yaw] in radians
        """
        w, x, y, z = quat
        # Roll (x-axis rotation)
        sinr_cosp = 2 * (w * x + y * z)
        cosr_cosp = 1 - 2 * (x * x + y * y)
        roll = np.arctan2(sinr_cosp, cosr_cosp)
        # Pitch (y-axis rotation)
        sinp = 2 * (w * y - z * x)
        if abs(sinp) >= 1:
            pitch = np.copysign(np.pi / 2, sinp)  # Use 90 degrees if out of range (gimbal lock)
        else:
            pitch = np.arcsin(sinp)
        # Yaw (z-axis rotation)
        siny_cosp = 2 * (w * z + x * y)
        cosy_cosp = 1 - 2 * (y * y + z * z)
        yaw = np.arctan2(siny_cosp, cosy_cosp)
        return np.array([roll, pitch, yaw])

    def euler_to_quat(self, euler: np.ndarray) -> np.ndarray:
        """
        Convert Euler angles to quaternion.

        Args:
            euler: Euler angles [roll, pitch, yaw] in radians
        Returns:
            quat: Quaternion [w, x, y, z] (MuJoCo convention)
        """
        roll, pitch, yaw = euler
        cy = np.cos(yaw * 0.5)
        sy = np.sin(yaw * 0.5)
        cp = np.cos(pitch * 0.5)
        sp = np.sin(pitch * 0.5)
        cr = np.cos(roll * 0.5)
        sr = np.sin(roll * 0.5)
        w = cr * cp * cy + sr * sp * sy
        x = sr * cp * cy - cr * sp * sy
        y = cr * sp * cy + sr * cp * sy
        z = cr * cp * sy - sr * sp * cy
        return np.array([w, x, y, z])

    def get_dummy_position_action(self, env: PingPongEnv) -> np.ndarray:
        """
        Compute a PD control action that stabilizes the board at the fixed
        target pose (self.target_pos / self.target_euler).

        Args:
            env: The PingPong environment
        Returns:
            action: 6D action vector [vx, vy, vz, wx, wy, wz], clipped to
                the config velocity limits
        """
        # Get board joint indices
        board_qpos_idx = env.model.jnt_qposadr[mujoco.mj_name2id(env.model, mujoco.mjtObj.mjOBJ_JOINT, "board_free")]
        board_qvel_idx = env.model.jnt_dofadr[mujoco.mj_name2id(env.model, mujoco.mjtObj.mjOBJ_JOINT, "board_free")]
        # Get current board state
        current_pos = env.data.qpos[board_qpos_idx:board_qpos_idx+3]
        current_quat = env.data.qpos[board_qpos_idx+3:board_qpos_idx+7]
        current_vel = env.data.qvel[board_qvel_idx:board_qvel_idx+3]
        current_angvel = env.data.qvel[board_qvel_idx+3:board_qvel_idx+6]
        # Position control (PD controller)
        pos_error = self.target_pos - current_pos
        vel_error = -current_vel  # Target velocity is 0
        # Linear velocity commands
        linear_cmd = self.kp_pos * pos_error + self.kd_pos * vel_error
        # Rotation control using Euler angles
        current_euler = self.quat_to_euler(current_quat)
        # Compute rotation error (with angle wrapping)
        rot_error = self.target_euler - current_euler
        # Wrap angles to [-pi, pi] for shortest path
        rot_error = np.arctan2(np.sin(rot_error), np.cos(rot_error))
        # Angular velocity error (target angular velocity is 0)
        angvel_error = -current_angvel
        # Angular velocity commands (PD controller)
        angular_cmd = self.kp_rot * rot_error + self.kd_rot * angvel_error
        # Combine into action vector [vx, vy, vz, wx, wy, wz]
        action = np.concatenate([linear_cmd, angular_cmd])
        # Clip to actuator limits (from config, should match XML ctrlrange)
        action[:3] = np.clip(action[:3], -self.config.linear_vel_limit, self.config.linear_vel_limit)  # Linear velocity limits
        action[3:] = np.clip(action[3:], -self.config.angular_vel_limit, self.config.angular_vel_limit)  # Angular velocity limits
        return action

    def get_ball_response_action(self, env: PingPongEnv) -> np.ndarray:
        """
        Compute a PD action that makes the board respond to the ball.

        Control objective:
        1. match x, y of the ball
        2. randomized z in [config.target_z_min, config.target_z_max]
           (default [0.4, 0.7])
        3. randomized euler (roll, pitch, yaw), each in
           [config.target_angle_min, config.target_angle_max]
           (default [-0.1, 0.1])

        Randomized targets are re-drawn every config.randomization_interval
        seconds of simulation time.

        Args:
            env: The PingPong environment
        Returns:
            action: 6D action vector [vx, vy, vz, wx, wy, wz], clipped to
                the config velocity limits
        """
        # Get ball position
        ball_qpos_idx = env.model.jnt_qposadr[mujoco.mj_name2id(env.model, mujoco.mjtObj.mjOBJ_JOINT, "ball_free")]
        ball_pos = env.data.qpos[ball_qpos_idx:ball_qpos_idx+3]
        # Get board joint indices
        board_qpos_idx = env.model.jnt_qposadr[mujoco.mj_name2id(env.model, mujoco.mjtObj.mjOBJ_JOINT, "board_free")]
        board_qvel_idx = env.model.jnt_dofadr[mujoco.mj_name2id(env.model, mujoco.mjtObj.mjOBJ_JOINT, "board_free")]
        # Get current board state
        current_pos = env.data.qpos[board_qpos_idx:board_qpos_idx+3]
        current_quat = env.data.qpos[board_qpos_idx+3:board_qpos_idx+7]
        current_vel = env.data.qvel[board_qvel_idx:board_qvel_idx+3]
        current_angvel = env.data.qvel[board_qvel_idx+3:board_qvel_idx+6]
        # Create dynamic target position and orientation
        # 1. Match ball x, y position
        target_x = ball_pos[0]
        target_y = ball_pos[1]
        # 2. Update randomized targets every randomization_interval seconds
        if (env.data.time - self.last_update_time >= self.config.randomization_interval):
            self.target_z = np.random.uniform(self.config.target_z_min, self.config.target_z_max)
            # 3. Randomized euler angles for each axis
            self.target_roll = np.random.uniform(self.config.target_angle_min, self.config.target_angle_max)
            self.target_pitch = np.random.uniform(self.config.target_angle_min, self.config.target_angle_max)
            self.target_yaw = np.random.uniform(self.config.target_angle_min, self.config.target_angle_max)
            self.last_update_time = env.data.time
        # Set dynamic targets
        dynamic_target_pos = np.array([target_x, target_y, self.target_z])
        dynamic_target_euler = np.array([self.target_roll, self.target_pitch, self.target_yaw])
        # Position control (PD controller)
        pos_error = dynamic_target_pos - current_pos
        vel_error = -current_vel  # Target velocity is 0
        # Linear velocity commands
        linear_cmd = self.kp_pos * pos_error + self.kd_pos * vel_error
        # Rotation control using Euler angles
        current_euler = self.quat_to_euler(current_quat)
        # Compute rotation error (with angle wrapping)
        rot_error = dynamic_target_euler - current_euler
        # Wrap angles to [-pi, pi] for shortest path
        rot_error = np.arctan2(np.sin(rot_error), np.cos(rot_error))
        # Angular velocity error (target angular velocity is 0)
        angvel_error = -current_angvel
        # Angular velocity commands (PD controller)
        angular_cmd = self.kp_rot * rot_error + self.kd_rot * angvel_error
        # Combine into action vector [vx, vy, vz, wx, wy, wz]
        action = np.concatenate([linear_cmd, angular_cmd])
        # Clip to actuator limits (from config, should match XML ctrlrange)
        action[:3] = np.clip(action[:3], -self.config.linear_vel_limit, self.config.linear_vel_limit)  # Linear velocity limits
        action[3:] = np.clip(action[3:], -self.config.angular_vel_limit, self.config.angular_vel_limit)  # Angular velocity limits
        return action
def collect_bouncing_ball_data(
    env: PingPongEnv,
    controller: PingPongDummyController,
    target_trajectories: int,
    steps_per_traj: int,
    render: bool = False,
    realtime: bool = False
) -> TrajectoryBuffer:
    """
    Collect trajectory data from bouncing ball environment using the
    ball_response controller.

    Each step records (obs, action, reward, done) with batch dimension 1.
    Reward is the negative x/y distance between ball and board.

    Args:
        env: PingPong environment
        controller: Controller for board movement
        target_trajectories: Number of trajectories to collect
        steps_per_traj: Steps per trajectory
        render: Whether to render during collection
        realtime: Whether to match simulation speed to real time
    Returns:
        TrajectoryBuffer with collected data

    NOTE(review): env.step() auto-resets internally when its episode ends
    and always returns done=False, so done_after_step (queried on the
    post-reset state) will effectively never be True -- reset
    discontinuities are not marked in the buffer and trajectories always
    run the full steps_per_traj.  Confirm this is intended.
    """
    buffer = TrajectoryBuffer(steps_per_traj)
    pbar = tqdm(total=target_trajectories, desc="Collecting bouncing ball data")
    collected_trajs = 0
    while collected_trajs < target_trajectories:
        # Reset environment and controller
        obs = env.reset()
        controller.reset_controller()
        # Collect one trajectory
        for step in range(steps_per_traj):
            # Get current observation (the done flag here is intentionally
            # unused; termination is evaluated after the step below)
            current_obs, done = env.get_obs()
            # Get action from ball_response controller
            action = controller.get_ball_response_action(env)
            # Calculate reward (simple distance-based for now)
            ball_qpos_idx = env.model.jnt_qposadr[mujoco.mj_name2id(env.model, mujoco.mjtObj.mjOBJ_JOINT, "ball_free")]
            board_qpos_idx = env.model.jnt_qposadr[mujoco.mj_name2id(env.model, mujoco.mjtObj.mjOBJ_JOINT, "board_free")]
            ball_pos = env.data.qpos[ball_qpos_idx:ball_qpos_idx+3]
            board_pos = env.data.qpos[board_qpos_idx:board_qpos_idx+3]
            # Reward based on ball-board distance (encouraging ball to stay near board)
            distance = np.linalg.norm(ball_pos[:2] - board_pos[:2])  # x,y distance only
            reward = -distance  # Negative distance as reward
            # Step environment first to get the next state
            _, _, env_done, _ = env.step(action)
            # Get done status after stepping (this is what we want to predict)
            _, done_after_step = env.get_obs()
            # Append step to buffer (B=1 for single environment)
            # obs->action->done pattern: observation leads to action, done is result after action
            obs_np = current_obs[None, :]  # Add batch dimension
            ext_obs_np = obs_np  # Same as obs for this environment
            action_np = action[None, :]  # Add batch dimension
            reward_np = np.array([reward], dtype=np.float32)
            done_np = np.array([done_after_step], dtype=np.bool_)  # Done status after applying action
            buffer.append_step(obs_np, ext_obs_np, action_np, reward_np, done_np)
            # Render if requested
            if render:
                env.render()
            # Sleep for realtime if requested
            if realtime:
                time.sleep(env.model.opt.timestep)
            # Break if episode is done
            if done_after_step or env_done:
                break
        collected_trajs += 1
        pbar.update(1)
    pbar.close()
    return buffer
def parse_data_collection_args(argv=None):
    """Parse command line arguments for data collection.

    Args:
        argv: Optional list of argument strings to parse instead of
            sys.argv[1:].  Defaults to None (read from the command line),
            which preserves the original behavior; passing an explicit list
            makes the function usable programmatically and testable.

    Returns:
        argparse.Namespace with fields: trajectories, steps_per_trajectory,
        out_dir, seed, headless, realtime, demo.
    """
    parser = argparse.ArgumentParser(description="Collect bouncing ball dataset")
    parser.add_argument("--trajectories", type=int, default=1000, help="Number of trajectories to collect")
    parser.add_argument("--steps_per_trajectory", type=int, default=20000, help="Steps per trajectory")
    parser.add_argument("--out_dir", type=str, default="./dataset/bb/", help="Output directory")
    parser.add_argument("--seed", type=int, default=42, help="Random seed")
    parser.add_argument("--headless", action="store_true", help="Run in headless mode (no rendering)")
    # Demo simulation args for backward compatibility
    parser.add_argument("--realtime", action="store_true", help="Match simulation speed to real time")
    parser.add_argument("--demo", action="store_true", help="Run demo mode instead of data collection")
    # parse_args(None) falls back to sys.argv[1:], so callers are unaffected
    return parser.parse_args(argv)
def main():
    """Entry point: run either a 1000-step demo rollout (--demo) or full
    trajectory collection (default), saving the dataset plus a pickled
    metadata dict next to it."""
    args = parse_data_collection_args()
    # Set random seed (numpy only; MuJoCo itself is deterministic given state)
    np.random.seed(args.seed)
    if args.demo:
        # Demo simulation mode
        # Create configuration and environment
        config = Config()
        env = PingPongEnv(headless=args.headless, config=config)
        controller = PingPongDummyController(config=config)
        try:
            # Reset environment and controller
            obs = env.reset()
            controller.reset_controller()
            print(f"Initial observation shape: {obs.shape}")
            # Run simulation for 1000 steps
            for step in range(1000):
                # Get action from ball_response controller
                action = controller.get_ball_response_action(env)
                obs, reward, done, info = env.step(action)
                if not args.headless:
                    # Render every frame for smooth visualization
                    env.render()
                    if args.realtime:
                        time.sleep(env.model.opt.timestep)
                if done:
                    obs = env.reset()  # Auto-reset if done
                    controller.reset_controller()  # Reset controller state
            print("Demo simulation completed")
        finally:
            # Always release viewer/renderer resources
            env.close()
    else:
        # Data collection mode (default)
        print(f"Starting data collection with {args.trajectories} trajectories...")
        # Create output directory
        os.makedirs(args.out_dir, exist_ok=True)
        # Create configuration and environment
        config = Config()
        env = PingPongEnv(headless=args.headless, config=config)
        controller = PingPongDummyController(config=config)
        try:
            # Collect data
            buffer = collect_bouncing_ball_data(
                env=env,
                controller=controller,
                target_trajectories=args.trajectories,
                steps_per_traj=args.steps_per_trajectory,
                render=not args.headless,  # Render if not headless
                realtime=args.realtime
            )
            # Save dataset (timestamped so repeated runs don't overwrite)
            timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
            file_stem = f"bb_ball_response_{timestamp}"
            dataset_path = os.path.join(args.out_dir, f"{file_stem}.npz")
            buffer.save(dataset_path)
            # Save metadata
            # NOTE(review): total_steps assumes every trajectory ran the full
            # steps_per_trajectory; trajectories that terminate early would
            # make this an overcount -- confirm against TrajectoryBuffer.
            meta = {
                "environment": "bouncing_ball",
                "control_mode": "ball_response",
                "trajectories": args.trajectories,
                "steps_per_trajectory": args.steps_per_trajectory,
                "total_trajectories": len(buffer),
                "total_steps": len(buffer) * args.steps_per_trajectory,
                "seed": args.seed,
                "config": config,
                "timestamp": timestamp,
                "headless": args.headless
            }
            metadata_path = os.path.join(args.out_dir, f"{file_stem}_metadata.pkl")
            with open(metadata_path, "wb") as f:
                pickle.dump(meta, f)
            print(f"[INFO] Data collection completed!")
            print(f"[INFO] Dataset saved: {dataset_path}")
            print(f"[INFO] Metadata saved: {metadata_path}")
            print(f"[INFO] Collected {len(buffer)} trajectories")
        finally:
            # Always release viewer/renderer resources
            env.close()
# Script entry point: parse CLI args and run demo or data collection.
if __name__ == "__main__":
    main()