# RoboTwin / tactile_tasks / eval_act.py
# (upload residue preserved as comments: added by Fxxkrobotics via
#  upload-large-folder tool, commit 1ac176e verified)
#!/usr/bin/env python3
"""
Evaluate trained ACT policy in robosuite environment.
ACT outputs action chunks (chunk_size steps at once).
Two execution modes:
- chunk: execute full chunk, then re-query (default)
- temporal_agg: query every step, exponentially-weighted average of overlapping chunks
Usage:
cd policy/ACT
python ../../tactile_tasks/eval_act.py --task precision_grasp
python ../../tactile_tasks/eval_act.py --task precision_grasp --temporal_agg
python ../../tactile_tasks/eval_act.py --task precision_grasp --ckpt policy_last.ckpt
"""
import os
import sys
import argparse
import pickle
import numpy as np
import torch
from einops import rearrange
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "policy", "ACT"))
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from act_policy import ACTPolicy
def load_policy(ckpt_dir, ckpt_name, policy_config):
    """Load a trained ACT policy checkpoint plus its dataset statistics.

    Args:
        ckpt_dir: directory containing the checkpoint and dataset_stats.pkl.
        ckpt_name: checkpoint filename inside ckpt_dir.
        policy_config: dict of ACTPolicy constructor options (must match training).

    Returns:
        (policy, stats): the policy on GPU in eval mode, and the normalization
        statistics dict that was pickled at training time.
    """
    ckpt_path = os.path.join(ckpt_dir, ckpt_name)
    policy = ACTPolicy(policy_config)
    # Load on CPU first; the module is moved to GPU afterwards.
    loading_status = policy.load_state_dict(torch.load(ckpt_path, map_location="cpu"))
    print(f"Loaded: {ckpt_path} ({loading_status})")
    policy.cuda()
    policy.eval()
    # Stats were produced by the training pipeline alongside the checkpoint.
    with open(os.path.join(ckpt_dir, "dataset_stats.pkl"), "rb") as f:
        stats = pickle.load(f)
    return policy, stats
def get_obs(env, camera_names, stats):
    """Read proprioception and camera frames from a robosuite env.

    Args:
        env: robosuite environment (single-arm robot assumed).
        camera_names: camera names, e.g. ["agentview", "eye_in_hand"].
        stats: dataset statistics dict with "qpos_mean" / "qpos_std".

    Returns:
        (qpos_tensor, images, qpos):
            qpos_tensor: (1, 8) normalized qpos tensor on GPU.
            images: (1, num_cams, 3, H, W) float tensor in [0, 1] on GPU.
            qpos: un-normalized (8,) float32 numpy qpos.
    """
    robot = env.robots[0]
    # qpos layout: 7 joint positions + 1 gripper value scaled by /0.8
    # (matches the convention used when the training data was collected).
    joint_pos = np.asarray(env.sim.data.qpos[robot._ref_joint_pos_indexes])
    gripper_idx = robot._ref_gripper_joint_pos_indexes.get("right", [])
    if len(gripper_idx):
        gripper_val = env.sim.data.qpos[gripper_idx][0] / 0.8
    else:
        gripper_val = 0.0
    qpos = np.concatenate([joint_pos, [gripper_val]]).astype(np.float32)
    # Normalize with the training-time statistics before feeding the policy.
    normalized = (qpos - stats["qpos_mean"]) / stats["qpos_std"]
    qpos_tensor = torch.from_numpy(normalized).float().cuda().unsqueeze(0)

    # Gather per-camera frames, each transposed HWC -> CHW.
    obs = env._get_observations()
    cam_map = {"agentview": "agentview_image", "eye_in_hand": "robot0_eye_in_hand_image"}
    frames = [
        rearrange(obs[cam_map.get(cam, cam + "_image")], "h w c -> c h w")
        for cam in camera_names
    ]
    stacked = np.stack(frames, axis=0)  # (num_cams, 3, H, W)
    # Scale uint8 pixels into [0, 1] floats and add the batch dimension.
    images = torch.from_numpy(stacked / 255.0).float().cuda().unsqueeze(0)
    return qpos_tensor, images, qpos
def run_eval(task, ckpt_dir, ckpt_name, num_rollouts, temporal_agg, camera_names,
             render=False):
    """Run evaluation rollouts of a trained ACT policy.

    Args:
        task: task name key into TASK_CONFIGS.
        ckpt_dir: directory holding the checkpoint and dataset stats.
        ckpt_name: checkpoint filename.
        num_rollouts: number of evaluation episodes (0 is allowed).
        temporal_agg: if True, query the policy every step and blend
            overlapping chunks with exponential weights; otherwise execute
            each predicted chunk to completion before re-querying.
        camera_names: cameras fed to the policy.
        render: show the on-screen renderer.

    Returns:
        Success rate in [0, 1] (0.0 when num_rollouts == 0).
    """
    import yaml
    # Load the training-time ACT config so eval hyperparameters match training.
    config_path = os.path.join(os.path.dirname(__file__), "..", "policy", "ACT", "train_config.yaml")
    with open(config_path, "r") as f:
        cfg = yaml.safe_load(f)
    state_dim = cfg["state_dim"]
    chunk_size = cfg["chunk_size"]
    # Build policy config (same structure as training).
    policy_config = {
        "lr": cfg["lr"],
        "num_queries": chunk_size,
        "kl_weight": cfg["kl_weight"],
        "hidden_dim": cfg["hidden_dim"],
        "dim_feedforward": cfg["dim_feedforward"],
        "lr_backbone": 1e-5,
        "backbone": "resnet18",
        "enc_layers": 4,
        "dec_layers": 7,
        "nheads": 8,
        "camera_names": camera_names,
        "state_dim": state_dim,
        "chunk_size": chunk_size,
    }
    policy, stats = load_policy(ckpt_dir, ckpt_name, policy_config)
    # Denormalize policy outputs back into raw action space.
    post_process = lambda a: a * stats["action_std"] + stats["action_mean"]
    # Create environment with extended horizon (settling steps + eval steps).
    from tactile_tasks.collect_data import create_env, TASK_CONFIGS
    settle_steps = 50
    max_timesteps = max(TASK_CONFIGS[task]["horizon"], 900)
    env = create_env(task, has_renderer=render)
    env.horizon = max_timesteps + settle_steps  # override so env doesn't cut us short
    query_frequency = 1 if temporal_agg else chunk_size
    successes = []
    for ep in range(num_rollouts):
        env.reset()
        # Let objects settle before the policy takes over (zero action = hold).
        # FIX: use settle_steps instead of a duplicated hard-coded 50.
        for _ in range(settle_steps):
            env.step(np.zeros(7))
        if temporal_agg:
            # all_time_actions[q, t] = action predicted for step t by the chunk
            # queried at step q; used for the exponential blending below.
            all_time_actions = torch.zeros(
                [max_timesteps, max_timesteps + chunk_size, state_dim]
            ).cuda()
        all_actions = None
        episode_success = False
        with torch.inference_mode():
            for t in range(max_timesteps):
                qpos, images, _ = get_obs(env, camera_names, stats)
                # Re-query the policy every step (temporal_agg) or once per chunk.
                if t % query_frequency == 0:
                    all_actions = policy(qpos, images)  # (1, chunk_size, state_dim)
                if temporal_agg:
                    all_time_actions[[t], t:t + chunk_size] = all_actions
                    actions_for_curr_step = all_time_actions[:, t]
                    # NOTE(review): rows never written stay all-zero; this filter
                    # assumes a real action is never exactly zero in every
                    # dimension (standard ACT implementation detail).
                    actions_populated = torch.all(actions_for_curr_step != 0, axis=1)
                    actions_for_curr_step = actions_for_curr_step[actions_populated]
                    k = 0.01
                    # Exponentially down-weight more recent (less-settled) queries.
                    exp_weights = np.exp(-k * np.arange(len(actions_for_curr_step)))
                    exp_weights = exp_weights / exp_weights.sum()
                    exp_weights = torch.from_numpy(exp_weights).cuda().unsqueeze(dim=1)
                    raw_action = (actions_for_curr_step * exp_weights).sum(dim=0, keepdim=True)
                else:
                    raw_action = all_actions[:, t % query_frequency]
                # Post-process: denormalize, take first 7D (ignore padding dim)
                action = post_process(raw_action.squeeze(0).cpu().numpy())
                action = action[:7]  # 8D → 7D OSC_POSE
                env.step(action)
                if render:
                    env.render()
                if env._check_success():
                    episode_success = True
                    # FIX: success latches (flag is never reset), so running
                    # out the remaining timesteps only wastes compute.
                    break
        successes.append(episode_success)
        status = "SUCCESS" if episode_success else "FAIL"
        print(f" [{ep+1}/{num_rollouts}] {status} (t={t+1})")
    # FIX: guard against ZeroDivisionError when num_rollouts == 0.
    success_rate = sum(successes) / len(successes) if successes else 0.0
    print(f"\n{'='*40}")
    print(f"Task: {task}")
    print(f"Checkpoint: {ckpt_dir}/{ckpt_name}")
    print(f"Mode: {'temporal_agg' if temporal_agg else 'chunk'}")
    print(f"Success: {sum(successes)}/{num_rollouts} ({success_rate*100:.1f}%)")
    print(f"{'='*40}")
    env.close()
    return success_rate
if __name__ == "__main__":
    # CLI entry point: parse options, resolve the default checkpoint
    # directory, then launch the evaluation loop.
    ap = argparse.ArgumentParser()
    ap.add_argument("--task", type=str, required=True,
                    choices=["precision_grasp", "peg_insertion", "gentle_stack"])
    ap.add_argument("--ckpt_dir", type=str, default=None,
                    help="checkpoint dir (default: policy/ACT/checkpoints/{task}_act)")
    ap.add_argument("--ckpt", type=str, default="policy_best.ckpt")
    ap.add_argument("--num_rollouts", type=int, default=50)
    ap.add_argument("--temporal_agg", action="store_true")
    ap.add_argument("--render", action="store_true", help="visualize with on-screen renderer")
    ap.add_argument("--cameras", nargs="+", default=["agentview", "eye_in_hand"])
    args = ap.parse_args()

    # Fall back to the conventional per-task checkpoint directory.
    ckpt_dir = args.ckpt_dir
    if ckpt_dir is None:
        here = os.path.dirname(__file__)
        ckpt_dir = os.path.join(here, "..", "policy", "ACT", "checkpoints", f"{args.task}_act")

    run_eval(
        task=args.task,
        ckpt_dir=ckpt_dir,
        ckpt_name=args.ckpt,
        num_rollouts=args.num_rollouts,
        temporal_agg=args.temporal_agg,
        camera_names=args.cameras,
        render=args.render,
    )