| |
| """ |
| Evaluate trained EVA policy in robosuite environment. |
| |
| Same structure as eval_act.py, but adds tactile observation. |
| |
| Usage: |
| python tactile_tasks/eval_eva.py --task peg_insertion |
| python tactile_tasks/eval_eva.py --task peg_insertion --temporal_agg |
| python tactile_tasks/eval_eva.py --task peg_insertion --ckpt policy_last.ckpt |
| """ |
|
|
| import os |
| import sys |
| import argparse |
| import pickle |
|
|
| import numpy as np |
| import torch |
|
|
| |
| sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "policy", "EVA")) |
| sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "policy", "EVA", "detr")) |
| sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) |
|
|
| from eva_policy import EVAPolicy |
| from tactile_tasks.uskin_sensor import USkinSensor |
|
|
|
|
def load_policy(ckpt_dir, ckpt_name, policy_config):
    """Build an EVAPolicy, restore checkpoint weights, and load dataset stats.

    Returns a (policy, stats) tuple: the policy is moved to GPU and put in
    eval mode; stats is the normalization dict pickled at training time.
    """
    ckpt_path = os.path.join(ckpt_dir, ckpt_name)
    policy = EVAPolicy(policy_config)
    status = policy.load_state_dict(torch.load(ckpt_path, map_location="cpu"))
    print(f"Loaded: {ckpt_path} ({status})")
    policy.cuda()
    policy.eval()

    with open(os.path.join(ckpt_dir, "dataset_stats.pkl"), "rb") as f:
        stats = pickle.load(f)

    return policy, stats
|
|
|
|
def get_obs(env, camera_names, stats):
    """Build normalized qpos and stacked image tensors from a robosuite env.

    Returns (qpos_tensor, images): qpos is z-normalized with the dataset
    stats, shape [1, n_joints + 1]; images are [1, n_cams, C, H, W] scaled
    to [0, 1]. Both tensors live on the GPU.
    """
    robot = env.robots[0]

    # Proprioception: arm joint positions plus one gripper scalar.
    # The gripper reading is divided by 0.8 — presumably the joint's full
    # travel so the value lands in [0, 1]; TODO confirm against the env.
    arm_qpos = np.array(env.sim.data.qpos[robot._ref_joint_pos_indexes])
    grip_idx = robot._ref_gripper_joint_pos_indexes.get("right", [])
    grip_val = env.sim.data.qpos[grip_idx][0] / 0.8 if len(grip_idx) else 0.0
    qpos = np.concatenate([arm_qpos, [grip_val]]).astype(np.float32)

    # Normalize with training-set statistics, add batch dim.
    qpos_norm = (qpos - stats["qpos_mean"]) / stats["qpos_std"]
    qpos_tensor = torch.from_numpy(qpos_norm).float().cuda().unsqueeze(0)

    # Cameras: map short names to robosuite observation keys, HWC -> CHW.
    obs = env._get_observations()
    cam_map = {"agentview": "agentview_image", "eye_in_hand": "robot0_eye_in_hand_image"}
    frames = [
        np.transpose(obs[cam_map.get(cam, cam + "_image")], (2, 0, 1))
        for cam in camera_names
    ]
    stacked = np.stack(frames, axis=0)
    images = torch.from_numpy(stacked / 255.0).float().cuda().unsqueeze(0)

    return qpos_tensor, images
|
|
|
|
def get_tactile(sensor):
    """Collect one control-step worth of tactile data.

    Polls the sensor FREQ_MULTIPLIER times, stacking the left/right pad
    force snapshots into a CUDA tensor of shape
    [1, 2, FREQ_MULTIPLIER, *force_shape] — e.g. [1, 2, 5, 4, 4, 3].
    """
    left, right = [], []
    for _ in range(USkinSensor.FREQ_MULTIPLIER):
        sensor.update()
        # Copy so later sensor updates don't mutate the stored frames.
        left.append(sensor._left_forces.copy())
        right.append(sensor._right_forces.copy())
    tactile = np.stack([np.stack(left), np.stack(right)])
    return torch.from_numpy(tactile).float().cuda().unsqueeze(0)
|
|
|
|
def run_eval(task, ckpt_dir, ckpt_name, num_rollouts, temporal_agg, camera_names,
             render=False):
    """Run evaluation rollouts of the EVA policy and return the success rate.

    Args:
        task: task name, a key into TASK_CONFIGS.
        ckpt_dir: directory holding the checkpoint and dataset_stats.pkl.
        ckpt_name: checkpoint file name inside ckpt_dir.
        num_rollouts: number of evaluation episodes.
        temporal_agg: if True, query the policy every step and blend
            overlapping action chunks with exponential weights (ACT-style
            temporal ensembling); otherwise execute each chunk open-loop.
        camera_names: camera short names fed to the policy.
        render: if True, render the environment every step.

    Returns:
        Success rate in [0, 1] (0.0 when num_rollouts == 0).
    """
    import yaml

    # Architecture hyperparameters come from the training config so the
    # rebuilt policy matches the checkpoint exactly.
    config_path = os.path.join(os.path.dirname(__file__), "..", "policy", "EVA", "train_config.yaml")
    with open(config_path, "r") as f:
        cfg = yaml.safe_load(f)

    state_dim = cfg["state_dim"]
    chunk_size = cfg["chunk_size"]

    policy_config = {
        "lr": cfg["lr"],
        "num_queries": chunk_size,
        "kl_weight": cfg["kl_weight"],
        "event_weight": cfg.get("event_weight", 0.1),
        "hidden_dim": cfg["hidden_dim"],
        "dim_feedforward": cfg["dim_feedforward"],
        "lr_backbone": 1e-5,
        "backbone": "resnet18",
        "enc_layers": 4,
        "dec_layers": 7,
        "nheads": 8,
        "camera_names": camera_names,
        "state_dim": state_dim,
        "chunk_size": chunk_size,
    }

    policy, stats = load_policy(ckpt_dir, ckpt_name, policy_config)
    # Un-normalize predicted actions back to environment units.
    post_process = lambda a: a * stats["action_std"] + stats["action_mean"]

    from tactile_tasks.collect_data import create_env, TASK_CONFIGS
    settle_steps = 50  # zero-action steps after reset so the scene settles
    max_timesteps = max(TASK_CONFIGS[task]["horizon"], 900)
    env = create_env(task, has_renderer=render)
    env.horizon = max_timesteps + settle_steps

    sensor = USkinSensor(env.sim)
    # With temporal aggregation the policy is queried every step; otherwise
    # once per chunk, replaying the predicted actions open-loop in between.
    query_frequency = 1 if temporal_agg else chunk_size

    successes = []
    for ep in range(num_rollouts):
        env.reset()
        for _ in range(settle_steps):
            env.step(np.zeros(7))

        if temporal_agg:
            # all_time_actions[q, t] caches the action predicted for step t
            # by the chunk queried at step q.
            all_time_actions = torch.zeros(
                [max_timesteps, max_timesteps + chunk_size, state_dim]
            ).cuda()

        all_actions = None
        episode_success = False

        with torch.inference_mode():
            for t in range(max_timesteps):
                qpos, images = get_obs(env, camera_names, stats)
                tactile = get_tactile(sensor)

                if t % query_frequency == 0:
                    all_actions = policy(qpos, images, tactile)

                if temporal_agg:
                    all_time_actions[[t], t:t + chunk_size] = all_actions
                    actions_for_curr_step = all_time_actions[:, t]
                    # Rows stay all-zero until their query step populates
                    # them; keep only the populated predictions.
                    actions_populated = torch.all(actions_for_curr_step != 0, axis=1)
                    actions_for_curr_step = actions_for_curr_step[actions_populated]
                    # Exponential weights favor earlier-queried predictions
                    # (standard ACT temporal-ensembling scheme).
                    k = 0.01
                    exp_weights = np.exp(-k * np.arange(len(actions_for_curr_step)))
                    exp_weights = exp_weights / exp_weights.sum()
                    exp_weights = torch.from_numpy(exp_weights).cuda().unsqueeze(dim=1)
                    raw_action = (actions_for_curr_step * exp_weights).sum(dim=0, keepdim=True)
                else:
                    raw_action = all_actions[:, t % query_frequency]

                action = post_process(raw_action.squeeze(0).cpu().numpy())
                action = action[:7]  # 7-dim control command for env.step

                env.step(action)
                if render:
                    env.render()

                if env._check_success():
                    # Success is latched; stop early instead of burning the
                    # remaining steps of the horizon (original looped on).
                    episode_success = True
                    break

        successes.append(episode_success)
        status = "SUCCESS" if episode_success else "FAIL"
        print(f" [{ep+1}/{num_rollouts}] {status} (t={t+1})")

    # Guard the num_rollouts == 0 edge case (was a ZeroDivisionError).
    success_rate = sum(successes) / len(successes) if successes else 0.0
    print(f"\n{'='*40}")
    print(f"Task: {task}")
    print(f"Checkpoint: {ckpt_dir}/{ckpt_name}")
    print(f"Mode: {'temporal_agg' if temporal_agg else 'chunk'}")
    print(f"Success: {sum(successes)}/{num_rollouts} ({success_rate*100:.1f}%)")
    print(f"{'='*40}")

    env.close()
    return success_rate
|
|
|
|
| if __name__ == "__main__": |
| parser = argparse.ArgumentParser() |
| parser.add_argument("--task", type=str, required=True, |
| choices=["precision_grasp", "peg_insertion", "gentle_stack"]) |
| parser.add_argument("--ckpt_dir", type=str, default=None, |
| help="checkpoint dir (default: policy/EVA/checkpoints/{task}_eva)") |
| parser.add_argument("--ckpt", type=str, default="policy_best.ckpt") |
| parser.add_argument("--num_rollouts", type=int, default=5) |
| parser.add_argument("--temporal_agg", action="store_true") |
| parser.add_argument("--render", action="store_true") |
| parser.add_argument("--cameras", nargs="+", default=["agentview", "eye_in_hand"]) |
| args = parser.parse_args() |
|
|
| if args.ckpt_dir is None: |
| args.ckpt_dir = os.path.join( |
| os.path.dirname(__file__), "..", "policy", "EVA", "checkpoints", f"{args.task}_eva" |
| ) |
|
|
| run_eval( |
| task=args.task, |
| ckpt_dir=args.ckpt_dir, |
| ckpt_name=args.ckpt, |
| num_rollouts=args.num_rollouts, |
| temporal_agg=args.temporal_agg, |
| camera_names=args.cameras, |
| render=args.render, |
| ) |
|
|