"""Evaluate a policy on an environment by running rollouts and computing metrics.
|
|
|
|
|
|
Usage examples:
|
|
|
|
|
|
You want to evaluate a model from the hub (eg: https://huggingface.co/lerobot/diffusion_pusht)
|
|
|
for 10 episodes.
|
|
|
|
|
|
```
|
|
|
lerobot-eval \
|
|
|
--policy.path=lerobot/diffusion_pusht \
|
|
|
--env.type=pusht \
|
|
|
--eval.batch_size=10 \
|
|
|
--eval.n_episodes=10 \
|
|
|
--policy.use_amp=false \
|
|
|
--policy.device=cuda
|
|
|
```
|
|
|
|
|
|
OR, you want to evaluate a model checkpoint from the LeRobot training script for 10 episodes.
|
|
|
```
|
|
|
lerobot-eval \
|
|
|
--policy.path=outputs/train/diffusion_pusht/checkpoints/005000/pretrained_model \
|
|
|
--env.type=pusht \
|
|
|
--eval.batch_size=10 \
|
|
|
--eval.n_episodes=10 \
|
|
|
--policy.use_amp=false \
|
|
|
--policy.device=cuda
|
|
|
```
|
|
|
|
|
|
Note that in both examples, the repo/folder should contain at least `config.json` and `model.safetensors` files.
|
|
|
|
|
|
You can learn about the CLI options for this script in the `EvalPipelineConfig` in lerobot/configs/eval.py
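
The helpers in this module can also be called from Python. A minimal sketch (illustrative only,
not a tested recipe): `env`, `policy`, `preprocessor` and `postprocessor` are assumed to have
been built as in `eval_main` below.

```
from pathlib import Path

info = eval_policy(
    env=env,
    policy=policy,
    preprocessor=preprocessor,
    postprocessor=postprocessor,
    n_episodes=10,
    max_episodes_rendered=2,
    videos_dir=Path("outputs/eval/videos"),
    start_seed=1000,
)
print(info["aggregated"])  # avg_sum_reward, avg_max_reward, pc_success, timings
```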
"""

import concurrent.futures as cf
import json
import logging
import threading
import time
from collections import defaultdict
from collections.abc import Callable
from contextlib import nullcontext
from copy import deepcopy
from dataclasses import asdict
from functools import partial
from pathlib import Path
from pprint import pformat
from typing import Any, TypedDict

import einops
import gymnasium as gym
import numpy as np
import torch
from termcolor import colored
from torch import Tensor, nn
from tqdm import trange

from lerobot.configs import parser
from lerobot.configs.eval import EvalPipelineConfig
from lerobot.envs.factory import make_env
from lerobot.envs.utils import (
    add_envs_task,
    check_env_attributes_and_types,
    close_envs,
    preprocess_observation,
)
from lerobot.policies.factory import make_policy, make_pre_post_processors
from lerobot.policies.pretrained import PreTrainedPolicy
from lerobot.processor import PolicyAction, PolicyProcessorPipeline
from lerobot.utils.constants import ACTION, DONE, OBS_STR, REWARD
from lerobot.utils.io_utils import write_video
from lerobot.utils.random_utils import set_seed
from lerobot.utils.utils import (
    get_safe_torch_device,
    init_logging,
    inside_slurm,
)


def rollout(
    env: gym.vector.VectorEnv,
    policy: PreTrainedPolicy,
    preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    postprocessor: PolicyProcessorPipeline[PolicyAction, PolicyAction],
    seeds: list[int] | None = None,
    return_observations: bool = False,
    render_callback: Callable[[gym.vector.VectorEnv], None] | None = None,
) -> dict:
    """Run a batched policy rollout once through a batch of environments.

    Note that all environments in the batch are run until the last environment is done. This means some
    data will probably need to be discarded (for environments that aren't the first one to be done).

    The return dictionary contains:
        (optional) "observation": A dictionary of (batch, sequence + 1, *) tensors mapped to observation
            keys. NOTE that this has an extra sequence element relative to the other keys in the
            dictionary. This is because an extra observation is included for after the environment is
            terminated or truncated.
        "action": A (batch, sequence, action_dim) tensor of actions applied based on the observations (not
            including the last observations).
        "reward": A (batch, sequence) tensor of rewards received for applying the actions.
        "success": A (batch, sequence) tensor of success conditions (the only time this can be True is upon
            environment termination/truncation).
        "done": A (batch, sequence) tensor of **cumulative** done conditions. For any given batch element,
            the first True is followed by True's all the way till the end. This can be used for masking
            extraneous elements from the sequences above.

    Args:
        env: The batch of environments.
        policy: The policy. Must be a PyTorch nn module.
        preprocessor: Pipeline applied to each observation before it is passed to the policy.
        postprocessor: Pipeline applied to each action produced by the policy.
        seeds: The environments are seeded once at the start of the rollout. If provided, this argument
            specifies the seeds for each of the environments.
        return_observations: Whether to include all observations in the returned rollout data. Observations
            are returned optionally because they typically take more memory to cache. Defaults to False.
        render_callback: Optional rendering callback to be used after the environments are reset, and after
            every step.
    Returns:
        The dictionary described above.
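
    Example (illustrative sketch of how the cumulative "done" mask can be used; assumes `env`,
    `policy` and the processor pipelines were built as in `eval_main` below):

        data = rollout(env, policy, preprocessor, postprocessor, seeds=list(range(env.num_envs)))
        n_steps = data["done"].shape[1]
        # argmax returns the first occurrence, i.e. the step on which each episode finished.
        done_idx = torch.argmax(data["done"].to(int), dim=1)
        # Keep steps up to and including each episode's done step; zero out the rest.
        mask = (torch.arange(n_steps) <= done_idx[:, None]).int()
        sum_reward_per_episode = (data["reward"] * mask).sum(dim=1)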
    """
    assert isinstance(policy, nn.Module), "Policy must be a PyTorch nn module."

    # Reset the policy and environments.
    policy.reset()
    observation, info = env.reset(seed=seeds)
    if render_callback is not None:
        render_callback(env)

    all_observations = []
    all_actions = []
    all_rewards = []
    all_successes = []
    all_dones = []

    step = 0
    # Keep track of which environments are done.
    done = np.array([False] * env.num_envs)
    max_steps = env.call("_max_episode_steps")[0]
    progbar = trange(
        max_steps,
        desc=f"Running rollout with at most {max_steps} steps",
        disable=inside_slurm(),
        leave=False,
    )
    check_env_attributes_and_types(env)
    while not np.all(done) and step < max_steps:
        # Numpy array to tensor and changing dictionary keys to LeRobot policy format.
        observation = preprocess_observation(observation)
        if return_observations:
            all_observations.append(deepcopy(observation))

        # Infer "task" from attributes of environments, if any.
        observation = add_envs_task(env, observation)
        observation = preprocessor(observation)
        with torch.inference_mode():
            action = policy.select_action(observation)
        action = postprocessor(action)

        # Convert the action to the numpy array expected by the environments.
        action_numpy: np.ndarray = action.to("cpu").numpy()
        assert action_numpy.ndim == 2, "Action dimensions should be (batch, action_dim)"

        # Apply the next action.
        observation, reward, terminated, truncated, info = env.step(action_numpy)
        if render_callback is not None:
            render_callback(env)

        # With Gymnasium >= 1.0, `info["final_info"]` is a dict of arrays with one entry per env.
        # It is only present if at least one env finished on this step.
        if "final_info" in info:
            final_info = info["final_info"]
            if not isinstance(final_info, dict):
                raise RuntimeError(
                    "Unsupported `final_info` format: expected dict (Gymnasium >= 1.0). "
                    "You're likely using an older version of gymnasium (< 1.0). Please upgrade."
                )
            successes = final_info["is_success"].tolist()
        else:
            successes = [False] * env.num_envs

        # Keep track of which environments are done so far. `done` is cumulative: once an environment
        # is terminated or truncated it stays done. Force every environment to be done on the last
        # allowed step so the rollout always ends.
        done = terminated | truncated | done
        if step + 1 == max_steps:
            done = np.ones_like(done, dtype=bool)

        all_actions.append(torch.from_numpy(action_numpy))
        all_rewards.append(torch.from_numpy(reward))
        all_dones.append(torch.from_numpy(done))
        all_successes.append(torch.tensor(successes))

        step += 1
        running_success_rate = (
            einops.reduce(torch.stack(all_successes, dim=1), "b n -> b", "any").numpy().mean()
        )
        progbar.set_postfix({"running_success_rate": f"{running_success_rate.item() * 100:.1f}%"})
        progbar.update()

    # Track the final observation.
    if return_observations:
        observation = preprocess_observation(observation)
        all_observations.append(deepcopy(observation))

    # Stack the sequence along the first dimension so that we have (batch, sequence, *) tensors.
    ret = {
        ACTION: torch.stack(all_actions, dim=1),
        "reward": torch.stack(all_rewards, dim=1),
        "success": torch.stack(all_successes, dim=1),
        "done": torch.stack(all_dones, dim=1),
    }
    if return_observations:
        stacked_observations = {}
        for key in all_observations[0]:
            stacked_observations[key] = torch.stack([obs[key] for obs in all_observations], dim=1)
        ret[OBS_STR] = stacked_observations

    if hasattr(policy, "use_original_modules"):
        policy.use_original_modules()

    return ret


def eval_policy(
    env: gym.vector.VectorEnv,
    policy: PreTrainedPolicy,
    preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    postprocessor: PolicyProcessorPipeline[PolicyAction, PolicyAction],
    n_episodes: int,
    max_episodes_rendered: int = 0,
    videos_dir: Path | None = None,
    return_episode_data: bool = False,
    start_seed: int | None = None,
) -> dict:
    """
    Args:
        env: The batch of environments.
        policy: The policy.
        preprocessor: Pipeline applied to each observation before it is passed to the policy.
        postprocessor: Pipeline applied to each action produced by the policy.
        n_episodes: The number of episodes to evaluate.
        max_episodes_rendered: Maximum number of episodes to render into videos.
        videos_dir: Where to save rendered videos.
        return_episode_data: Whether to return episode data for online training. Incorporates the data into
            the "episodes" key of the returned dictionary.
        start_seed: The first seed to use for the first individual rollout. For all subsequent rollouts the
            seed is incremented by 1. If not provided, the environments are not manually seeded.
    Returns:
        Dictionary with metrics and data regarding the rollouts.
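
    Example (illustrative sketch; assumes the env, policy and pipelines were built as in
    `eval_main` below):

        info = eval_policy(env, policy, preprocessor, postprocessor, n_episodes=10, start_seed=0)
        info["aggregated"]["pc_success"]      # success rate in percent, averaged over episodes
        info["per_episode"][0]["sum_reward"]  # metrics for the first episode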
    """
    if max_episodes_rendered > 0 and not videos_dir:
        raise ValueError("If max_episodes_rendered > 0, videos_dir must be provided.")

    if not isinstance(policy, PreTrainedPolicy):
        raise ValueError(
            f"Policy of type 'PreTrainedPolicy' is expected, but type '{type(policy)}' was provided."
        )

    start = time.time()
    policy.eval()

    # Determine how many batched rollouts we need to get n_episodes. Note that if n_episodes is not evenly
    # divisible by env.num_envs we end up discarding some data in the last batch.
    n_batches = n_episodes // env.num_envs + int((n_episodes % env.num_envs) != 0)

    # Keep track of some metrics.
    sum_rewards = []
    max_rewards = []
    all_successes = []
    all_seeds = []
    threads = []  # for video saving threads
    n_episodes_rendered = 0  # for saving the correct number of videos

    # Rendering callback: caches one frame per step for the first `max_episodes_rendered` episodes.
    def render_frame(env: gym.vector.VectorEnv):
        # noqa: B023
        if n_episodes_rendered >= max_episodes_rendered:
            return
        n_to_render_now = min(max_episodes_rendered - n_episodes_rendered, env.num_envs)
        if isinstance(env, gym.vector.SyncVectorEnv):
            ep_frames.append(np.stack([env.envs[i].render() for i in range(n_to_render_now)]))
        elif isinstance(env, gym.vector.AsyncVectorEnv):
            # Here we must render all frames and discard any we don't need.
            ep_frames.append(np.stack(env.call("render")[:n_to_render_now]))

    if max_episodes_rendered > 0:
        video_paths: list[str] = []

    if return_episode_data:
        episode_data: dict | None = None

    # We don't want a progress bar when we use slurm, since it clutters the logs.
    progbar = trange(n_batches, desc="Stepping through eval batches", disable=inside_slurm())
    for batch_ix in progbar:
        # Cache frames for rendering videos. Each item will be (b, h, w, c), and the list indexes the
        # rollout step.
        if max_episodes_rendered > 0:
            ep_frames: list[np.ndarray] = []

        if start_seed is None:
            seeds = None
        else:
            seeds = range(
                start_seed + (batch_ix * env.num_envs), start_seed + ((batch_ix + 1) * env.num_envs)
            )
        rollout_data = rollout(
            env=env,
            policy=policy,
            preprocessor=preprocessor,
            postprocessor=postprocessor,
            seeds=list(seeds) if seeds else None,
            return_observations=return_episode_data,
            render_callback=render_frame if max_episodes_rendered > 0 else None,
        )

        # Figure out where in each rollout sequence the first done condition was encountered (results
        # after this won't be included).
        n_steps = rollout_data["done"].shape[1]
        # Note: this relies on a property of argmax: that it returns the first occurrence as a tiebreaker.
        done_indices = torch.argmax(rollout_data["done"].to(int), dim=1)

        # Make a mask with shape (batch, n_steps) to mask out rollout data after the first done
        # (batch-element-wise). Note the `done_indices + 1` to make sure to keep the data from the done
        # step.
        mask = (torch.arange(n_steps) <= einops.repeat(done_indices + 1, "b -> b s", s=n_steps)).int()

        # Extend metrics.
        batch_sum_rewards = einops.reduce((rollout_data["reward"] * mask), "b n -> b", "sum")
        sum_rewards.extend(batch_sum_rewards.tolist())
        batch_max_rewards = einops.reduce((rollout_data["reward"] * mask), "b n -> b", "max")
        max_rewards.extend(batch_max_rewards.tolist())
        batch_successes = einops.reduce((rollout_data["success"] * mask), "b n -> b", "any")
        all_successes.extend(batch_successes.tolist())
        if seeds:
            all_seeds.extend(seeds)
        else:
            all_seeds.append(None)

        # Compile episode data for online training, if requested.
        if return_episode_data:
            this_episode_data = _compile_episode_data(
                rollout_data,
                done_indices,
                start_episode_index=batch_ix * env.num_envs,
                start_data_index=(0 if episode_data is None else (episode_data["index"][-1].item() + 1)),
                fps=env.unwrapped.metadata["render_fps"],
            )
            if episode_data is None:
                episode_data = this_episode_data
            else:
                # Some sanity checks to make sure we are correctly compiling the data.
                assert episode_data["episode_index"][-1] + 1 == this_episode_data["episode_index"][0]
                assert episode_data["index"][-1] + 1 == this_episode_data["index"][0]
                # Concatenate the episode data.
                episode_data = {k: torch.cat([episode_data[k], this_episode_data[k]]) for k in episode_data}

        # Maybe render video for visualization.
        if max_episodes_rendered > 0 and len(ep_frames) > 0:
            batch_stacked_frames = np.stack(ep_frames, axis=1)  # (b, t, *)
            for stacked_frames, done_index in zip(
                batch_stacked_frames, done_indices.flatten().tolist(), strict=False
            ):
                if n_episodes_rendered >= max_episodes_rendered:
                    break

                videos_dir.mkdir(parents=True, exist_ok=True)
                video_path = videos_dir / f"eval_episode_{n_episodes_rendered}.mp4"
                video_paths.append(str(video_path))
                thread = threading.Thread(
                    target=write_video,
                    args=(
                        str(video_path),
                        stacked_frames[: done_index + 1],
                        env.unwrapped.metadata["render_fps"],
                    ),
                )
                thread.start()
                threads.append(thread)
                n_episodes_rendered += 1

        progbar.set_postfix(
            {"running_success_rate": f"{np.mean(all_successes[:n_episodes]).item() * 100:.1f}%"}
        )

    # Wait till all video rendering threads are done.
    for thread in threads:
        thread.join()

    # Compile eval info.
    info = {
        "per_episode": [
            {
                "episode_ix": i,
                "sum_reward": sum_reward,
                "max_reward": max_reward,
                "success": success,
                "seed": seed,
            }
            for i, (sum_reward, max_reward, success, seed) in enumerate(
                zip(
                    sum_rewards[:n_episodes],
                    max_rewards[:n_episodes],
                    all_successes[:n_episodes],
                    all_seeds[:n_episodes],
                    strict=True,
                )
            )
        ],
        "aggregated": {
            "avg_sum_reward": float(np.nanmean(sum_rewards[:n_episodes])),
            "avg_max_reward": float(np.nanmean(max_rewards[:n_episodes])),
            "pc_success": float(np.nanmean(all_successes[:n_episodes]) * 100),
            "eval_s": time.time() - start,
            "eval_ep_s": (time.time() - start) / n_episodes,
        },
    }

    if return_episode_data:
        info["episodes"] = episode_data

    if max_episodes_rendered > 0:
        info["video_paths"] = video_paths

    return info


def _compile_episode_data(
    rollout_data: dict, done_indices: Tensor, start_episode_index: int, start_data_index: int, fps: float
) -> dict:
    """Convenience function for `eval_policy(return_episode_data=True)`.

    Compiles all the rollout data into a flat, Hugging Face dataset style dict of tensors.

    Similar logic is implemented when datasets are pushed to the hub (see: `push_to_hub`).
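
    The returned dict maps flat keys to tensors concatenated over episodes. A sketch of the layout
    (observation keys depend on the env; "action", "next.done" and "next.reward" are the typical
    values of the `ACTION`, `DONE` and `REWARD` constants):

        {
            "action": (total_frames, action_dim),
            "episode_index": (total_frames,),
            "frame_index": (total_frames,),
            "timestamp": (total_frames,),
            "index": (total_frames,),
            ...
        }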
    """
    ep_dicts = []
    total_frames = 0
    for ep_ix in range(rollout_data[ACTION].shape[0]):
        # + 2 to include the first done frame and the last observation frame.
        num_frames = done_indices[ep_ix].item() + 2
        total_frames += num_frames

        # Here we do `num_frames - 1` as we don't want to include the last observation frame just yet.
        ep_dict = {
            ACTION: rollout_data[ACTION][ep_ix, : num_frames - 1],
            "episode_index": torch.tensor([start_episode_index + ep_ix] * (num_frames - 1)),
            "frame_index": torch.arange(0, num_frames - 1, 1),
            "timestamp": torch.arange(0, num_frames - 1, 1) / fps,
            DONE: rollout_data["done"][ep_ix, : num_frames - 1],
            "next.success": rollout_data["success"][ep_ix, : num_frames - 1],
            REWARD: rollout_data["reward"][ep_ix, : num_frames - 1].type(torch.float32),
        }

        # For the last observation frame, all other keys are copy-padded from the previous frame.
        for k in ep_dict:
            ep_dict[k] = torch.cat([ep_dict[k], ep_dict[k][-1:]])

        for key in rollout_data[OBS_STR]:
            ep_dict[key] = rollout_data[OBS_STR][key][ep_ix, :num_frames]

        ep_dicts.append(ep_dict)

    data_dict = {}
    for key in ep_dicts[0]:
        data_dict[key] = torch.cat([x[key] for x in ep_dicts])

    data_dict["index"] = torch.arange(start_data_index, start_data_index + total_frames, 1)

    return data_dict


@parser.wrap()
def eval_main(cfg: EvalPipelineConfig):
    logging.info(pformat(asdict(cfg)))

    # Check device is available.
    device = get_safe_torch_device(cfg.policy.device, log=True)

    torch.backends.cudnn.benchmark = True
    torch.backends.cuda.matmul.allow_tf32 = True
    set_seed(cfg.seed)

    logging.info(colored("Output dir:", "yellow", attrs=["bold"]) + f" {cfg.output_dir}")

    logging.info("Making environment.")
    envs = make_env(cfg.env, n_envs=cfg.eval.batch_size, use_async_envs=cfg.eval.use_async_envs)

    logging.info("Making policy.")

    policy = make_policy(
        cfg=cfg.policy,
        env_cfg=cfg.env,
        rename_map=cfg.rename_map,
    )

    policy.eval()

    # Overrides to place the preprocessor on the right device and apply the observation rename map.
    preprocessor_overrides = {
        "device_processor": {"device": str(policy.config.device)},
        "rename_observations_processor": {"rename_map": cfg.rename_map},
    }

    preprocessor, postprocessor = make_pre_post_processors(
        policy_cfg=cfg.policy,
        pretrained_path=cfg.policy.pretrained_path,
        preprocessor_overrides=preprocessor_overrides,
    )
    with torch.no_grad(), torch.autocast(device_type=device.type) if cfg.policy.use_amp else nullcontext():
        info = eval_policy_all(
            envs=envs,
            policy=policy,
            preprocessor=preprocessor,
            postprocessor=postprocessor,
            n_episodes=cfg.eval.n_episodes,
            max_episodes_rendered=10,
            videos_dir=Path(cfg.output_dir) / "videos",
            start_seed=cfg.seed,
            max_parallel_tasks=cfg.env.max_parallel_tasks,
        )
    print("Overall Aggregated Metrics:")
    print(info["overall"])

    # Print the aggregated metrics for each task group.
    for task_group, task_group_info in info["per_group"].items():
        print(f"\nAggregated Metrics for {task_group}:")
        print(task_group_info)

    close_envs(envs)

    # Save the evaluation info to disk.
    with open(Path(cfg.output_dir) / "eval_info.json", "w") as f:
        json.dump(info, f, indent=2)

    logging.info("End of eval")


class TaskMetrics(TypedDict):
    """Per-task metrics accumulated by `eval_one`."""

    sum_rewards: list[float]
    max_rewards: list[float]
    successes: list[bool]
    video_paths: list[str]


ACC_KEYS = ("sum_rewards", "max_rewards", "successes", "video_paths")


def eval_one(
    env: gym.vector.VectorEnv,
    *,
    policy: PreTrainedPolicy,
    preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    postprocessor: PolicyProcessorPipeline[PolicyAction, PolicyAction],
    n_episodes: int,
    max_episodes_rendered: int,
    videos_dir: Path | None,
    return_episode_data: bool,
    start_seed: int | None,
) -> TaskMetrics:
    """Evaluates one task_id of one suite using the provided vec env."""
    # The caller (`run_one`) already prepares a per-task videos directory.
    task_videos_dir = videos_dir

    task_result = eval_policy(
        env=env,
        policy=policy,
        preprocessor=preprocessor,
        postprocessor=postprocessor,
        n_episodes=n_episodes,
        max_episodes_rendered=max_episodes_rendered,
        videos_dir=task_videos_dir,
        return_episode_data=return_episode_data,
        start_seed=start_seed,
    )

    per_episode = task_result["per_episode"]
    return TaskMetrics(
        sum_rewards=[ep["sum_reward"] for ep in per_episode],
        max_rewards=[ep["max_reward"] for ep in per_episode],
        successes=[ep["success"] for ep in per_episode],
        video_paths=task_result.get("video_paths", []),
    )


def run_one(
    task_group: str,
    task_id: int,
    env,
    *,
    policy,
    preprocessor,
    postprocessor,
    n_episodes: int,
    max_episodes_rendered: int,
    videos_dir: Path | None,
    return_episode_data: bool,
    start_seed: int | None,
):
    """
    Run eval_one for a single (task_group, task_id, env).
    Returns (task_group, task_id, task_metrics_dict).
    This function is intentionally module-level to make it easy to test.
    """
    task_videos_dir = None
    if videos_dir is not None:
        task_videos_dir = videos_dir / f"{task_group}_{task_id}"
        task_videos_dir.mkdir(parents=True, exist_ok=True)

    # Evaluate this task with its own videos directory.
    metrics = eval_one(
        env,
        policy=policy,
        preprocessor=preprocessor,
        postprocessor=postprocessor,
        n_episodes=n_episodes,
        max_episodes_rendered=max_episodes_rendered,
        videos_dir=task_videos_dir,
        return_episode_data=return_episode_data,
        start_seed=start_seed,
    )

    if max_episodes_rendered > 0:
        metrics.setdefault("video_paths", [])
    return task_group, task_id, metrics


def eval_policy_all(
    envs: dict[str, dict[int, gym.vector.VectorEnv]],
    policy,
    preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    postprocessor: PolicyProcessorPipeline[PolicyAction, PolicyAction],
    n_episodes: int,
    *,
    max_episodes_rendered: int = 0,
    videos_dir: Path | None = None,
    return_episode_data: bool = False,
    start_seed: int | None = None,
    max_parallel_tasks: int = 1,
) -> dict:
    """
    Evaluate a nested `envs` dict: {task_group: {task_id: vec_env}}.
    This implementation flattens tasks, runs them sequentially or via ThreadPoolExecutor,
    accumulates per-group and overall statistics, and returns the same aggregate metrics
    schema as the single-env evaluator (avg_sum_reward / avg_max_reward / pc_success / timings)
    plus per-task infos.
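
    Example (illustrative sketch; the task-group name and envs are hypothetical, built e.g. by
    `make_env` as in `eval_main`):

        envs = {"suite_a": {0: vec_env_0, 1: vec_env_1}}  # {task_group: {task_id: vec_env}}
        info = eval_policy_all(
            envs, policy, preprocessor, postprocessor, n_episodes=10, max_parallel_tasks=2
        )
        info["overall"]["pc_success"]               # aggregate over every task
        info["per_group"]["suite_a"]["pc_success"]  # aggregate for one task group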
    """
    start_t = time.time()

    # Flatten {task_group: {task_id: vec_env}} into a list of (task_group, task_id, vec_env).
    tasks = [(tg, tid, vec) for tg, group in envs.items() for tid, vec in group.items()]

    # Accumulators for per-group and overall metrics.
    group_acc: dict[str, dict[str, list]] = defaultdict(lambda: {k: [] for k in ACC_KEYS})
    overall: dict[str, list] = {k: [] for k in ACC_KEYS}
    per_task_infos: list[dict] = []

    def _accumulate_to(group: str, metrics: dict):
        """Fold one task's metrics into its group accumulator and the overall accumulator."""

        def _append(key, value):
            if value is None:
                return
            if isinstance(value, list):
                group_acc[group][key].extend(value)
                overall[key].extend(value)
            else:
                group_acc[group][key].append(value)
                overall[key].append(value)

        _append("sum_rewards", metrics.get("sum_rewards"))
        _append("max_rewards", metrics.get("max_rewards"))
        _append("successes", metrics.get("successes"))

        paths = metrics.get("video_paths", [])
        if paths:
            group_acc[group]["video_paths"].extend(paths)
            overall["video_paths"].extend(paths)

    # Bind the shared arguments once; only (task_group, task_id, env) vary per task.
    task_runner = partial(
        run_one,
        policy=policy,
        preprocessor=preprocessor,
        postprocessor=postprocessor,
        n_episodes=n_episodes,
        max_episodes_rendered=max_episodes_rendered,
        videos_dir=videos_dir,
        return_episode_data=return_episode_data,
        start_seed=start_seed,
    )

    if max_parallel_tasks <= 1:
        # Sequential path: evaluate one task at a time.
        for task_group, task_id, env in tasks:
            tg, tid, metrics = task_runner(task_group, task_id, env)
            _accumulate_to(tg, metrics)
            per_task_infos.append({"task_group": tg, "task_id": tid, "metrics": metrics})
    else:
        # Parallel path: evaluate up to `max_parallel_tasks` tasks concurrently in threads.
        with cf.ThreadPoolExecutor(max_workers=max_parallel_tasks) as executor:
            fut2meta = {}
            for task_group, task_id, env in tasks:
                fut = executor.submit(task_runner, task_group, task_id, env)
                fut2meta[fut] = (task_group, task_id)
            for fut in cf.as_completed(fut2meta):
                tg, tid, metrics = fut.result()
                _accumulate_to(tg, metrics)
                per_task_infos.append({"task_group": tg, "task_id": tid, "metrics": metrics})

    def _agg_from_list(xs):
        """Nan-mean of a list, or nan if the list is empty."""
        if not xs:
            return float("nan")
        arr = np.array(xs, dtype=float)
        return float(np.nanmean(arr))

    # Per-group aggregates.
    groups_aggregated = {}
    for group, acc in group_acc.items():
        groups_aggregated[group] = {
            "avg_sum_reward": _agg_from_list(acc["sum_rewards"]),
            "avg_max_reward": _agg_from_list(acc["max_rewards"]),
            "pc_success": _agg_from_list(acc["successes"]) * 100 if acc["successes"] else float("nan"),
            "n_episodes": len(acc["sum_rewards"]),
            "video_paths": list(acc["video_paths"]),
        }

    # Overall aggregate across all groups.
    overall_agg = {
        "avg_sum_reward": _agg_from_list(overall["sum_rewards"]),
        "avg_max_reward": _agg_from_list(overall["max_rewards"]),
        "pc_success": _agg_from_list(overall["successes"]) * 100 if overall["successes"] else float("nan"),
        "n_episodes": len(overall["sum_rewards"]),
        "eval_s": time.time() - start_t,
        "eval_ep_s": (time.time() - start_t) / max(1, len(overall["sum_rewards"])),
        "video_paths": list(overall["video_paths"]),
    }

    return {
        "per_task": per_task_infos,
        "per_group": groups_aggregated,
        "overall": overall_agg,
    }


def main():
    init_logging()
    eval_main()


if __name__ == "__main__":
    main()