|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import argparse |
|
|
|
|
|
import numpy as np |
|
|
|
|
|
from gr00t.eval.robot import RobotInferenceServer |
|
|
from gr00t.eval.simulation import ( |
|
|
MultiStepConfig, |
|
|
SimulationConfig, |
|
|
SimulationInferenceClient, |
|
|
VideoConfig, |
|
|
) |
|
|
from gr00t.model.policy import Gr00tPolicy |
|
|
|
|
|
def main() -> None:
    """Parse CLI arguments and run either the inference server or the simulation client.

    Server mode (--server): loads a Gr00tPolicy from a checkpoint and serves it
    over the network via RobotInferenceServer.
    Client mode (--client): connects to a running server, prints the available
    modality configs, runs the configured simulation, and reports the success rate.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        help="Path to the model checkpoint directory.",
        default="<PATH_TO_YOUR_MODEL>",
    )
    parser.add_argument(
        "--embodiment_tag",
        type=str,
        help="The embodiment tag for the model.",
        default="<EMBODIMENT_TAG>",
    )
    parser.add_argument(
        "--env_name",
        type=str,
        help="Name of the environment to run.",
        default="<ENV_NAME>",
    )
    parser.add_argument("--port", type=int, help="Port number for the server.", default=5555)
    parser.add_argument(
        "--host", type=str, help="Host address for the server.", default="localhost"
    )
    parser.add_argument("--video_dir", type=str, help="Directory to save videos.", default=None)
    parser.add_argument("--n_episodes", type=int, help="Number of episodes to run.", default=2)
    parser.add_argument("--n_envs", type=int, help="Number of parallel environments.", default=1)
    parser.add_argument(
        "--n_action_steps",
        type=int,
        help="Number of action steps per environment step.",
        default=16,
    )
    parser.add_argument(
        "--max_episode_steps", type=int, help="Maximum number of steps per episode.", default=1440
    )

    # Exactly one of --server / --client must be chosen; argparse enforces this
    # with a proper usage error (exit code 2) instead of a raw ValueError traceback,
    # and rejects the previously-silent case of passing both flags at once.
    mode = parser.add_mutually_exclusive_group(required=True)
    mode.add_argument("--server", action="store_true", help="Run the server.")
    mode.add_argument("--client", action="store_true", help="Run the client")

    args = parser.parse_args()

    if args.server:
        # Load the policy from the checkpoint and serve it for remote inference.
        policy = Gr00tPolicy(
            model_path=args.model_path,
            embodiment_tag=args.embodiment_tag,
        )

        server = RobotInferenceServer(policy, port=args.port)
        server.run()
    else:
        # args.client is guaranteed true here by the required mutually exclusive group.
        simulation_client = SimulationInferenceClient(host=args.host, port=args.port)

        print("Available modality configs:")
        modality_config = simulation_client.get_modality_config()
        print(modality_config.keys())

        config = SimulationConfig(
            env_name=args.env_name,
            n_episodes=args.n_episodes,
            n_envs=args.n_envs,
            video=VideoConfig(video_dir=args.video_dir),
            multistep=MultiStepConfig(
                n_action_steps=args.n_action_steps, max_episode_steps=args.max_episode_steps
            ),
        )

        print(f"Running simulation for {args.env_name}...")
        env_name, episode_successes = simulation_client.run_simulation(config)

        print(f"Results for {env_name}:")
        # episode_successes is assumed to be a per-episode success array/list;
        # its mean is the empirical success rate.
        print(f"Success rate: {np.mean(episode_successes):.2f}")


if __name__ == "__main__":
    main()
|
|
|