# BoxTestingv11771283373.4644873 / minimal_evaluation.py
# Uploaded via huggingface_hub (commit 5840b97, verified) by HenryZhang
#!/usr/bin/env python3
import os
import time
import torch
from lerobot.policies.smolvla.modeling_smolvla import SmolVLAPolicy
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.policies.utils import build_inference_frame, make_robot_action
from lerobot.robots.so101_follower import SO101FollowerConfig, SO101Follower
from lerobot.datasets.utils import hw_to_dataset_features
from lerobot.policies.factory import make_pre_post_processors
# -------------------------
# CONFIG
# -------------------------
MODEL_ID = "lerobot/smolvla_base"  # pretrained SmolVLA checkpoint on the HF Hub
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"  # prefer GPU when available
FOLLOWER_PORT = "/dev/ttyACM3"  # serial port of the SO-101 follower arm
TOP_CAM_INDEX = 7  # OpenCV device index of the overhead camera
WRIST_CAM_INDEX = 9  # OpenCV device index of the wrist camera
TASK = "Pick up the red block."  # natural-language instruction fed to the policy
ROBOT_TYPE = "so101_follower"
FPS = 10  # target control-loop rate in Hz
STEPS_PER_EPISODE = 30  # number of control steps before the script exits
# Avoid the HuggingFace tokenizers fork/parallelism warning in subprocess-free runs.
os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
# -------------------------
# Load model
# -------------------------
# Load the pretrained policy, move it to the chosen device, and switch to
# eval mode (disables dropout/batch-norm updates during inference).
print("[INFO] Loading SmolVLA policy")
policy = SmolVLAPolicy.from_pretrained(MODEL_ID).to(DEVICE)
policy.eval()
# -------------------------
# Load OG preprocess + postprocess
# -------------------------
# Build the same pre/post-processing pipelines the checkpoint was trained
# with; the override makes the preprocessor place tensors on DEVICE.
print("[INFO] Loading policy preprocess + postprocess")
preprocess, postprocess = make_pre_post_processors(
    policy.config,
    MODEL_ID,
    preprocessor_overrides={"device_processor": {"device": str(DEVICE)}},
)
# -------------------------
# Setup robot
# -------------------------
# Two camera streams: "camera1" is the overhead view, "camera2" the wrist view.
# NOTE(review): cameras capture at 30 fps while the control loop targets
# FPS=10 — presumably the driver serves the latest frame; confirm it drops
# stale frames rather than queueing them.
camera_cfg = {
    "camera1": OpenCVCameraConfig(index_or_path=TOP_CAM_INDEX, width=640, height=480, fps=30),
    "camera2": OpenCVCameraConfig(index_or_path=WRIST_CAM_INDEX, width=640, height=480, fps=30),
}
robot = SO101Follower(
    SO101FollowerConfig(
        port=FOLLOWER_PORT,
        id="so101_follower_arm",
        cameras=camera_cfg,
    )
)
# Opens the serial connection to the arm and starts the camera streams.
robot.connect()
# -------------------------
# ds_features (required glue)
# -------------------------
# Translate the robot's hardware action/observation specs into the dataset
# feature schema that build_inference_frame / make_robot_action expect.
action_feats = hw_to_dataset_features(robot.action_features, "action")
obs_feats = hw_to_dataset_features(robot.observation_features, "observation")
# Merge into one schema; observation keys first, then action keys.
ds_features = {**obs_feats, **action_feats}
# -------------------------
# Control loop
# -------------------------
# Runs STEPS_PER_EPISODE observe -> infer -> act steps at ~FPS Hz.
# Ctrl+C exits cleanly; the robot is always disconnected on the way out.
print("[INFO] Starting evaluation")
policy.reset()
try:
    for step in range(STEPS_PER_EPISODE):
        step_start = time.perf_counter()
        obs = robot.get_observation()
        # ---- BUILD INFERENCE FRAME ----
        obs_frame = build_inference_frame(
            observation=obs,
            ds_features=ds_features,
            device=DEVICE,
            task=TASK,
            robot_type=ROBOT_TYPE,
        )
        # ---- PREPROCESS ----
        batch = preprocess(obs_frame)
        # ---- POLICY ----
        # Inference only: no autograd graph needed.
        with torch.no_grad():
            raw_action = policy.select_action(batch)
        action = postprocess(raw_action)
        print("Raw action:", raw_action, "-> Decoded action:", action)
        # ---- SEND TO ROBOT ----
        # Drop the batch dimension before mapping back to hardware commands.
        robot_action = make_robot_action(action.squeeze(0), ds_features)
        robot.send_action(robot_action)
        # Sleep only the remainder of the control period so the loop runs at
        # ~FPS rather than FPS plus inference time; never sleep negatively.
        elapsed = time.perf_counter() - step_start
        time.sleep(max(0.0, 1.0 / FPS - elapsed))
except KeyboardInterrupt:
    print("\n[INFO] Ctrl+C received")
finally:
    robot.disconnect()
    print("[INFO] Done")