import logging
import logging.handlers
import os
import time
from dataclasses import dataclass, field
from pathlib import Path

import torch

from lerobot.configs.types import PolicyFeature
from lerobot.datasets.utils import build_dataset_frame, hw_to_dataset_features
from lerobot.policies import (
    ACTConfig,
    DiffusionConfig,
    PI0Config,
    PI05Config,
    SmolVLAConfig,
    VQBeTConfig,
)
from lerobot.robots.robot import Robot
from lerobot.utils.constants import OBS_IMAGES, OBS_STATE, OBS_STR
from lerobot.utils.utils import init_logging

# Type aliases used throughout the async-inference helpers
Action = torch.Tensor
RawObservation = dict[str, torch.Tensor]  # observation as returned by the robot
LeRobotObservation = dict[str, torch.Tensor]  # observation remapped to LeRobot dataset keys
Observation = dict[str, torch.Tensor]  # batched, policy-ready observation


def visualize_action_queue_size(action_queue_size: list[int]) -> None:
    """Plot the action queue size over environment steps."""
    # Lazy import: matplotlib is only needed for this optional visualization
    import matplotlib.pyplot as plt

    if not action_queue_size:
        return

    _, ax = plt.subplots()
    ax.set_title("Action Queue Size Over Time")
    ax.set_xlabel("Environment steps")
    ax.set_ylabel("Action Queue Size")
    ax.set_ylim(0, max(action_queue_size) * 1.1)
    ax.grid(True, alpha=0.3)
    ax.plot(range(len(action_queue_size)), action_queue_size)
    plt.show()


def map_robot_keys_to_lerobot_features(robot: Robot) -> dict[str, dict]:
    """Map the robot's hardware observation features to LeRobot dataset features."""
    return hw_to_dataset_features(robot.observation_features, OBS_STR, use_video=False)


def is_image_key(k: str) -> bool:
    """Return True if an observation key refers to a camera image."""
    return k.startswith(OBS_IMAGES)


def resize_robot_observation_image(image: torch.Tensor, resize_dims: tuple[int, int, int]) -> torch.Tensor:
    """Resize a single (H, W, C) robot camera image to the (C, H, W) shape expected by the policy."""
    assert image.ndim == 3, f"Image must be (H, W, C)! Received {image.shape}"

    # (H, W, C) -> (C, H, W)
    image = image.permute(2, 0, 1)
    dims = (resize_dims[1], resize_dims[2])

    # interpolate expects a batch dimension: (C, H, W) -> (1, C, H, W)
    image_batched = image.unsqueeze(0)

    resized = torch.nn.functional.interpolate(image_batched, size=dims, mode="bilinear", align_corners=False)

    return resized.squeeze(0)
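

# Minimal usage sketch (illustrative, not part of the module API): resizing a dummy
# 480x640 RGB frame to a hypothetical policy input shape of (3, 224, 224).
def _example_resize_image() -> torch.Tensor:
    frame_hwc = torch.zeros(480, 640, 3)  # (H, W, C), as produced by a camera
    return resize_robot_observation_image(frame_hwc, (3, 224, 224))  # -> (3, 224, 224)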


def raw_observation_to_observation(
    raw_observation: RawObservation,
    lerobot_features: dict[str, dict],
    policy_image_features: dict[str, PolicyFeature],
) -> Observation:
    """Turn a raw robot observation into a batched, policy-ready observation."""
    observation = prepare_raw_observation(raw_observation, lerobot_features, policy_image_features)

    for k, v in observation.items():
        if isinstance(v, torch.Tensor):
            if "image" in k:
                # Scale to float32 in [0, 1] and add a batch dimension
                observation[k] = prepare_image(v).unsqueeze(0)
            else:
                observation[k] = v

    return observation
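

# Usage sketch (assumptions: `robot` is a connected Robot and `policy_image_features`
# comes from the policy's config; neither is defined in this module).
def _example_observation_pipeline(
    robot: Robot, policy_image_features: dict[str, PolicyFeature]
) -> Observation:
    lerobot_features = map_robot_keys_to_lerobot_features(robot)
    raw_obs = robot.get_observation()
    return raw_observation_to_observation(raw_obs, lerobot_features, policy_image_features)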


def prepare_image(image: torch.Tensor) -> torch.Tensor:
    """Minimal preprocessing to turn uint8 images into float32 in [0, 1], and create a memory-contiguous tensor"""
    image = image.type(torch.float32) / 255
    image = image.contiguous()

    return image


def extract_state_from_raw_observation(
    lerobot_obs: RawObservation,
) -> torch.Tensor:
    """Extract the state from a raw observation."""
    state = torch.tensor(lerobot_obs[OBS_STATE])

    # Ensure a leading batch dimension: (state_dim,) -> (1, state_dim)
    if state.ndim == 1:
        state = state.unsqueeze(0)

    return state


def extract_images_from_raw_observation(
    lerobot_obs: RawObservation,
    camera_key: str,
) -> torch.Tensor:
    """Extract the image for a single camera from a raw observation."""
    return torch.tensor(lerobot_obs[camera_key])


def make_lerobot_observation(
    robot_obs: RawObservation,
    lerobot_features: dict[str, dict],
) -> LeRobotObservation:
    """Build a LeRobot observation frame from a raw robot observation."""
    return build_dataset_frame(lerobot_features, robot_obs, prefix=OBS_STR)


def prepare_raw_observation(
    robot_obs: RawObservation,
    lerobot_features: dict[str, dict],
    policy_image_features: dict[str, PolicyFeature],
) -> Observation:
    """Matches keys from the raw robot_obs dict to the keys expected by a given policy (passed as
    policy_image_features)."""
    lerobot_obs = make_lerobot_observation(robot_obs, lerobot_features)

    image_keys = list(filter(is_image_key, lerobot_obs))

    state_dict = {OBS_STATE: extract_state_from_raw_observation(lerobot_obs)}
    # Resize each camera image to the input dimensions expected by the policy
    image_dict = {
        key: resize_robot_observation_image(
            extract_images_from_raw_observation(lerobot_obs, key), policy_image_features[key].shape
        )
        for key in image_keys
    }

    if "task" in robot_obs:
        state_dict["task"] = robot_obs["task"]

    return {**state_dict, **image_dict}


def get_logger(name: str, log_to_file: bool = True) -> logging.Logger:
    """
    Get a logger using the standardized logging setup from utils.py.

    Args:
        name: Logger name (e.g., 'policy_server', 'robot_client')
        log_to_file: Whether to also log to a file

    Returns:
        Configured logger instance
    """
    if log_to_file:
        os.makedirs("logs", exist_ok=True)
        log_file = Path(f"logs/{name}_{int(time.time())}.log")
    else:
        log_file = None

    init_logging(log_file=log_file, display_pid=False)

    return logging.getLogger(name)


@dataclass
class TimedData:
    """A data object carrying timestamp and timestep information.

    Args:
        timestamp: Unix timestamp at the data's creation.
        timestep: The timestep of the data. Subclasses add the actual payload
            (e.g., an action or an observation).
    """

    timestamp: float
    timestep: int

    def get_timestamp(self):
        return self.timestamp

    def get_timestep(self):
        return self.timestep


@dataclass
class TimedAction(TimedData):
    """An action tagged with the timestep it was computed for."""

    action: Action

    def get_action(self):
        return self.action


@dataclass
class TimedObservation(TimedData):
    """A raw robot observation tagged with its capture timestep."""

    observation: RawObservation
    must_go: bool = False

    def get_observation(self):
        return self.observation
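

# Illustrative sketch: wrapping the data exchanged between a robot client and a
# policy server (the helper name is hypothetical).
def _example_timed_wrappers(
    action: Action, raw_obs: RawObservation, step: int
) -> tuple[TimedAction, TimedObservation]:
    now = time.time()
    return (
        TimedAction(timestamp=now, timestep=step, action=action),
        TimedObservation(timestamp=now, timestep=step, observation=raw_obs),
    )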


@dataclass
class FPSTracker:
    """Utility class to track FPS metrics over time."""

    target_fps: float
    first_timestamp: float | None = None
    total_obs_count: int = 0

    def calculate_fps_metrics(self, current_timestamp: float) -> dict[str, float]:
        """Calculate average FPS vs target"""
        self.total_obs_count += 1

        if self.first_timestamp is None:
            self.first_timestamp = current_timestamp

        total_duration = current_timestamp - self.first_timestamp
        # Average over the intervals between observations, hence the -1
        avg_fps = (self.total_obs_count - 1) / total_duration if total_duration > 1e-6 else 0.0

        return {"avg_fps": avg_fps, "target_fps": self.target_fps}

    def reset(self):
        """Reset the FPS tracker state"""
        self.first_timestamp = None
        self.total_obs_count = 0
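

# Illustrative sketch: three observations spaced 1/30 s apart yield avg_fps == 30.0.
def _example_fps_tracking() -> dict[str, float]:
    tracker = FPSTracker(target_fps=30.0)
    metrics = {"avg_fps": 0.0, "target_fps": tracker.target_fps}
    for i in range(3):
        metrics = tracker.calculate_fps_metrics(current_timestamp=i / 30)
    return metrics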


@dataclass
class RemotePolicyConfig:
    """Configuration a client sends to specify which policy the server should run."""

    policy_type: str
    pretrained_name_or_path: str
    lerobot_features: dict[str, PolicyFeature]
    actions_per_chunk: int
    device: str = "cpu"
    rename_map: dict[str, str] = field(default_factory=dict)
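

# Illustrative sketch: a config a client might build before connecting to the
# policy server. The checkpoint path is hypothetical.
def _example_remote_policy_config(lerobot_features: dict[str, PolicyFeature]) -> RemotePolicyConfig:
    return RemotePolicyConfig(
        policy_type="act",  # assumption: one of the imported policy families, cf. ACTConfig
        pretrained_name_or_path="outputs/train/act/checkpoints/last/pretrained_model",  # hypothetical
        lerobot_features=lerobot_features,
        actions_per_chunk=50,
        device="cuda",
    )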


def _compare_observation_states(obs1_state: torch.Tensor, obs2_state: torch.Tensor, atol: float) -> bool:
    """Check if two observation states are similar, under a tolerance threshold"""
    return bool(torch.linalg.norm(obs1_state - obs2_state) < atol)
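

# Illustrative sketch: joint-space states closer than `atol` count as similar.
def _example_state_similarity() -> bool:
    s1 = torch.tensor([[0.0, 0.0, 0.0]])
    s2 = torch.tensor([[0.1, 0.0, 0.0]])
    return _compare_observation_states(s1, s2, atol=1.0)  # True: norm == 0.1 < 1.0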


def observations_similar(
    obs1: TimedObservation, obs2: TimedObservation, lerobot_features: dict[str, dict], atol: float = 1
) -> bool:
    """Check if two observations are similar, under a tolerance threshold. Distance is measured
    in joint space, as the difference between the two observations' states.

    NOTE(fracapuano): This is a very simple check, and it is enough for the current use case.
    An immediate next step is to use (fast) perceptual difference metrics comparing some camera views,
    to go beyond this joint-space similarity check.
    """
    obs1_state = extract_state_from_raw_observation(
        make_lerobot_observation(obs1.get_observation(), lerobot_features)
    )
    obs2_state = extract_state_from_raw_observation(
        make_lerobot_observation(obs2.get_observation(), lerobot_features)
    )

    return _compare_observation_states(obs1_state, obs2_state, atol=atol)