|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Unit-tests for the `PolicyServer` core logic.
|
|
|
The `policy` attribute is monkey-patched with a stub so that no real model inference is performed.
|
|
|
"""
|
|
|
|
|
|
from __future__ import annotations
|
|
|
|
|
|
import time
|
|
|
|
|
|
import pytest
|
|
|
import torch
|
|
|
|
|
|
from lerobot.configs.types import PolicyFeature
|
|
|
from lerobot.utils.constants import OBS_STATE
|
|
|
from tests.utils import require_package
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class MockPolicy:
    """Stand-in for a real policy: every prediction is an all-zero tensor.

    Per-policy behavior is exercised separately under tests/policies.
    """

    class _Config:
        """Minimal config surface the server reads from a policy."""

        robot_type = "dummy_robot"

        @property
        def image_features(self) -> dict[str, PolicyFeature]:
            """No camera inputs are exercised by these tests, so expose none."""
            return {}

    def __init__(self):
        # The server only ever touches `policy.config`, so a bare config suffices.
        self.config = self._Config()

    def to(self, *args, **kwargs):
        """No-op device move; returns self so call sites can chain."""
        return self

    def predict_action_chunk(self, observation: dict[str, torch.Tensor]) -> torch.Tensor:
        """Return a (batch, 20, 6) zero action chunk, batch-sized from the state entry."""
        n_envs = len(observation[OBS_STATE])
        return torch.zeros(n_envs, 20, 6)

    def model(self, batch: dict) -> torch.Tensor:
        """Direct model call used by some policy types; also (batch, 20, 6) zeros."""
        n_envs = len(batch["robot_type"])
        return torch.zeros(n_envs, 20, 6)
|
|
|
|
|
|
|
|
|
@pytest.fixture
@require_package("grpc")
def policy_server():
    """Build a `PolicyServer` whose policy is replaced by a `MockPolicy` stub."""
    from lerobot.async_inference.configs import PolicyServerConfig
    from lerobot.async_inference.policy_server import PolicyServer

    srv = PolicyServer(PolicyServerConfig(host="localhost", port=9999))

    # Swap in the stub so no real model inference runs during the tests.
    srv.policy = MockPolicy()
    srv.actions_per_chunk = 20
    srv.device = "cpu"

    # Single 6-DoF state feature; no image features.
    srv.lerobot_features = {
        OBS_STATE: {
            "dtype": "float32",
            "shape": [6],
            "names": ["joint1", "joint2", "joint3", "joint4", "joint5", "joint6"],
        }
    }

    return srv
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _make_obs(state: torch.Tensor, timestep: int = 0, must_go: bool = False):
    """Create a `TimedObservation` whose joint values come from *state*.

    Args:
        state: 1-D tensor holding up to 6 joint values; positions past the end
            of the tensor default to 0.0.
        timestep: logical timestep recorded on the observation.
        must_go: when True the server must enqueue the observation unconditionally.

    Returns:
        A `TimedObservation` stamped with the current wall-clock time.
    """
    from lerobot.async_inference.helpers import TimedObservation

    # joint1..joint6 are filled positionally from `state` (comprehension replaces
    # the previous sixfold copy-paste of the same guard-and-index pattern).
    joints = {
        f"joint{i + 1}": state[i].item() if len(state) > i else 0.0 for i in range(6)
    }

    return TimedObservation(
        observation=joints,
        timestamp=time.time(),
        timestep=timestep,
        must_go=must_go,
    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def test_time_action_chunk(policy_server):
    """`_time_action_chunk` stamps actions with consecutive timesteps and dt-spaced timestamps."""
    t0 = time.time()
    first_step = 10
    chunk = [torch.randn(6) for _ in range(3)]

    stamped = policy_server._time_action_chunk(t0, chunk, first_step)

    assert len(stamped) == 3

    # Timesteps increase by one starting from `first_step`.
    assert [a.get_timestep() for a in stamped] == [10, 11, 12]

    # Timestamps advance by exactly one environment_dt per action.
    dt = policy_server.config.environment_dt
    for i, a in enumerate(stamped):
        assert abs(a.get_timestamp() - (t0 + i * dt)) < 1e-6
|
|
|
|
|
|
|
|
|
def test_maybe_enqueue_observation_must_go(policy_server):
    """A `must_go` observation bypasses similarity filtering and is always enqueued."""
    must_go_obs = _make_obs(torch.zeros(6), must_go=True)

    enqueued = policy_server._enqueue_observation(must_go_obs)

    assert enqueued is True
    assert policy_server.observation_queue.qsize() == 1
    # The very object we submitted sits at the head of the queue.
    assert policy_server.observation_queue.get_nowait() is must_go_obs
|
|
|
|
|
|
|
|
|
def test_maybe_enqueue_observation_dissimilar(policy_server):
    """An observation far from the last processed one is enqueued even without `must_go`."""
    # Last processed state is all zeros; the incoming one is far away in joint space.
    policy_server.last_processed_obs = _make_obs(torch.zeros(6))
    far_obs = _make_obs(torch.ones(6) * 5)

    assert policy_server._enqueue_observation(far_obs) is True
    assert policy_server.observation_queue.qsize() == 1
|
|
|
|
|
|
|
|
|
def test_maybe_enqueue_observation_is_skipped(policy_server):
    """An observation nearly identical to the last processed one is dropped."""
    policy_server.last_processed_obs = _make_obs(torch.zeros(6))
    # Within the similarity tolerance of the last processed state.
    near_duplicate = _make_obs(torch.zeros(6) + 1e-4)

    assert policy_server._enqueue_observation(near_duplicate) is False
    assert policy_server.observation_queue.empty() is True
|
|
|
|
|
|
|
|
|
def test_obs_sanity_checks(policy_server):
    """`_obs_sanity_checks` rejects already-predicted timesteps and near-duplicate states."""
    prev = _make_obs(torch.zeros(6), timestep=0)

    # Rejected: timestep 1 has already been predicted.
    policy_server._predicted_timesteps.add(1)
    repeated_step = _make_obs(torch.ones(6), timestep=1)
    assert policy_server._obs_sanity_checks(repeated_step, prev) is False

    # Rejected: state is within tolerance of `prev`.
    policy_server._predicted_timesteps.clear()
    near_prev = _make_obs(torch.zeros(6) + 1e-4, timestep=2)
    assert policy_server._obs_sanity_checks(near_prev, prev) is False

    # Accepted: fresh timestep and a clearly different state.
    novel = _make_obs(torch.ones(6) * 5, timestep=3)
    assert policy_server._obs_sanity_checks(novel, prev) is True
|
|
|
|
|
|
|
|
|
def test_predict_action_chunk(monkeypatch, policy_server):
    """`_predict_action_chunk` wraps a raw chunk into correctly stamped timed actions."""
    from lerobot.async_inference.policy_server import PolicyServer

    policy_server.policy_type = "act"

    # Identity pre/post-processing keeps the pipeline transparent for this test.
    policy_server.preprocessor = lambda obs: obs
    policy_server.postprocessor = lambda tensor: tensor

    n_actions = policy_server.actions_per_chunk

    def _stub_chunk(_self, _obs, _type="act"):
        # One batch element, `n_actions` steps, 6-dim actions — all zeros.
        return torch.zeros(1, n_actions, 6)

    monkeypatch.setattr(PolicyServer, "_get_action_chunk", _stub_chunk, raising=True)

    obs = _make_obs(torch.zeros(6), timestep=5)
    stamped = policy_server._predict_action_chunk(obs)

    assert len(stamped) == n_actions

    # Timesteps count up from the observation's timestep.
    assert [a.get_timestep() for a in stamped] == list(range(5, 5 + n_actions))

    # Timestamps advance by one environment_dt per action from the observation's timestamp.
    dt = policy_server.config.environment_dt
    for i, a in enumerate(stamped):
        assert abs(a.get_timestamp() - (obs.get_timestamp() + i * dt)) < 1e-6
|
|
|
|