"""End-to-end test of the asynchronous inference stack (client ↔ server).
|
|
|
|
|
|
This test spins up a lightweight gRPC `PolicyServer` instance with a stubbed
|
|
|
policy network and launches a `RobotClient` that uses a `MockRobot`. The goal
|
|
|
is to exercise the full communication loop:
|
|
|
|
|
|
1. Client sends policy specification → Server
|
|
|
2. Client streams observations → Server
|
|
|
3. Server streams action chunks → Client
|
|
|
4. Client executes received actions
|
|
|
|
|
|
The test succeeds if at least one action is executed and the server records at
|
|
|
least one predicted timestep - demonstrating that the gRPC round-trip works
|
|
|
end-to-end using real (but lightweight) protocol messages.
|
|
|
"""

from __future__ import annotations

import threading
from concurrent import futures

import pytest
import torch
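
# grpc is an optional dependency; skip the whole module when it is missing.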
pytest.importorskip("grpc")


def test_async_inference_e2e(monkeypatch):
    """Tests the full asynchronous inference pipeline."""
    import grpc

    from lerobot.async_inference.configs import PolicyServerConfig, RobotClientConfig
    from lerobot.async_inference.helpers import map_robot_keys_to_lerobot_features
    from lerobot.async_inference.policy_server import PolicyServer
    from lerobot.async_inference.robot_client import RobotClient
    from lerobot.robots.utils import make_robot_from_config
    from lerobot.transport import (
        services_pb2,
        services_pb2_grpc,
    )
    from tests.mocks.mock_robot import MockRobotConfig
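
    # Stub policy: exposes only the attribute surface the server touches
    # (`config`, `.to()`, `.model()`), so no real weights are ever loaded.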
    class MockPolicy:
        """A minimal mock for an actual policy, returning zeros."""

        class _Config:
            robot_type = "dummy_robot"

            @property
            def image_features(self):
                """Empty image features since this test doesn't use images."""
                return {}

        def __init__(self):
            self.config = self._Config()

        def to(self, *args, **kwargs):
            # Device moves are a no-op for the mock.
            return self

        def model(self, batch):
            # (batch_size, actions_per_chunk=20, action_dim=6)
            batch_size = len(batch["robot_type"])
            return torch.zeros(batch_size, 20, 6)
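
    # Server side: assemble a PolicyServer around the stub policy, bypassing
    # checkpoint loading entirely.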
    policy_server_config = PolicyServerConfig(host="localhost", port=9999)
    policy_server = PolicyServer(policy_server_config)

    policy_server.policy = MockPolicy()
    policy_server.actions_per_chunk = 20
    policy_server.device = "cpu"

    # Identity pre-/post-processors: observations and actions pass through
    # unchanged.
    policy_server.preprocessor = lambda obs: obs
    policy_server.postprocessor = lambda tensor: tensor
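
    # Build the mock robot and hand its feature mapping to the server, which
    # is the server's view of the incoming observations.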
    robot_config = MockRobotConfig()
    mock_robot = make_robot_from_config(robot_config)

    lerobot_features = map_robot_keys_to_lerobot_features(mock_robot)
    policy_server.lerobot_features = lerobot_features

    policy_server.policy_type = "act"
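
    # Stub out real inference and the policy-instruction handshake: the test
    # targets the transport loop, not the model.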
    def _fake_get_action_chunk(_self, _obs, _type="test"):
        action_dim = 6
        batch_size = 1
        actions_per_chunk = policy_server.actions_per_chunk
        # (batch_size, actions_per_chunk, action_dim)
        return torch.zeros(batch_size, actions_per_chunk, action_dim)

    monkeypatch.setattr(PolicyServer, "_get_action_chunk", _fake_get_action_chunk, raising=True)

    def _fake_send_policy_instructions(self, request, context):
        return services_pb2.Empty()

    monkeypatch.setattr(PolicyServer, "SendPolicyInstructions", _fake_send_policy_instructions, raising=True)
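
    # Stand up the gRPC server on localhost and register the servicer.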
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=1, thread_name_prefix="policy_server"))
    services_pb2_grpc.add_AsyncInferenceServicer_to_server(policy_server, server)

    server_address = f"{policy_server.config.host}:{policy_server.config.port}"
    server.add_insecure_port(server_address)
    server.start()
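
    # Client side: point the RobotClient at the in-process server. The
    # placeholder policy_type / pretrained_name_or_path values only travel
    # over the wire; the stubbed SendPolicyInstructions handler ignores them.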
    client_config = RobotClientConfig(
        server_address=server_address,
        robot=robot_config,
        chunk_size_threshold=0.0,
        policy_type="test",
        pretrained_name_or_path="test",
        actions_per_chunk=20,
    )

    client = RobotClient(client_config)
    assert client.start(), "Client failed initial handshake with the server"
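
    # Count delivered chunks by wrapping the client's internal queue
    # aggregation, which runs once per received action chunk.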
    action_chunks_received = {"count": 0}
    original_aggregate = client._aggregate_action_queues

    def counting_aggregate(*args, **kwargs):
        action_chunks_received["count"] += 1
        return original_aggregate(*args, **kwargs)

    monkeypatch.setattr(client, "_aggregate_action_queues", counting_aggregate)
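
    # Drive the client from daemon threads: one receives action chunks, the
    # other runs the control loop that streams observations and acts.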
    action_thread = threading.Thread(target=client.receive_actions, daemon=True)
    # Note the trailing comma: `args` must be a tuple, not a bare dict.
    control_thread = threading.Thread(target=client.control_loop, args=({"task": ""},), daemon=True)
    action_thread.start()
    control_thread.start()
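
    # Nothing stops the server here, so wait_for_termination(timeout=5)
    # simply gives the client/server loop a few seconds to exchange messages.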
    server.wait_for_termination(timeout=5)

    assert action_chunks_received["count"] > 0, "Client did not receive any action chunks"
    assert len(policy_server._predicted_timesteps) > 0, "Server did not record any predicted timesteps"
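
    # Teardown: stop the client before joining its threads so both loops exit.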
    client.stop()
    action_thread.join()
    control_thread.join()
    policy_server.stop()
    server.stop(grace=None)