Spaces:
Runtime error
Runtime error
backend done
Browse files- backend/config_builder.py +34 -20
- backend/record_controller.py +407 -0
- backend/train_controller.py +563 -0
- requirements.txt → requeriments.txt +70 -2
- robot_detector.py +183 -0
backend/config_builder.py
CHANGED
|
@@ -1,28 +1,42 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
-
from lerobot.
|
|
|
|
| 3 |
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
env = xarm.make_env()
|
| 11 |
-
else:
|
| 12 |
-
return "Simulación no reconocida"
|
| 13 |
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 17 |
|
|
|
|
| 18 |
with gr.Blocks() as demo:
|
| 19 |
-
gr.Markdown("#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
|
| 28 |
demo.launch()
|
|
|
|
| 1 |
# Gradio front-end for driving a Feetech-based LeRobot arm in real time.
import gradio as gr
from lerobot.robots import LeFeetech
import time

# Open the connection to the Feetech robot at import time so the app only
# starts when the hardware is reachable; otherwise fail fast with a clear error.
try:
    robot = LeFeetech(port="/dev/ttyUSB0")  # Change this port if needed
    # NOTE(review): assumed to be an indexable sequence of motor handles
    # exposing .goto(position) — confirm against the LeFeetech API.
    motors = robot.motors
except Exception as e:
    raise RuntimeError(f"Error al conectar con el robot: {e}")
|
|
|
|
|
|
|
|
|
|
| 11 |
|
| 12 |
+
# Drive the motors from the UI sliders and report the commanded positions.
def controlar_robot(motor1_pos, motor2_pos, motor3_pos):
    """Send one target position to each of the three motors.

    Returns a status string describing the commanded positions, or an
    error string if any motor command fails.
    """
    try:
        targets = (motor1_pos, motor2_pos, motor3_pos)
        # Command each motor in turn (adjust indices to the real motor IDs).
        for idx, target in enumerate(targets):
            motors[idx].goto(target)

        time.sleep(0.5)  # give the robot time to reach the pose
        # Sensor readings or a camera frame could also be returned here.
        return f"Motor1: {motor1_pos}, Motor2: {motor2_pos}, Motor3: {motor3_pos}"
    except Exception as err:
        return f"Error: {err}"
|
| 25 |
|
| 26 |
+
# UI: three sliders (one per motor) wired to controlar_robot, plus a status box.
with gr.Blocks() as demo:
    gr.Markdown("# Control en Tiempo Real del Robot LeRobot con Feetech")

    with gr.Row():
        # Feetech servos use a 0-1023 position range; 512 is the midpoint.
        motor1 = gr.Slider(0, 1023, value=512, label="Motor 1")
        motor2 = gr.Slider(0, 1023, value=512, label="Motor 2")
        motor3 = gr.Slider(0, 1023, value=512, label="Motor 3")

    estado = gr.Textbox(label="Estado del Robot")

    # Re-send all three positions whenever any slider moves.
    motor1.change(fn=controlar_robot, inputs=[motor1, motor2, motor3], outputs=estado)
    motor2.change(fn=controlar_robot, inputs=[motor1, motor2, motor3], outputs=estado)
    motor3.change(fn=controlar_robot, inputs=[motor1, motor2, motor3], outputs=estado)

demo.launch()
|
backend/record_controller.py
CHANGED
|
@@ -0,0 +1,407 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# backend/record_controller.py (o record_controller_gradio.py)
import subprocess
import os
import gradio as gr
import serial.tools.list_ports  # used to enumerate serial ports
import json  # needed by the local load_config fallback below

# If robot_detector.py lives in the same 'backend' directory, reuse its
# config loader; otherwise fall back to a local implementation so this
# module still works standalone.
try:
    from robot_detector import load_config as load_robot_config, CONFIG_FILE as ROBOT_CONFIG_FILE
except ImportError:
    print("Advertencia: No se pudo importar 'load_config' de 'robot_detector.py'.")
    print("Asegúrate de que 'robot_detector.py' esté en el mismo directorio 'backend'.")
    print("Se usará una implementación local de load_config.")

    # Fallback implementation used only when robot_detector is unavailable.
    ROBOT_CONFIG_FILE = "robot_config.json"
    def load_robot_config():
        # Returns (follower_port, follower_id, leader_port, leader_id);
        # empty strings when the config file is missing or unreadable.
        if os.path.exists(ROBOT_CONFIG_FILE):
            try:
                with open(ROBOT_CONFIG_FILE, 'r') as f:
                    config = json.load(f)
                return (
                    config.get("robot_follower", {}).get("port", ""),
                    config.get("robot_follower", {}).get("id", ""),
                    config.get("teleop_leader", {}).get("port", ""),
                    config.get("teleop_leader", {}).get("id", "")
                )
            except Exception as e:
                print(f"Error al cargar la configuración existente desde '{ROBOT_CONFIG_FILE}': {e}. Se usarán valores por defecto.")
                return "", "", "", ""
        return "", "", "", ""
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def run_command(command: str, description: str):
    """
    Run a shell command, streaming its combined stdout/stderr to the console.

    Returns a (success, output) tuple where output is the full captured text
    (or an error description if launching the command itself failed).
    """
    print(f"\n--- {description} ---")
    collected = []
    try:
        # Popen lets us stream output line-by-line for long-running jobs.
        proc = subprocess.Popen(
            command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,  # fold stderr into stdout
            text=True,
        )

        for chunk in iter(proc.stdout.readline, ''):
            print(chunk, end='')  # server-side log
            collected.append(chunk)

        proc.wait()

        if proc.returncode != 0:
            error_message = f"Error durante '{description}': El comando devolvió el código de salida {proc.returncode}"
            print(error_message)
            return False, "".join(collected)
        print(f"Éxito: {description}")
        return True, "".join(collected)
    except Exception as e:
        error_message = f"Ocurrió un error inesperado durante '{description}': {e}"
        print(error_message)
        return False, error_message
|
| 71 |
+
|
| 72 |
+
def list_serial_device_paths():
    """
    Return the device paths (e.g. /dev/ttyUSB0) of all detected serial ports.

    An empty list is returned when no ports are present.
    """
    return [port.device for port in serial.tools.list_ports.comports()]
|
| 85 |
+
|
| 86 |
+
def login_to_huggingface(token: str):
    """
    Log into the Hugging Face CLI with the given token.

    Returns a (success, message) tuple. The token is shell-quoted because
    run_command executes through a shell and the token is user input —
    without quoting, a crafted token could inject shell commands.
    """
    import shlex  # local import: only needed here, keeps the file's import block untouched

    if not token or token == "hf_YOUR_ACTUAL_WRITE_TOKEN_HERE":
        return False, "Error: Por favor, proporciona un token de Hugging Face válido."

    success, output = run_command(
        f"huggingface-cli login --token {shlex.quote(token)}",
        "Iniciando sesión en Hugging Face CLI"
    )
    if success:
        return True, "¡Inicio de sesión en Hugging Face exitoso!"
    else:
        return False, output
|
| 101 |
+
|
| 102 |
+
def get_huggingface_user():
    """
    Fetch the current Hugging Face username via `huggingface-cli whoami`.

    Returns (True, username) on success, (False, error_text) otherwise.
    """
    success, output = run_command(
        "huggingface-cli whoami | head -n 1",
        "Obteniendo nombre de usuario de Hugging Face"
    )
    if not success:
        return False, output

    # The CLI may emit deprecation warnings before the actual username;
    # the username is the first non-empty line that is not a warning.
    for candidate in output.splitlines():
        stripped = candidate.strip()
        if stripped and not stripped.startswith("warnings.") and "deprecated" not in candidate.lower():
            return True, stripped
    return False, "No se pudo extraer el nombre de usuario de la salida de 'whoami'."
|
| 120 |
+
|
| 121 |
+
def record_dataset_core(hf_user: str,
                        robot_port: str,
                        teleop_port: str,
                        num_episodes: int = 2,
                        single_task: str = "Grab the black cube",
                        push_to_hub: bool = True,
                        resume: bool = False,
                        episode_time_s: int = 60,
                        reset_time_s: int = 60):
    """
    Record a dataset with LeRobot by launching `python -m lerobot.record`.

    Returns a (success, message) tuple; on success the message describes
    where the dataset was stored and, if enabled, its Hub URL.
    """
    if not hf_user:
        return False, "Error: Nombre de usuario de Hugging Face no disponible. ¿Falló el inicio de sesión?"

    # Reuse the IDs from the saved config so recording matches calibration.
    initial_follower_port, initial_follower_id, initial_leader_port, initial_leader_id = load_robot_config()
    robot_id = initial_follower_id if initial_follower_id else "my_awesome_follower_arm"
    teleop_id = initial_leader_id if initial_leader_id else "my_awesome_leader_arm"

    dataset_repo_id = f"{hf_user}/record-test"

    command = [
        "python", "-m", "lerobot.record",
        "--robot.type=so101_follower",
        f"--robot.port={robot_port}",
        f"--robot.id={robot_id}",  # ID from config, or default
        # BUG FIX: this is a plain string (not an f-string), so the previous
        # doubled braces ({{ }}) were passed literally to the CLI; single
        # braces are what lerobot's camera-config parser expects.
        "--robot.cameras=\"{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}\"",
        "--teleop.type=so101_leader",
        f"--teleop.port={teleop_port}",
        f"--teleop.id={teleop_id}",  # ID from config, or default
        "--display_data=true",
        f"--dataset.repo_id={dataset_repo_id}",
        f"--dataset.num_episodes={num_episodes}",
        f"--dataset.single_task=\"{single_task}\"",
        f"--dataset.episode_time_s={episode_time_s}",
        f"--dataset.reset_time_s={reset_time_s}"
    ]

    if not push_to_hub:
        command.append("--dataset.push_to_hub=False")
    if resume:
        command.append("--resume=true")

    full_command = " ".join(command)

    success, output = run_command(full_command, "Grabación del dataset")
    if success:
        final_message = f"¡Grabación del dataset completada exitosamente!\n"
        final_message += f"El dataset se almacenó localmente en: ~/.cache/huggingface/lerobot/{dataset_repo_id}\n"
        if push_to_hub:
            final_message += f"El dataset ha sido subido a: https://huggingface.co/datasets/{dataset_repo_id}\n"
            final_message += "Puedes añadir etiquetas personalizadas (ej. 'tutorial') en la página del dataset en Hugging Face."
        else:
            final_message += "La subida al Hugging Face Hub fue deshabilitada."
        return True, final_message
    else:
        return False, output
|
| 180 |
+
|
| 181 |
+
# --- Gradio Interface Logic ---

# Authenticated Hugging Face username (None until a successful login).
current_hf_user = None

def gradio_login(hf_token_input: str):
    """Gradio handler: log into Hugging Face and resolve the username.

    Returns gr.update tuples for (username textbox, status textbox).
    """
    global current_hf_user
    success, message = login_to_huggingface(hf_token_input)
    if success:
        gr.Info(message)
        success_user, user_name = get_huggingface_user()
        if success_user:
            current_hf_user = user_name
            # Lock the username field and show the success status.
            return gr.update(value=user_name, interactive=False), gr.update(visible=True, value=message)
        else:
            # Login worked but whoami failed: surface that error instead.
            return gr.update(value="", interactive=True), gr.update(visible=True, value=user_name)
    else:
        current_hf_user = None
        # Login failed: clear the username and show the error message.
        return gr.update(value="", interactive=True), gr.update(visible=True, value=message)
|
| 201 |
+
|
| 202 |
+
def gradio_record(robot_port_input: str,
                  teleop_port_input: str,
                  num_episodes_input: int,
                  single_task_input: str,
                  push_to_hub_input: bool,
                  resume_input: bool,
                  episode_time_s_input: int,
                  reset_time_s_input: int):
    """Gradio handler: start a dataset recording session.

    Requires a prior successful login (current_hf_user set). Returns
    gr.update tuples for (status textbox, record button).
    """
    global current_hf_user
    if not current_hf_user:
        return gr.update(visible=True, value="Error: No se ha iniciado sesión en Hugging Face o no se pudo obtener el usuario. Por favor, inicia sesión primero."), gr.update(interactive=False)

    gr.Info("Iniciando grabación del dataset. Esto puede tardar unos segundos...")
    success, message = record_dataset_core(
        hf_user=current_hf_user,
        robot_port=robot_port_input,
        teleop_port=teleop_port_input,
        num_episodes=num_episodes_input,
        single_task=single_task_input,
        push_to_hub=push_to_hub_input,
        resume=resume_input,
        episode_time_s=episode_time_s_input,
        reset_time_s=reset_time_s_input
    )
    # Notify, then show the result and re-enable the record button.
    if success:
        gr.Info("Grabación completada.")
    else:
        gr.Info("Grabación fallida. Revisa el log para más detalles.")
    return gr.update(visible=True, value=message), gr.update(interactive=True)
|
| 233 |
+
|
| 234 |
+
# Gradio interface: three tabs — HF auth, recording parameters, run recording.
with gr.Blocks(title="Controlador de Grabación LeRobot") as demo:
    gr.Markdown("# <center>Controlador de Grabación de Datasets LeRobot</center>")
    gr.Markdown("Esta interfaz te ayuda a gestionar la grabación de datasets para LeRobot.")

    with gr.Tab("1. Configuración de Hugging Face"):
        gr.Markdown("## Configuración de Hugging Face")
        gr.Markdown(
            "Introduce tu **token de Hugging Face con permisos de escritura**. "
            "Puedes generarlo en [huggingface.co/settings/tokens](https://huggingface.co/settings/tokens)."
        )
        hf_token_input = gr.Textbox(
            label="Token de Hugging Face",
            type="password",
            placeholder="hf_YOUR_ACTUAL_WRITE_TOKEN_HERE",
            info="El token debe tener permisos de escritura (write)."
        )
        login_btn = gr.Button("Iniciar Sesión / Verificar Token")
        hf_user_output = gr.Textbox(label="Usuario de Hugging Face Actual", interactive=False, placeholder="No autenticado", show_copy_button=True)
        login_status_output = gr.Textbox(label="Estado de Autenticación", interactive=False, visible=False)

        login_btn.click(
            fn=gradio_login,
            inputs=hf_token_input,
            outputs=[hf_user_output, login_status_output]
        )

    with gr.Tab("2. Parámetros de Grabación"):
        gr.Markdown("## Parámetros del Robot y la Grabación")
        gr.Markdown(
            "Asegúrate de que los **puertos seriales** de tus robots sean correctos. "
            "Puedes detectarlos automáticamente o introducirlos manualmente."
        )

        # Pre-fill ports/IDs from robot_config.json (empty strings if absent).
        initial_follower_port, initial_follower_id, initial_leader_port, initial_leader_id = load_robot_config()

        with gr.Group():  # follower port: auto-detected dropdown + manual entry
            gr.Markdown("### Puerto del Robot Follower")
            robot_port_dropdown = gr.Dropdown(
                label="Seleccionar Puerto",
                choices=[],  # populated on load/refresh
                value=initial_follower_port,  # pre-fill from config
                interactive=True,
                allow_custom_value=True  # allow manual input
            )
            robot_port_manual = gr.Textbox(
                label="O introducir Puerto Manualmente",
                value=initial_follower_port,  # pre-fill from config
                placeholder="ej. /dev/tty.usbmodemXXXXX",
                interactive=True
            )
            # Keep dropdown and manual textbox in sync in both directions.
            robot_port_dropdown.change(lambda x: x, inputs=robot_port_dropdown, outputs=robot_port_manual)
            robot_port_manual.change(lambda x: x, inputs=robot_port_manual, outputs=robot_port_dropdown)

        with gr.Group():  # leader port: auto-detected dropdown + manual entry
            gr.Markdown("### Puerto del Teleoperador Leader")
            teleop_port_dropdown = gr.Dropdown(
                label="Seleccionar Puerto",
                choices=[],  # populated on load/refresh
                value=initial_leader_port,  # pre-fill from config
                interactive=True,
                allow_custom_value=True  # allow manual input
            )
            teleop_port_manual = gr.Textbox(
                label="O introducir Puerto Manualmente",
                value=initial_leader_port,  # pre-fill from config
                placeholder="ej. /dev/tty.usbmodemXXXXX",
                interactive=True
            )
            # Keep dropdown and manual textbox in sync in both directions.
            teleop_port_dropdown.change(lambda x: x, inputs=teleop_port_dropdown, outputs=teleop_port_manual)
            teleop_port_manual.change(lambda x: x, inputs=teleop_port_manual, outputs=teleop_port_dropdown)

        refresh_ports_btn = gr.Button("🔄 Refrescar Puertos Detectados")

        num_episodes_input = gr.Slider(
            minimum=1, maximum=100, step=1, value=2, label="Número de Episodios a Grabar",
            info="Define cuántos episodios se grabarán."
        )
        single_task_input = gr.Textbox(
            label="Descripción de la Tarea (Single Task)",
            value="Grab the black cube",
            placeholder="Describe la tarea que realizará el robot"
        )
        episode_time_s_input = gr.Slider(
            minimum=10, maximum=300, step=10, value=60, label="Duración del Episodio (segundos)",
            info="Tiempo máximo de grabación para cada episodio."
        )
        reset_time_s_input = gr.Slider(
            minimum=5, maximum=120, step=5, value=60, label="Tiempo de Reseteo (segundos)",
            info="Tiempo para preparar el entorno entre episodios."
        )
        push_to_hub_input = gr.Checkbox(
            label="Subir Dataset a Hugging Face Hub",
            value=True,
            info="Marca esta casilla para subir el dataset a tu perfil de Hugging Face."
        )
        resume_input = gr.Checkbox(
            label="Reanudar Grabación Existente",
            value=False,
            info="Si hay una grabación previa, continúa desde donde se detuvo."
        )

        # Refresh both port dropdowns (on button click and on page load).
        def update_port_dropdowns():
            ports = list_serial_device_paths()
            # The dropdowns allow custom values, so updating choices only
            # refreshes the selectable list without clobbering manual input.
            return gr.update(choices=ports), gr.update(choices=ports)

        refresh_ports_btn.click(
            fn=update_port_dropdowns,
            inputs=None,
            outputs=[robot_port_dropdown, teleop_port_dropdown]
        )
        demo.load(
            fn=update_port_dropdowns,
            inputs=None,
            outputs=[robot_port_dropdown, teleop_port_dropdown]
        )

    with gr.Tab("3. Iniciar Grabación"):
        gr.Markdown("## Ejecutar Grabación del Dataset")
        gr.Markdown(
            "Haz clic en el botón para iniciar la grabación. "
            "Una nueva ventana de LeRobot se abrirá para la teleoperación y visualización."
        )
        record_btn = gr.Button("🚀 Iniciar Grabación 🚀", variant="primary")

        # Status / log output for the recording run (hidden until used).
        record_status_output = gr.Textbox(
            label="Estado de la Grabación",
            interactive=False,
            visible=False,
            lines=10
        )

        record_btn.click(
            fn=gradio_record,
            inputs=[
                # Dropdown and textbox are synchronized; the textbox is the
                # source of truth for the port values.
                robot_port_manual,
                teleop_port_manual,
                num_episodes_input,
                single_task_input,
                push_to_hub_input,
                resume_input,
                episode_time_s_input,
                reset_time_s_input
            ],
            outputs=[record_status_output, record_btn]  # show status, re-enable button
        )
        gr.Markdown("### Controles de Teclado Durante la Grabación (en la ventana de LeRobot):")
        gr.Markdown(
            "- **Flecha Derecha (→):** Detener el episodio actual y pasar al siguiente.\n"
            "- **Flecha Izquierda (←):** Cancelar el episodio actual y volver a grabar.\n"
            "- **Escape (ESC):** Detener la sesión, codificar videos y subir el dataset (si está activado)."
        )

    gr.Markdown("---")
    gr.Markdown("Hecho con ❤️ para RobotCleanPupusas503")
|
| 397 |
+
|
| 398 |
+
# Launch the Gradio interface.
if __name__ == "__main__":
    try:
        import serial  # sanity-check that pyserial is installed before launching
    except ImportError:
        print("Error: La librería 'pyserial' no está instalada.")
        print("Por favor, instala 'pyserial' ejecutando: pip install pyserial")
        exit(1)

    demo.launch(share=False)
|
backend/train_controller.py
CHANGED
|
@@ -0,0 +1,563 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# backend/train_robot.py
import subprocess
import os
import gradio as gr
import json
import torch

# --- Helper Functions (reused from record_controller for consistency) ---
|
| 9 |
+
|
| 10 |
+
def run_command(command: str, description: str):
    """
    Execute a shell command, echoing its merged stdout/stderr as it runs.

    Returns a (success, captured_output) tuple; captured_output is the
    full text of the command's output (or an error description if the
    command could not be launched at all).
    """
    print(f"\n--- {description} ---")
    lines_seen = []
    try:
        # Stream output in real time via Popen (training can run for hours).
        child = subprocess.Popen(
            command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,  # merge stderr into stdout
            text=True,
        )

        while True:
            line = child.stdout.readline()
            if line == '':
                break
            print(line, end='')  # console log
            lines_seen.append(line)

        child.wait()

        if child.returncode == 0:
            print(f"Éxito: {description}")
            return True, "".join(lines_seen)
        error_message = f"Error durante '{description}': El comando devolvió el código de salida {child.returncode}"
        print(error_message)
        return False, "".join(lines_seen)
    except Exception as e:
        error_message = f"Ocurrió un error inesperado durante '{description}': {e}"
        print(error_message)
        return False, error_message
|
| 45 |
+
|
| 46 |
+
def login_to_huggingface(token: str):
    """
    Log into the Hugging Face CLI using the provided token.

    Returns a (success, message) tuple. The token is shell-quoted before
    being interpolated into the shell command — it is user-controlled
    input and run_command executes through a shell.
    """
    import shlex  # local import so the file's import block is untouched

    if not token or token == "hf_YOUR_ACTUAL_WRITE_TOKEN_HERE":
        return False, "Error: Por favor, proporciona un token de Hugging Face válido."

    success, output = run_command(
        f"huggingface-cli login --token {shlex.quote(token)} --add-to-git-credential",
        "Iniciando sesión en Hugging Face CLI"
    )
    if success:
        return True, "¡Inicio de sesión en Hugging Face exitoso!"
    else:
        return False, output
|
| 61 |
+
|
| 62 |
+
def get_huggingface_user():
    """
    Return the logged-in Hugging Face username via `huggingface-cli whoami`.

    Returns (True, username) on success, (False, error_text) otherwise.
    """
    success, output = run_command(
        "huggingface-cli whoami | head -n 1",
        "Obteniendo nombre de usuario de Hugging Face"
    )
    if not success:
        return False, output

    # Skip blank lines and deprecation warnings; the first remaining
    # line should be the username itself.
    username = next(
        (ln.strip() for ln in output.splitlines()
         if ln.strip() and not ln.strip().startswith("warnings.") and "deprecated" not in ln.lower()),
        None,
    )
    if username is not None:
        return True, username
    return False, "No se pudo extraer el nombre de usuario de la salida de 'whoami'."
|
| 80 |
+
|
| 81 |
+
# --- Core Training and Upload Logic ---
|
| 82 |
+
|
| 83 |
+
def train_policy_core(hf_user: str,
                      dataset_repo_id: str,
                      policy_type: str,
                      output_dir: str,
                      job_name: str,
                      policy_device: str,
                      wandb_enable: bool,
                      resume: bool,
                      resume_config_path: str):
    """
    Train a robot policy by invoking `python -m lerobot.scripts.train`.

    Args:
        hf_user: Authenticated Hugging Face username (namespace for the dataset).
        dataset_repo_id: Dataset repo id; re-namespaced under hf_user if needed.
        policy_type: Policy architecture (e.g. "act").
        output_dir: Directory where checkpoints are written.
        job_name: Label for this training run.
        policy_device: "cuda" | "mps" | "cpu".
        wandb_enable: Whether to enable Weights & Biases logging.
        resume: Continue a previous run from a checkpoint config.
        resume_config_path: Path to train_config.json when resuming.

    Returns:
        (success, message): success flag plus the collected training log/output.
    """
    if not hf_user:
        return False, "Error: Nombre de usuario de Hugging Face no disponible. Por favor, inicia sesión primero."

    # Normalize the repo id so it is always namespaced under the logged-in user.
    if not dataset_repo_id.startswith(f"{hf_user}/"):
        dataset_repo_id = f"{hf_user}/{dataset_repo_id.split('/')[-1]}"

    print(f"\nPreparando para entrenar la política '{policy_type}' con el dataset '{dataset_repo_id}'...")

    args = ["python", "-m", "lerobot.scripts.train"]
    if resume and resume_config_path:
        # When resuming, the saved train_config.json carries all other options.
        args += [f"--config_path={resume_config_path}", "--resume=true"]
    else:
        args += [
            f"--dataset.repo_id={dataset_repo_id}",
            f"--policy.type={policy_type}",
            f"--output_dir={output_dir}",
            f"--job_name={job_name}",
            f"--policy.device={policy_device}",
        ]
        if wandb_enable:
            args.append("--wandb.enable=true")

    success, output = run_command(" ".join(args), "Entrenamiento de la Política")
    if not success:
        return False, f"Error durante el entrenamiento: {output}"

    summary_lines = [
        "¡Entrenamiento de la política completado exitosamente!\n",
        f"Los checkpoints se guardaron en: {output_dir}/checkpoints\n",
    ]
    if wandb_enable:
        summary_lines.append("Revisa Weights & Biases para los gráficos de entrenamiento.\n")
    # Append the full command output so the caller can inspect details.
    return True, "".join(summary_lines) + "\n" + output
|
| 133 |
+
|
| 134 |
+
def upload_policy_core(hf_user: str, policy_repo_name: str, checkpoint_path: str, is_intermediate: bool = False):
    """
    Upload a local policy checkpoint directory to the Hugging Face Hub.

    Args:
        hf_user: Authenticated Hugging Face username.
        policy_repo_name: Target model-repo name (without the user prefix).
        checkpoint_path: Local path to the checkpoint ('pretrained_model' dir).
        is_intermediate: Accepted for API compatibility; intermediate and final
            checkpoints are currently uploaded to the same model repo.

    Returns:
        (success, message): success flag plus a status / error string.
    """
    # Guard clauses: validate everything before shelling out.
    if not hf_user:
        return False, "Error: Nombre de usuario de Hugging Face no disponible. Por favor, inicia sesión primero."
    if not policy_repo_name:
        return False, "Error: El nombre del repositorio de la política no puede estar vacío."
    if not checkpoint_path:
        return False, "Error: La ruta al checkpoint no puede estar vacía."
    if not os.path.exists(checkpoint_path):
        return False, f"Error: La ruta del checkpoint '{checkpoint_path}' no existe."

    full_repo_id = f"{hf_user}/{policy_repo_name}"

    # huggingface-cli upload expects: <repo_id> <local_path>; policies live in
    # model-type repos.
    cli_args = ("huggingface-cli", "upload", full_repo_id, checkpoint_path, "--repo-type=model")
    success, output = run_command(" ".join(cli_args), f"Subiendo política a {full_repo_id}")

    if not success:
        return False, f"Error al subir política: {output}"
    return True, f"¡Política subida exitosamente a https://huggingface.co/{full_repo_id}!"
|
| 172 |
+
|
| 173 |
+
def evaluate_policy_core(hf_user: str,
                         robot_type: str,
                         robot_port: str,
                         robot_cameras: str,
                         robot_id: str,
                         display_data: bool,
                         dataset_repo_id_eval: str,
                         single_task: str,
                         policy_path: str,
                         teleop_enable: bool = False,
                         teleop_type: str = "",
                         teleop_port: str = "",
                         teleop_id: str = ""):
    """
    Evaluate a trained policy on the real robot via `python -m lerobot.record`.

    Args:
        hf_user: Authenticated Hugging Face username.
        robot_type / robot_port / robot_cameras / robot_id: Follower robot config;
            robot_cameras is passed through verbatim as a quoted string.
        display_data: Whether the recorder should display live data.
        dataset_repo_id_eval: Repo id where evaluation episodes are stored;
            re-namespaced under hf_user if needed.
        single_task: Natural-language task description.
        policy_path: Local checkpoint path or Hub model id to evaluate.
        teleop_*: Optional teleoperator config, used only when teleop_enable.

    Returns:
        (success, message): success flag plus the collected log/output.
    """
    if not hf_user:
        return False, "Error: Nombre de usuario de Hugging Face no disponible. Por favor, inicia sesión primero."
    if not policy_path:
        return False, "Error: La ruta a la política para evaluar no puede estar vacía."

    # Force the eval dataset into the authenticated user's namespace.
    if not dataset_repo_id_eval.startswith(f"{hf_user}/"):
        dataset_repo_id_eval = f"{hf_user}/{dataset_repo_id_eval.split('/')[-1]}"

    print(f"\nPreparando para evaluar la política '{policy_path}'...")

    args = [
        "python", "-m", "lerobot.record",
        f"--robot.type={robot_type}",
        f"--robot.port={robot_port}",
        f"--robot.cameras=\"{robot_cameras}\"",
        f"--robot.id={robot_id}",
        f"--display_data={str(display_data).lower()}",
        f"--dataset.repo_id={dataset_repo_id_eval}",
        f"--dataset.single_task=\"{single_task}\"",
        f"--policy.path={policy_path}",
    ]
    if teleop_enable:
        args += [
            f"--teleop.type={teleop_type}",
            f"--teleop.port={teleop_port}",
            f"--teleop.id={teleop_id}",
        ]

    success, output = run_command(" ".join(args), "Evaluación de la Política")
    if not success:
        return False, f"Error durante la evaluación: {output}"

    summary = ("¡Evaluación de la política completada exitosamente!\n"
               f"Los datos de evaluación se guardaron en: ~/.cache/huggingface/lerobot/{dataset_repo_id_eval}\n")
    return True, summary + "\n" + output
|
| 229 |
+
|
| 230 |
+
# --- Gradio Interface Logic ---

# Module-level state shared by the gradio_* callbacks below: holds the
# Hugging Face username after a successful login (set by gradio_login,
# read by gradio_train / gradio_upload / gradio_evaluate); None = not logged in.
current_hf_user = None
|
| 234 |
+
|
| 235 |
+
def gradio_login(hf_token_input: str):
    """
    Gradio callback: authenticate against Hugging Face and update the UI.

    Returns updates for (username textbox, status textbox).
    """
    global current_hf_user

    success, message = login_to_huggingface(hf_token_input)
    if not success:
        current_hf_user = None
        return gr.update(value="", interactive=True), gr.update(visible=True, value=message)

    gr.Info(message)
    got_user, user_name = get_huggingface_user()
    if not got_user:
        # On whoami failure, user_name carries the error text.
        return gr.update(value="", interactive=True), gr.update(visible=True, value=user_name)

    current_hf_user = user_name
    return gr.update(value=user_name, interactive=False), gr.update(visible=True, value=message)
|
| 250 |
+
|
| 251 |
+
def gradio_train(dataset_repo_id_input: str,
                 policy_type_input: str,
                 output_dir_input: str,
                 job_name_input: str,
                 policy_device_input: str,
                 wandb_enable_input: bool,
                 resume_input: bool,
                 resume_config_path_input: str):
    """Gradio callback: launch policy training and surface the log in the UI."""
    global current_hf_user
    if not current_hf_user:
        return gr.update(visible=True, value="Error: No se ha iniciado sesión en Hugging Face o no se pudo obtener el usuario. Por favor, inicia sesión primero.")

    gr.Info("Iniciando entrenamiento del modelo. Esto puede tardar mucho tiempo...")
    success, message = train_policy_core(
        hf_user=current_hf_user,
        dataset_repo_id=dataset_repo_id_input,
        policy_type=policy_type_input,
        output_dir=output_dir_input,
        job_name=job_name_input,
        policy_device=policy_device_input,
        wandb_enable=wandb_enable_input,
        resume=resume_input,
        resume_config_path=resume_config_path_input
    )
    # Toast the outcome; the detailed log goes to the status textbox.
    gr.Info("Entrenamiento completado. Revisa la salida para los detalles." if success
            else "Entrenamiento fallido. Revisa la salida para los errores.")
    return gr.update(visible=True, value=message)
|
| 281 |
+
|
| 282 |
+
def gradio_upload(policy_repo_name_input: str, checkpoint_path_input: str):
    """Gradio callback: push a trained checkpoint to the Hugging Face Hub."""
    global current_hf_user
    if not current_hf_user:
        return gr.update(visible=True, value="Error: No se ha iniciado sesión en Hugging Face. Por favor, inicia sesión primero.")

    gr.Info(f"Subiendo checkpoint '{checkpoint_path_input}' a '{policy_repo_name_input}'...")
    success, message = upload_policy_core(
        hf_user=current_hf_user,
        policy_repo_name=policy_repo_name_input,
        checkpoint_path=checkpoint_path_input
    )
    gr.Info("Subida completada." if success else "Subida fallida. Revisa la salida.")
    return gr.update(visible=True, value=message)
|
| 299 |
+
|
| 300 |
+
def gradio_evaluate(robot_type_input: str,
                    robot_port_input: str,
                    robot_cameras_input: str,
                    robot_id_input: str,
                    display_data_input: bool,
                    dataset_repo_id_eval_input: str,
                    single_task_eval_input: str,
                    policy_path_input: str,
                    teleop_enable_input: bool,
                    teleop_type_input: str,
                    teleop_port_input: str,
                    teleop_id_input: str):
    """Gradio callback: evaluate a policy on the real robot and show the log."""
    global current_hf_user
    if not current_hf_user:
        return gr.update(visible=True, value="Error: No se ha iniciado sesión en Hugging Face. Por favor, inicia sesión primero.")

    gr.Info("Iniciando evaluación de la política...")
    success, message = evaluate_policy_core(
        hf_user=current_hf_user,
        robot_type=robot_type_input,
        robot_port=robot_port_input,
        robot_cameras=robot_cameras_input,
        robot_id=robot_id_input,
        display_data=display_data_input,
        dataset_repo_id_eval=dataset_repo_id_eval_input,
        single_task=single_task_eval_input,
        policy_path=policy_path_input,
        teleop_enable=teleop_enable_input,
        teleop_type=teleop_type_input,
        teleop_port=teleop_port_input,
        teleop_id=teleop_id_input
    )
    gr.Info("Evaluación completada." if success else "Evaluación fallida. Revisa la salida.")
    return gr.update(visible=True, value=message)
|
| 338 |
+
|
| 339 |
+
# --- Gradio Interface Definition ---

# Bug fix: the original referenced `torch` here (for the device-dropdown
# default) while importing it only at the BOTTOM of the file, which raised
# NameError while building the UI. Detect the default training device up
# front, degrading gracefully to "cpu" when PyTorch is not installed.
try:
    import torch
    if torch.cuda.is_available():
        _DEFAULT_DEVICE = "cuda"
    elif torch.backends.mps.is_available():
        _DEFAULT_DEVICE = "mps"
    else:
        _DEFAULT_DEVICE = "cpu"
except ImportError:
    _DEFAULT_DEVICE = "cpu"

with gr.Blocks(title="Controlador de Entrenamiento y Evaluación LeRobot") as demo:
    gr.Markdown("# <center>Controlador de Entrenamiento y Evaluación de Políticas LeRobot</center>")
    gr.Markdown("Esta interfaz te permite entrenar, subir y evaluar políticas de robot con LeRobot.")

    with gr.Tab("1. Configuración de Hugging Face"):
        gr.Markdown("## Configuración de Hugging Face")
        gr.Markdown(
            "Introduce tu **token de Hugging Face con permisos de escritura**. "
            "Puedes generarlo en [huggingface.co/settings/tokens](https://huggingface.co/settings/tokens)."
        )
        hf_token_input = gr.Textbox(
            label="Token de Hugging Face",
            type="password",
            placeholder="hf_YOUR_ACTUAL_WRITE_TOKEN_HERE",
            info="El token debe tener permisos de escritura (write)."
        )
        login_btn = gr.Button("Iniciar Sesión / Verificar Token")
        hf_user_output = gr.Textbox(label="Usuario de Hugging Face Actual", interactive=False, placeholder="No autenticado", show_copy_button=True)
        login_status_output = gr.Textbox(label="Estado de Autenticación", interactive=False, visible=False, lines=3)

        login_btn.click(
            fn=gradio_login,
            inputs=hf_token_input,
            outputs=[hf_user_output, login_status_output]
        )

    with gr.Tab("2. Entrenamiento de la Política"):
        gr.Markdown("## Entrenar una Política")
        gr.Markdown(
            "Configura los parámetros para entrenar tu política. Asegúrate de tener el dataset listo en Hugging Face Hub."
        )
        with gr.Row():
            dataset_repo_id_input = gr.Textbox(
                label="ID del Repositorio del Dataset (ej. YOUR_USER/so101_test)",
                value="YOUR_USER/so101_test",
                placeholder="Dataset para entrenar",
                info="Asegúrate de que este dataset ya haya sido subido con el script de grabación."
            )
            policy_type_input = gr.Dropdown(
                label="Tipo de Política",
                choices=["act", "diffusion", "rlds"],
                value="act",
                info="Tipo de arquitectura de política a entrenar (e.g., ACT)."
            )
        with gr.Row():
            output_dir_input = gr.Textbox(
                label="Directorio de Salida para Checkpoints",
                value="outputs/train/act_so101_test",
                placeholder="Directorio donde se guardarán los resultados del entrenamiento."
            )
            job_name_input = gr.Textbox(
                label="Nombre del Trabajo (Job Name)",
                value="act_so101_test",
                placeholder="Nombre para identificar tu sesión de entrenamiento."
            )
        with gr.Row():
            policy_device_input = gr.Dropdown(
                label="Dispositivo de Entrenamiento",
                choices=["cuda", "mps", "cpu"],
                value=_DEFAULT_DEVICE,  # detected above; "cpu" when torch is missing
                info="Dispositivo a usar para el entrenamiento (GPU Nvidia: cuda, Apple Silicon: mps, CPU: cpu)."
            )
            wandb_enable_input = gr.Checkbox(
                label="Habilitar Weights & Biases",
                value=True,
                info="Habilita el seguimiento de métricas con Weights & Biases (asegúrate de haber ejecutado 'wandb login')."
            )

        gr.Markdown("### Opciones de Reanudación")
        resume_input = gr.Checkbox(
            label="Reanudar Entrenamiento Existente",
            value=False,
            info="Marca esta casilla para continuar un entrenamiento desde un checkpoint."
        )
        resume_config_path_input = gr.Textbox(
            label="Ruta al train_config.json para Reanudar (ej. outputs/train/act_so101_test/checkpoints/last/pretrained_model/train_config.json)",
            placeholder="Ruta al archivo train_config.json del checkpoint a reanudar.",
            visible=False  # revealed only when the resume checkbox is ticked
        )

        resume_input.change(
            lambda x: gr.update(visible=x),
            inputs=resume_input,
            outputs=resume_config_path_input
        )

        train_btn = gr.Button("🚀 Iniciar Entrenamiento 🚀", variant="primary")
        train_status_output = gr.Textbox(
            label="Log de Entrenamiento",
            interactive=False,
            visible=False,
            lines=20,
            autoscroll=True
        )

        train_btn.click(
            fn=gradio_train,
            inputs=[
                dataset_repo_id_input,
                policy_type_input,
                output_dir_input,
                job_name_input,
                policy_device_input,
                wandb_enable_input,
                resume_input,
                resume_config_path_input
            ],
            outputs=train_status_output
        )

    with gr.Tab("3. Subir Checkpoint de Política"):
        gr.Markdown("## Subir Checkpoint de Política al Hub")
        gr.Markdown(
            "Sube tus modelos entrenados a Hugging Face Hub para compartirlos o usarlos en evaluación."
        )
        policy_repo_name_input = gr.Textbox(
            label="Nombre del Repositorio de la Política (ej. act_so101_test)",
            value="act_so101_test",
            placeholder="Nombre del repositorio en Hugging Face Hub para tu política."
        )
        checkpoint_path_input = gr.Textbox(
            label="Ruta Local al Directorio del Checkpoint (ej. outputs/train/act_so101_test/checkpoints/last/pretrained_model)",
            placeholder="Ruta completa al directorio 'pretrained_model' del checkpoint."
        )
        upload_btn = gr.Button("⬆️ Subir Política ⬆️", variant="secondary")
        upload_status_output = gr.Textbox(
            label="Log de Subida",
            interactive=False,
            visible=False,
            lines=5
        )

        upload_btn.click(
            fn=gradio_upload,
            inputs=[policy_repo_name_input, checkpoint_path_input],
            outputs=upload_status_output
        )

    with gr.Tab("4. Evaluar Política"):
        gr.Markdown("## Evaluar una Política Entrenada")
        gr.Markdown(
            "Usa esta sección para probar tu política entrenada con el robot real. "
            "La teleoperación es opcional durante la evaluación."
        )
        with gr.Row():
            robot_type_eval_input = gr.Textbox(label="Tipo de Robot (e.g., so100_follower)", value="so100_follower")
            robot_port_eval_input = gr.Textbox(label="Puerto del Robot (e.g., /dev/ttyACM1)", value="/dev/ttyACM1")

        robot_cameras_eval_input = gr.Textbox(
            label="Configuración de Cámaras (JSON string)",
            value='{ up: {type: opencv, index_or_path: /dev/video10, width: 640, height: 480, fps: 30}}',
            info="Define tus cámaras como un string JSON. Asegúrate de escapar las comillas internas si es necesario."
        )
        robot_id_eval_input = gr.Textbox(label="ID del Robot", value="my_awesome_follower_arm")
        display_data_eval_input = gr.Checkbox(label="Mostrar Datos (Display Data)", value=False)
        dataset_repo_id_eval_input = gr.Textbox(
            label="ID del Repositorio del Dataset de Evaluación (ej. YOUR_USER/eval_so100)",
            value="YOUR_USER/eval_so100",
            info="El nombre del dataset para guardar los resultados de la evaluación (suele empezar con 'eval_')."
        )
        single_task_eval_input = gr.Textbox(
            label="Descripción de la Tarea (Single Task)",
            value="Put lego brick into the transparent box"
        )
        policy_path_input = gr.Textbox(
            label="Ruta de la Política a Evaluar (local o Hugging Face Hub ID)",
            placeholder="ej. outputs/train/eval_act_so101_test/checkpoints/last/pretrained_model O YOUR_USER/my_policy",
            info="Puede ser una ruta local al checkpoint o el ID de un repositorio de modelo en Hugging Face Hub."
        )

        gr.Markdown("### Teleoperación (Opcional durante la Evaluación)")
        teleop_enable_eval_input = gr.Checkbox(label="Habilitar Teleoperación Durante Evaluación", value=False)
        with gr.Row(visible=False) as teleop_options_row:  # revealed by the checkbox below
            teleop_type_eval_input = gr.Textbox(label="Tipo de Teleop (e.g., so100_leader)", value="so100_leader")
            teleop_port_eval_input = gr.Textbox(label="Puerto de Teleop (e.g., /dev/ttyACM0)", value="/dev/ttyACM0")
            teleop_id_eval_input = gr.Textbox(label="ID de Teleop", value="my_awesome_leader_arm")

        teleop_enable_eval_input.change(
            lambda x: gr.update(visible=x),
            inputs=teleop_enable_eval_input,
            outputs=teleop_options_row
        )

        evaluate_btn = gr.Button("📊 Iniciar Evaluación 📊", variant="primary")
        evaluate_status_output = gr.Textbox(
            label="Log de Evaluación",
            interactive=False,
            visible=False,
            lines=15,
            autoscroll=True
        )

        evaluate_btn.click(
            fn=gradio_evaluate,
            inputs=[
                robot_type_eval_input,
                robot_port_eval_input,
                robot_cameras_eval_input,
                robot_id_eval_input,
                display_data_eval_input,
                dataset_repo_id_eval_input,
                single_task_eval_input,
                policy_path_input,
                teleop_enable_eval_input,
                teleop_type_eval_input,
                teleop_port_eval_input,
                teleop_id_eval_input
            ],
            outputs=evaluate_status_output
        )

    gr.Markdown("---")
    gr.Markdown("Hecho con ❤️ para RobotCleanPupusas503")
|
| 554 |
+
|
| 555 |
+
# Auto-detect CUDA/MPS availability for default device selection (requires torch)
# NOTE(review): this import executes at the bottom of the module, i.e. only
# AFTER the `with gr.Blocks(...)` section above has already been evaluated —
# that section therefore cannot rely on the `torch` name bound here.
try:
    import torch
except ImportError:
    # Keep the app usable without PyTorch; `torch = None` marks it unavailable.
    print("Advertencia: PyTorch no está instalado. No se podrá auto-detectar 'cuda' o 'mps'.")
    torch = None
|
| 561 |
+
|
| 562 |
+
# Script entry point: start the Gradio server locally.
if __name__ == "__main__":
    # share=False: do not create a public gradio.live tunnel URL.
    demo.launch(share=False)
|
requirements.txt → requeriments.txt
RENAMED
|
@@ -1,50 +1,114 @@
|
|
|
|
|
| 1 |
aiofiles==24.1.0
|
| 2 |
annotated-types==0.7.0
|
|
|
|
| 3 |
anyio==4.9.0
|
| 4 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
certifi==2025.6.15
|
|
|
|
| 6 |
charset-normalizer==3.4.2
|
| 7 |
click==8.2.1
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
fastapi==0.115.12
|
|
|
|
| 9 |
ffmpy==0.6.0
|
| 10 |
filelock==3.18.0
|
| 11 |
fsspec==2025.5.1
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
gradio==5.34.0
|
| 13 |
gradio_client==1.10.3
|
| 14 |
groovy==0.1.2
|
|
|
|
|
|
|
| 15 |
h11==0.16.0
|
|
|
|
| 16 |
hf-xet==1.1.3
|
| 17 |
httpcore==1.0.9
|
| 18 |
httpx==0.28.1
|
| 19 |
huggingface-hub==0.33.0
|
|
|
|
| 20 |
idna==3.10
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
Jinja2==3.1.6
|
|
|
|
|
|
|
|
|
|
| 22 |
markdown-it-py==3.0.0
|
| 23 |
MarkupSafe==3.0.2
|
| 24 |
mdurl==0.1.2
|
| 25 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
orjson==3.10.18
|
| 27 |
packaging==25.0
|
| 28 |
pandas==2.3.0
|
| 29 |
pillow==11.2.1
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
pydantic==2.11.7
|
| 31 |
pydantic_core==2.33.2
|
| 32 |
pydub==0.25.1
|
|
|
|
| 33 |
Pygments==2.19.1
|
|
|
|
|
|
|
|
|
|
| 34 |
python-dateutil==2.9.0.post0
|
| 35 |
python-multipart==0.0.20
|
| 36 |
pytz==2025.2
|
| 37 |
PyYAML==6.0.2
|
|
|
|
| 38 |
requests==2.32.4
|
| 39 |
rich==14.0.0
|
| 40 |
ruff==0.11.13
|
| 41 |
safehttpx==0.1.6
|
|
|
|
|
|
|
|
|
|
| 42 |
semantic-version==2.10.0
|
|
|
|
|
|
|
|
|
|
| 43 |
shellingham==1.5.4
|
| 44 |
six==1.17.0
|
|
|
|
| 45 |
sniffio==1.3.1
|
|
|
|
| 46 |
starlette==0.46.2
|
|
|
|
|
|
|
|
|
|
| 47 |
tomlkit==0.13.3
|
|
|
|
|
|
|
| 48 |
tqdm==4.67.1
|
| 49 |
typer==0.16.0
|
| 50 |
typing-inspection==0.4.1
|
|
@@ -52,4 +116,8 @@ typing_extensions==4.14.0
|
|
| 52 |
tzdata==2025.2
|
| 53 |
urllib3==2.4.0
|
| 54 |
uvicorn==0.34.3
|
|
|
|
| 55 |
websockets==15.0.1
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
absl-py==2.3.0
|
| 2 |
aiofiles==24.1.0
|
| 3 |
annotated-types==0.7.0
|
| 4 |
+
antlr4-python3-runtime==4.9.3
|
| 5 |
anyio==4.9.0
|
| 6 |
+
appdirs==1.4.4
|
| 7 |
+
asciitree==0.3.3
|
| 8 |
+
attrs==25.3.0
|
| 9 |
+
av==14.4.0
|
| 10 |
+
beautifulsoup4==4.13.4
|
| 11 |
certifi==2025.6.15
|
| 12 |
+
cffi==1.17.1
|
| 13 |
charset-normalizer==3.4.2
|
| 14 |
click==8.2.1
|
| 15 |
+
cloudpickle==3.1.1
|
| 16 |
+
Cython==3.1.2
|
| 17 |
+
decorator==4.4.2
|
| 18 |
+
diffusers==0.26.3
|
| 19 |
+
dm-env==1.6
|
| 20 |
+
dm-tree==0.1.9
|
| 21 |
+
docker-pycreds==0.4.0
|
| 22 |
+
einops==0.7.0
|
| 23 |
+
etils==1.12.2
|
| 24 |
+
exceptiongroup==1.3.0
|
| 25 |
fastapi==0.115.12
|
| 26 |
+
fasteners==0.19
|
| 27 |
ffmpy==0.6.0
|
| 28 |
filelock==3.18.0
|
| 29 |
fsspec==2025.5.1
|
| 30 |
+
gdown==5.2.0
|
| 31 |
+
gitdb==4.0.12
|
| 32 |
+
GitPython==3.1.44
|
| 33 |
+
glfw==2.9.0
|
| 34 |
gradio==5.34.0
|
| 35 |
gradio_client==1.10.3
|
| 36 |
groovy==0.1.2
|
| 37 |
+
gym==0.26.2
|
| 38 |
+
gym-notices==0.0.8
|
| 39 |
h11==0.16.0
|
| 40 |
+
h5py==3.14.0
|
| 41 |
hf-xet==1.1.3
|
| 42 |
httpcore==1.0.9
|
| 43 |
httpx==0.28.1
|
| 44 |
huggingface-hub==0.33.0
|
| 45 |
+
hydra-core==1.3.2
|
| 46 |
idna==3.10
|
| 47 |
+
imageio==2.37.0
|
| 48 |
+
imageio-ffmpeg==0.6.0
|
| 49 |
+
importlib_metadata==8.7.0
|
| 50 |
+
importlib_resources==6.5.2
|
| 51 |
Jinja2==3.1.6
|
| 52 |
+
lazy_loader==0.4
|
| 53 |
+
lerobot==0.1.0
|
| 54 |
+
llvmlite==0.42.0
|
| 55 |
markdown-it-py==3.0.0
|
| 56 |
MarkupSafe==3.0.2
|
| 57 |
mdurl==0.1.2
|
| 58 |
+
moviepy==1.0.3
|
| 59 |
+
mpmath==1.3.0
|
| 60 |
+
mujoco==3.3.3
|
| 61 |
+
mujoco-py==2.1.2.14
|
| 62 |
+
networkx==3.4.2
|
| 63 |
+
numba==0.59.1
|
| 64 |
+
numcodecs==0.13.1
|
| 65 |
+
numpy==1.26.4
|
| 66 |
+
omegaconf==2.3.0
|
| 67 |
+
opencv-python==4.11.0.86
|
| 68 |
orjson==3.10.18
|
| 69 |
packaging==25.0
|
| 70 |
pandas==2.3.0
|
| 71 |
pillow==11.2.1
|
| 72 |
+
proglog==0.1.12
|
| 73 |
+
protobuf==4.25.8
|
| 74 |
+
psutil==7.0.0
|
| 75 |
+
pycparser==2.22
|
| 76 |
pydantic==2.11.7
|
| 77 |
pydantic_core==2.33.2
|
| 78 |
pydub==0.25.1
|
| 79 |
+
pygame==2.6.1
|
| 80 |
Pygments==2.19.1
|
| 81 |
+
pymunk==6.11.1
|
| 82 |
+
PyOpenGL==3.1.9
|
| 83 |
+
PySocks==1.7.1
|
| 84 |
python-dateutil==2.9.0.post0
|
| 85 |
python-multipart==0.0.20
|
| 86 |
pytz==2025.2
|
| 87 |
PyYAML==6.0.2
|
| 88 |
+
regex==2024.11.6
|
| 89 |
requests==2.32.4
|
| 90 |
rich==14.0.0
|
| 91 |
ruff==0.11.13
|
| 92 |
safehttpx==0.1.6
|
| 93 |
+
safetensors==0.5.3
|
| 94 |
+
scikit-image==0.22.0
|
| 95 |
+
scipy==1.15.3
|
| 96 |
semantic-version==2.10.0
|
| 97 |
+
sentry-sdk==2.30.0
|
| 98 |
+
setproctitle==1.3.6
|
| 99 |
+
shapely==2.1.1
|
| 100 |
shellingham==1.5.4
|
| 101 |
six==1.17.0
|
| 102 |
+
smmap==5.0.2
|
| 103 |
sniffio==1.3.1
|
| 104 |
+
soupsieve==2.7
|
| 105 |
starlette==0.46.2
|
| 106 |
+
sympy==1.14.0
|
| 107 |
+
termcolor==2.5.0
|
| 108 |
+
tifffile==2025.5.10
|
| 109 |
tomlkit==0.13.3
|
| 110 |
+
torch==2.2.2
|
| 111 |
+
torchvision==0.17.2
|
| 112 |
tqdm==4.67.1
|
| 113 |
typer==0.16.0
|
| 114 |
typing-inspection==0.4.1
|
|
|
|
| 116 |
tzdata==2025.2
|
| 117 |
urllib3==2.4.0
|
| 118 |
uvicorn==0.34.3
|
| 119 |
+
wandb==0.16.6
|
| 120 |
websockets==15.0.1
|
| 121 |
+
wrapt==1.17.2
|
| 122 |
+
zarr==2.18.3
|
| 123 |
+
zipp==3.23.0
|
robot_detector.py
CHANGED
|
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# backend/robot_detector.py
|
| 2 |
+
import serial.tools.list_ports
|
| 3 |
+
import gradio as gr
|
| 4 |
+
import json
|
| 5 |
+
import os
|
| 6 |
+
|
| 7 |
+
# Define the path to save/load configurations.
# Relative path resolved against the current working directory at runtime;
# written by save_config and read by load_config below.
CONFIG_FILE = "config/default_config.json"
|
| 9 |
+
|
| 10 |
+
def list_serial_ports():
    """
    Enumerate the serial ports currently visible to the operating system.

    Returns:
        A list of human-readable strings ("device - description (hwid)"), or a
        single-element placeholder list when no ports are connected.
    """
    available = serial.tools.list_ports.comports()
    if not available:
        return ["No se encontraron puertos seriales disponibles."]

    def _describe(port):
        # Append the hardware id only when it carries a USB vendor/product id.
        hwid_suffix = f" ({port.hwid})" if port.hwid and "VID:PID" in port.hwid else ""
        return f"{port.device} - {port.description}{hwid_suffix}"

    return [_describe(port) for port in available]
|
| 26 |
+
|
| 27 |
+
def save_config(follower_port: str, follower_id: str, leader_port: str, leader_id: str):
    """
    Persist the robot (follower) and teleoperator (leader) configuration to
    CONFIG_FILE as JSON.

    Args:
        follower_port / leader_port: Dropdown entries formatted like
            "/dev/ttyACM0 - description"; only the device path is stored.
        follower_id / leader_id: Stable user-chosen identifiers.

    Returns:
        A human-readable status message (success or error).
    """
    config = {
        "robot_follower": {
            # Keep only the device path from "device - description" strings.
            "port": follower_port.split(' ')[0] if follower_port else "",
            "id": follower_id
        },
        "teleop_leader": {
            "port": leader_port.split(' ')[0] if leader_port else "",
            "id": leader_id
        }
    }
    try:
        # Fix: create the target directory on first use; otherwise open()
        # fails with FileNotFoundError when "config/" does not exist yet.
        config_dir = os.path.dirname(CONFIG_FILE)
        if config_dir:
            os.makedirs(config_dir, exist_ok=True)
        with open(CONFIG_FILE, 'w') as f:
            json.dump(config, f, indent=4)
        return f"Configuración guardada en {CONFIG_FILE} correctamente."
    except Exception as e:
        return f"Error al guardar la configuración: {e}"
|
| 47 |
+
|
| 48 |
+
def load_config():
    """Read the saved follower/leader configuration from CONFIG_FILE.

    Returns:
        A 4-tuple (follower_port, follower_id, leader_port, leader_id).
        Empty strings are returned when the file is missing or unreadable;
        in the unreadable case a Gradio warning is also emitted.
    """
    if not os.path.exists(CONFIG_FILE):
        return "", "", "", ""
    try:
        with open(CONFIG_FILE, 'r') as f:
            data = json.load(f)
        follower = data.get("robot_follower", {})
        leader = data.get("teleop_leader", {})
        return (
            follower.get("port", ""),
            follower.get("id", ""),
            leader.get("port", ""),
            leader.get("id", ""),
        )
    except Exception as e:
        # A corrupt/unexpected file should not crash the UI — warn and fall back.
        gr.Warning(f"Error al cargar la configuración existente: {e}. Se usarán valores por defecto.")
        return "", "", "", ""
|
| 66 |
+
|
| 67 |
+
# --- Gradio Interface ---

# Pre-populate the form with any previously saved configuration.
initial_follower_port, initial_follower_id, initial_leader_port, initial_leader_id = load_config()

with gr.Blocks(title="Detector y Configuración de Robots LeRobot") as demo:
    gr.Markdown("# <center>Detector y Configuración de Robots LeRobot</center>")
    gr.Markdown(
        "Esta herramienta te ayuda a identificar los puertos seriales de tus robots "
        "SO101 (follower y leader) y asignarles un ID consistente."
    )

    with gr.Tab("1. Detectar Puertos Seriales"):
        gr.Markdown("## Puertos Seriales Detectados")
        gr.Markdown(
            "Conecta tus robots SO101 (follower y leader) a tu computadora y haz clic en 'Refrescar Puertos'. "
            "Observa la lista y compara con los nombres de tus dispositivos si es posible para identificarlos."
        )
        refresh_btn = gr.Button("🔄 Refrescar Puertos")
        port_list_output = gr.Textbox(
            label="Puertos Seriales Disponibles",
            interactive=False,
            lines=10,
            value="\n".join(list_serial_ports()),  # initial list on startup
            show_copy_button=True
        )
        refresh_btn.click(
            fn=lambda: "\n".join(list_serial_ports()),
            inputs=None,
            outputs=port_list_output
        )

    with gr.Tab("2. Asignar Puertos e IDs"):
        gr.Markdown("## Asignación de Robots y Teleoperadores")
        gr.Markdown(
            "Selecciona el puerto correcto para cada dispositivo y asigna un ID único. "
            "**¡Es crucial usar el mismo ID en todos los scripts (grabación, entrenamiento, evaluación)!**"
        )
        gr.Markdown("### Robot Follower (Brazo del Robot SO101)")
        follower_port_dropdown = gr.Dropdown(
            label="Puerto Serial del Robot Follower",
            choices=[],  # populated dynamically by update_port_choices below
            value=initial_follower_port,
            info="Selecciona el puerto donde está conectado el brazo del robot (SO101)."
        )
        follower_id_input = gr.Textbox(
            label="ID del Robot Follower",
            value=initial_follower_id if initial_follower_id else "my_robot_follower_arm",
            placeholder="ej. my_robot_follower_arm",
            info="Un identificador único para este robot. Usado para archivos de calibración."
        )

        gr.Markdown("### Teleoperador Leader (Brazo de Control SO101)")
        leader_port_dropdown = gr.Dropdown(
            label="Puerto Serial del Teleoperador Leader",
            choices=[],  # populated dynamically by update_port_choices below
            value=initial_leader_port,
            info="Selecciona el puerto donde está conectado el dispositivo de control (SO101)."
        )
        leader_id_input = gr.Textbox(
            label="ID del Teleoperador Leader",
            value=initial_leader_id if initial_leader_id else "my_teleop_leader_arm",
            placeholder="ej. my_teleop_leader_arm",
            info="Un identificador único para el dispositivo de teleoperación."
        )

        save_btn = gr.Button("💾 Guardar Configuración", variant="primary")
        save_status_output = gr.Textbox(
            label="Estado de Guardado",
            interactive=False,
            visible=False  # revealed after the first save attempt (see _save_and_report)
        )

        def _save_and_report(follower_port, follower_id, leader_port, leader_id):
            # Bug fix: the status textbox was created visible=False and never
            # revealed, so save_config's result was never shown to the user.
            # Show the textbox together with the message.
            message = save_config(follower_port, follower_id, leader_port, leader_id)
            return gr.update(value=message, visible=True)

        save_btn.click(
            fn=_save_and_report,
            inputs=[follower_port_dropdown, follower_id_input, leader_port_dropdown, leader_id_input],
            outputs=save_status_output
        )

        def update_port_choices():
            # Only real device paths (e.g. /dev/ttyUSB0) become dropdown choices;
            # the "no ports found" sentinel string is filtered out.
            ports = list_serial_ports()
            device_paths = [p.split(' ')[0] for p in ports if "No se encontraron" not in p]
            return gr.update(choices=device_paths), gr.update(choices=device_paths)

        # Refresh dropdown choices on initial page load...
        demo.load(
            fn=update_port_choices,
            inputs=None,
            outputs=[follower_port_dropdown, leader_port_dropdown]
        )
        # ...and also whenever the refresh button is clicked (second listener on
        # the same button, alongside the textbox refresh bound in Tab 1).
        refresh_btn.click(
            fn=update_port_choices,
            inputs=None,
            outputs=[follower_port_dropdown, leader_port_dropdown]
        )

    gr.Markdown("---")
    gr.Markdown(f"**La configuración guardada se almacenará en:** `{os.path.abspath(CONFIG_FILE)}`")
    gr.Markdown(
        "**Instrucciones para otros scripts:**\n"
        # Bug fix: these instructions previously referred to a non-existent
        # "robot_config.json"; the configuration is actually saved to CONFIG_FILE.
        f"1. Abre `{CONFIG_FILE}`.\n"
        "2. Copia los valores de `port` y `id` a tus scripts de `record_controller.py` y `train_robot.py` (para evaluación)."
    )
    gr.Markdown("Hecho con ❤️ para RobotCleanPupusas503")

if __name__ == "__main__":
    try:
        import serial  # verify pyserial is installed before launching the UI
    except ImportError:
        print("Error: La librería 'pyserial' no está instalada.")
        print("Por favor, instala 'pyserial' ejecutando: pip install pyserial")
        exit(1)

    demo.launch(share=False)
|