# main_orchestrator.py — Agent2Robot / backup (author: sam133, commit fe37569)
# Restructured repo following the successful MCP Hackathon pattern: clean
# modular architecture with mcp_client.py, design_tools.py, and an organized
# app.py.
import os
import ssl
import time
import imageio
import numpy as np
from PIL import Image
import json
from datetime import datetime
import tempfile
import traceback
# Project-local modules: physics sandbox, LLM access, and result scoring.
import simulation_env_enhanced as simulation_env
import llm_interface_enhanced as llm_interface
import evaluation

# SSL workaround for Gradio issues: point the cert bundle at certifi's when
# certifi is installed; otherwise fall back silently.
try:
    import certifi
    os.environ['SSL_CERT_FILE'] = certifi.where()
except ImportError:
    pass

# Try to disable SSL verification as a workaround.
# NOTE(review): this disables HTTPS certificate verification process-wide — a
# security risk. Acceptable only for a local demo; confirm before shipping.
try:
    ssl._create_default_https_context = ssl._create_unverified_context
except AttributeError:
    pass

# Try to import Gradio with error handling; the __main__ block checks this
# flag and refuses to start the web UI when Gradio failed to import.
GRADIO_AVAILABLE = False
try:
    import gradio as gr
    GRADIO_AVAILABLE = True
    print("βœ“ Gradio imported successfully")
except Exception as e:
    print(f"⚠ Gradio import failed: {e}")
    print("Will use console-based interface instead")
    GRADIO_AVAILABLE = False

# Global configuration
MAX_ITERATIONS = 5            # max LLM design/simulation iterations per session
SIMULATION_DURATION_SEC = 10  # time budget for each simulation run
OBSTACLE_FAR_EDGE_X = 0.8     # x position (m) the vehicle must pass to succeed
class HackathonVehicleDesigner:
    """Enhanced vehicle designer for hackathon with comprehensive tracking and feedback.

    Orchestrates the iterative design loop: prompt the LLM for a vehicle
    design, build it in the PyBullet simulation, run the simulation, score the
    outcome against interpreted success criteria, and keep the best attempt.
    All state is per-session and reset via reset_design_session().
    """

    def __init__(self):
        # Start every instance from a clean session.
        self.reset_design_session()

    def reset_design_session(self):
        """Reset all session variables for new design process"""
        self.all_attempts = []       # one attempt dict per iteration (see run_single_iteration)
        self.best_attempt = None     # attempt dict judged best so far by is_current_better_than_best
        self.best_iteration = None   # 1-based iteration number of best_attempt
        self.process_log = []        # timestamped human-readable log lines
        self.current_iteration = 0
        self.overall_success = False  # set True once an iteration meets all criteria
        self.user_task_description = ""
        self.vehicle_type = "robot"   # "robot" or "drone"
        self.llm_interpreted_criteria = []  # criteria strings derived from the task text

    def log_process_step(self, message):
        """Add a step to the process log with timestamp"""
        timestamp = datetime.now().strftime("%H:%M:%S")
        log_entry = f"[{timestamp}] {message}"
        self.process_log.append(log_entry)
        print(log_entry)  # Also print to console

    def parse_user_task_for_criteria(self, task_description):
        """Extract and interpret success criteria from user task description.

        Returns the list of criteria strings and also stores it on
        self.llm_interpreted_criteria.
        """
        # This is where the LLM would interpret user criteria
        # For now, we'll use a simple rule-based approach and enhance with LLM later
        criteria = []
        task_lower = task_description.lower()
        # Basic criteria that are always present
        criteria.append("Cross the obstacle completely (reach x > 0.8m)")
        criteria.append("Maintain stability throughout the process")
        criteria.append("Avoid getting stuck on or damaged by the obstacle")
        # Additional criteria based on keyword matches in the task description
        if "quick" in task_lower or "fast" in task_lower:
            criteria.append("Complete the task as quickly as possible")
        if "stop" in task_lower or "halt" in task_lower:
            criteria.append("Come to a controlled stop after crossing")
        # NOTE(review): substring test on the vehicle-type string, not equality —
        # presumably intentional, verify.
        if "land" in task_lower and "drone" in self.vehicle_type:
            criteria.append("Land safely after crossing the obstacle")
        if "stable" in task_lower or "steady" in task_lower:
            criteria.append("Maintain steady movement without excessive oscillation")
        self.llm_interpreted_criteria = criteria
        return criteria

    def run_single_iteration(self, iteration_num):
        """Run a single design and simulation iteration.

        Prompts the LLM (initial or refinement prompt), builds the proposed
        vehicle in PyBullet, simulates it, evaluates the outcome, and appends
        an attempt record to self.all_attempts. Returns True on overall
        success, False otherwise (including on errors, which are recorded as a
        failed attempt). The simulation is reset in all cases.
        """
        self.current_iteration = iteration_num
        self.log_process_step(f"=== Starting Iteration {iteration_num} ===")
        try:
            # Generate prompt for LLM: initial design on iteration 1,
            # refinement (seeded with the previous attempt) afterwards.
            if iteration_num == 1:
                self.log_process_step("Requesting initial design from LLM agent...")
                if self.vehicle_type == "robot":
                    prompt = llm_interface.generate_initial_robot_design_prompt_with_criteria(
                        self.user_task_description, self.llm_interpreted_criteria
                    )
                else:
                    prompt = llm_interface.generate_initial_drone_design_prompt_with_criteria(
                        self.user_task_description, self.llm_interpreted_criteria
                    )
                previous_attempt = None
            else:
                self.log_process_step(f"Requesting design refinement from LLM agent (iteration {iteration_num})...")
                previous_attempt = self.all_attempts[-1]
                if self.vehicle_type == "robot":
                    prompt = llm_interface.generate_iterative_robot_design_prompt_with_criteria(
                        previous_attempt, iteration_num, self.llm_interpreted_criteria
                    )
                else:
                    prompt = llm_interface.generate_iterative_drone_design_prompt_with_criteria(
                        previous_attempt, iteration_num, self.llm_interpreted_criteria
                    )
            # Call LLM for design
            llm_response = llm_interface.call_llm_api(prompt)
            if not llm_response:
                raise Exception("Failed to get valid response from LLM")
            # Extract vehicle specs and reasoning.
            # NOTE: drone designs are also delivered under the 'robot_specs' key.
            vehicle_specs = llm_response.get('robot_specs', {})
            vehicle_specs["vehicle_type"] = self.vehicle_type
            design_reasoning = llm_response.get('design_reasoning', 'No reasoning provided')
            llm_success_conditions = llm_response.get('llm_interpreted_success_conditions', self.llm_interpreted_criteria)
            self.log_process_step(f"LLM proposed design: {vehicle_specs}")
            self.log_process_step(f"Design reasoning: {design_reasoning}")
            self.log_process_step(f"LLM's success conditions: {llm_success_conditions}")
            # Setup and run simulation
            self.log_process_step("Setting up PyBullet simulation environment...")
            obstacle_id, plane_id = simulation_env.setup_pybullet_environment()
            # Create vehicle (robots have no extra props; drones return props too)
            self.log_process_step(f"Creating {self.vehicle_type} in simulation...")
            if self.vehicle_type == "robot":
                vehicle_id, joint_indices, v_type = simulation_env.create_robot(vehicle_specs)
                vehicle_props = None
            else:
                vehicle_id, joint_indices, v_type, vehicle_props = simulation_env.create_drone(vehicle_specs)
            # Run simulation
            self.log_process_step("Running physics simulation...")
            frames, final_feedback = self.run_simulation_loop(
                vehicle_id, joint_indices, vehicle_props
            )
            # Evaluate results against the LLM's own success conditions
            self.log_process_step("Evaluating simulation results...")
            evaluation_results = evaluation.evaluate_simulation_outcome_with_criteria(
                final_feedback, OBSTACLE_FAR_EDGE_X, llm_success_conditions
            )
            # Create feedback text for the next LLM refinement prompt
            llm_feedback = evaluation.format_feedback_for_llm_with_criteria(
                evaluation_results, llm_success_conditions
            )
            self.log_process_step(f"Simulation results: {llm_feedback}")
            # Store attempt data (schema shared with the error path below)
            attempt_data = {
                "iteration": iteration_num,
                "llm_design": llm_response,
                "vehicle_specs": vehicle_specs,
                "design_reasoning": design_reasoning,
                "llm_success_conditions": llm_success_conditions,
                "evaluation_results": evaluation_results,
                "feedback_from_simulation": llm_feedback,
                "frames": frames
            }
            self.all_attempts.append(attempt_data)
            # Update best attempt
            if self.is_current_better_than_best(attempt_data):
                self.best_attempt = attempt_data
                self.best_iteration = iteration_num
                self.log_process_step(f"πŸ† New best design found in iteration {iteration_num}!")
            # Check for overall success
            if evaluation_results.get('overall_success', False):
                self.overall_success = True
                self.log_process_step("πŸŽ‰ SUCCESS! Design meets all criteria!")
                return True
            else:
                failure_reason = evaluation_results.get('specific_failure_point', 'unknown')
                self.log_process_step(f"❌ Iteration {iteration_num} failed: {failure_reason}")
                return False
        except Exception as e:
            error_msg = f"Error in iteration {iteration_num}: {str(e)}"
            self.log_process_step(f"🚨 {error_msg}")
            print(f"Full error traceback: {traceback.format_exc()}")
            # Create error attempt data so the iteration history stays complete
            error_attempt = {
                "iteration": iteration_num,
                "llm_design": {"error": str(e)},
                "vehicle_specs": {},
                "design_reasoning": f"Error occurred: {str(e)}",
                "llm_success_conditions": self.llm_interpreted_criteria,
                "evaluation_results": {
                    "overall_success": False,
                    "robot_crossed_obstacle": False,
                    "robot_remains_upright": False,
                    "final_robot_x_position": 0.0,
                    "specific_failure_point": "simulation_error"
                },
                "feedback_from_simulation": f"Simulation failed: {str(e)}",
                "frames": []
            }
            self.all_attempts.append(error_attempt)
            return False
        finally:
            # Cleanup simulation; best-effort — NOTE(review): the bare except
            # swallows all errors, including KeyboardInterrupt.
            try:
                simulation_env.reset_simulation()
            except:
                pass

    def run_simulation_loop(self, vehicle_id, joint_indices, vehicle_props):
        """Run the simulation loop and capture frames.

        Steps the simulation, grabbing a frame every 24 steps, and polls
        feedback each step. Exits early once the vehicle clears the obstacle
        edge (+10cm margin), tips over, or the time budget elapses.
        Returns (frames, last_feedback_dict).
        """
        frames = []
        start_time = time.time()
        # 240 steps/sec assumed — presumably PyBullet's default timestep; confirm.
        simulation_steps = int(SIMULATION_DURATION_SEC * 240)
        for step in range(simulation_steps):
            # Run simulation step
            simulation_env.run_simulation_step(
                vehicle_id, joint_indices, {}, self.vehicle_type, vehicle_props
            )
            # NOTE(review): wall-clock elapsed time, not simulated time — the
            # two diverge if stepping is slower than real time; confirm intent.
            current_sim_time = time.time() - start_time
            # Capture frames for visualization
            if step % 24 == 0:  # 10 FPS
                try:
                    frame = simulation_env.capture_frame()
                    if frame:
                        frames.append(frame)
                except:
                    pass  # frame capture is best-effort; simulation continues
            # Get current feedback
            obstacle_id = 1  # Assuming obstacle has ID 1 — TODO: pass the real id from setup
            feedback = simulation_env.get_simulation_feedback(
                vehicle_id, obstacle_id, start_time, current_sim_time, self.vehicle_type
            )
            # Check for early exit conditions: cleared the obstacle or fell over
            vehicle_x_pos = feedback['robot_position'][0]
            is_stable = feedback['is_robot_upright']
            if vehicle_x_pos > OBSTACLE_FAR_EDGE_X + 0.1 or not is_stable:
                break
            if current_sim_time > SIMULATION_DURATION_SEC:
                break
        return frames, feedback

    def is_current_better_than_best(self, current_attempt):
        """Determine if current attempt is better than the current best.

        Ranking: overall success > obstacle crossed > distance traveled.
        """
        if not self.best_attempt:
            return True
        current_eval = current_attempt['evaluation_results']
        best_eval = self.best_attempt['evaluation_results']
        # Priority 1: Overall success
        if current_eval.get('overall_success', False) and not best_eval.get('overall_success', False):
            return True
        elif best_eval.get('overall_success', False) and not current_eval.get('overall_success', False):
            return False
        # Priority 2: Obstacle crossing
        if current_eval.get('robot_crossed_obstacle', False) and not best_eval.get('robot_crossed_obstacle', False):
            return True
        elif best_eval.get('robot_crossed_obstacle', False) and not current_eval.get('robot_crossed_obstacle', False):
            return False
        # Priority 3: Distance traveled (tie-break; strict > keeps the earlier attempt)
        current_distance = current_eval.get('final_robot_x_position', 0.0)
        best_distance = best_eval.get('final_robot_x_position', 0.0)
        return current_distance > best_distance

    def create_final_visualization(self):
        """Create GIF from best attempt frames.

        Returns the GIF path under outputs/, or None when there are no frames
        or writing fails.
        """
        if not self.best_attempt or not self.best_attempt.get('frames'):
            return None
        try:
            # Create timestamp for unique filename
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            gif_filename = f"best_{self.vehicle_type}_design_{timestamp}.gif"
            gif_path = os.path.join("outputs", gif_filename)
            # Ensure outputs directory exists
            os.makedirs("outputs", exist_ok=True)
            # Convert frames to numpy arrays (frames may be PIL Images or arrays)
            frame_arrays = []
            for frame in self.best_attempt['frames']:
                if isinstance(frame, Image.Image):
                    frame_arrays.append(np.array(frame))
                else:
                    frame_arrays.append(frame)
            if frame_arrays:
                imageio.mimsave(gif_path, frame_arrays, fps=10, loop=0)  # loop=0: loop forever
                return gif_path
            else:
                return None
        except Exception as e:
            print(f"Error creating visualization: {e}")
            return None

    def save_design_specs_json(self):
        """Save best design specifications to downloadable JSON file.

        Returns the temp-file path (caller/Gradio is responsible for cleanup,
        since delete=False), or None when there is no best attempt or the
        write fails.
        """
        if not self.best_attempt:
            return None
        try:
            # Create comprehensive design specification
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            design_data = {
                "hackathon_submission": {
                    "project_title": "LLM-Agent-Designed Obstacle-Passing Vehicle System",
                    "track": "Track 3: Agentic Demo Showcase",
                    "timestamp": datetime.now().isoformat(),
                    "vehicle_type": self.vehicle_type
                },
                "user_task": {
                    "description": self.user_task_description,
                    "llm_interpreted_criteria": self.llm_interpreted_criteria
                },
                "design_process": {
                    "total_iterations": len(self.all_attempts),
                    "best_iteration": self.best_iteration,
                    "overall_success": self.overall_success,
                    "max_iterations_allowed": MAX_ITERATIONS
                },
                "best_design": {
                    "vehicle_specifications": self.best_attempt['vehicle_specs'],
                    "design_reasoning": self.best_attempt['design_reasoning'],
                    "llm_success_conditions": self.best_attempt['llm_success_conditions']
                },
                "performance_results": self.best_attempt['evaluation_results'],
                "technical_details": {
                    "simulation_duration_sec": SIMULATION_DURATION_SEC,
                    "obstacle_specifications": {
                        "height_cm": 5,
                        "width_cm": 50,
                        "depth_cm": 10,
                        "position_x_m": 0.75
                    },
                    "success_threshold_x_m": OBSTACLE_FAR_EDGE_X,
                    "physics_engine": "PyBullet",
                    "llm_model": "Enhanced fallback system"
                }
            }
            # Create temporary file for download.
            # NOTE(review): no encoding= is given, so the platform default is
            # used; with ensure_ascii=False this can fail on non-UTF-8 locales.
            temp_file = tempfile.NamedTemporaryFile(
                mode='w', suffix='.json', delete=False,
                prefix=f'best_{self.vehicle_type}_design_{timestamp}_'
            )
            json.dump(design_data, temp_file, indent=2, ensure_ascii=False)
            temp_file.close()
            return temp_file.name
        except Exception as e:
            print(f"Error saving design specs: {e}")
            return None

    def generate_readme_content(self):
        """Generate README content for hackathon submission.

        Returns a Markdown string with session results interpolated.
        """
        readme_content = f"""# πŸ€–πŸš LLM-Agent-Designed Obstacle-Passing Vehicle System
**Hackathon Submission - Track 3: Agentic Demo Showcase**
## Project Description
An AI agent that iteratively designs robots or drones using an LLM and PyBullet simulation to meet user-defined functional criteria. The system demonstrates autonomous design iteration, real-time physics simulation, and intelligent performance optimization.
## 🎯 Key Innovation
- **LLM-Driven Design**: AI agent autonomously proposes and refines vehicle designs
- **Physics-Based Validation**: Real-time PyBullet simulation for accurate performance testing
- **Criteria-Driven Optimization**: User-defined success criteria guide the design process
- **Iterative Intelligence**: Agent learns from simulation feedback to improve designs
## πŸš€ How to Run
### Prerequisites
- Python 3.10+
- Required packages: `pip install -r requirements.txt`
### Usage
```bash
python main_orchestrator.py
```
Open your browser to the provided URL (typically http://localhost:7860)
## πŸ› οΈ Key Technologies Used
- **Python**: Core implementation language
- **Gradio**: Interactive web interface
- **PyBullet**: Physics simulation engine
- **Transformers/LLM**: AI agent for design generation
- **PIL/imageio**: Visualization and GIF generation
## 🎬 Demo Video
[Link to Video Overview/Demo] - *To be added*
## πŸ† Hackathon Features Demonstrated
### Technical Implementation
- Robust PyBullet physics simulation
- LLM integration with fallback mechanisms
- Real-time iterative design optimization
- Comprehensive error handling
### Usability
- Intuitive Gradio interface
- Real-time process visualization
- Downloadable design specifications
- Clear success/failure feedback
### Innovation
- AI agent designing physical entities
- Dynamic success criteria interpretation
- Physics-simulation feedback loop
- Best design tracking and analysis
### Impact
- Educational tool for understanding AI-driven design
- Framework for autonomous vehicle optimization
- Demonstration of LLM practical applications
## πŸ“Š Current Session Results
**Vehicle Type**: {self.vehicle_type.capitalize()}
**Task**: {self.user_task_description}
**Iterations Completed**: {len(self.all_attempts)}
**Overall Success**: {'βœ… Yes' if self.overall_success else '❌ No'}
## 🀝 MCP Integration Potential
This system can be extended to function as an MCP Tool/Server (Track 1) by exposing:
- Vehicle design tools
- Simulation execution tools
- Performance evaluation tools
- Iterative optimization tools
## πŸ“„ License
MIT License - Open source for educational and research purposes.
---
*Generated automatically by LLM-Agent-Designed Vehicle System*
*Timestamp: {datetime.now().isoformat()}*
"""
        return readme_content
# Enhanced LLM Interface Functions (add to llm_interface_enhanced.py)
def generate_initial_robot_design_prompt_with_criteria(task_description, success_criteria):
    """Build the first-iteration robot design prompt.

    Embeds the user's task and the interpreted success criteria (as a
    bulleted list) and spells out the exact JSON schema the LLM must
    answer with under the 'robot_specs' key.
    """
    bullets = "\n".join(f"- {item}" for item in success_criteria)
    return f"""You are an expert robot design AI. Your task is to design a robot based on the following user requirements:
USER TASK: {task_description}
USER SUCCESS CRITERIA (as interpreted by the system):
{bullets}
ENVIRONMENT:
Obstacle: Rectangular block (5cm high, 50cm wide, 10cm deep) at x=0.75m
Robot starts at x=0m and must traverse forward
AVAILABLE ROBOT PARAMETERS (provide in JSON format within 'robot_specs'):
- "wheel_type": ["small_high_grip", "large_smooth", "tracked_base"]
- "body_clearance_cm": integer 1-10 (ground clearance in cm)
- "approach_sensor_enabled": true/false
- "main_material": ["light_plastic", "sturdy_metal_alloy"]
REQUIRED OUTPUT FORMAT:
{{
"robot_design_iteration": 1,
"design_reasoning": "Your detailed explanation of design choices",
"llm_interpreted_success_conditions": ["condition 1", "condition 2", ...],
"robot_specs": {{
"wheel_type": "your_choice",
"body_clearance_cm": your_number,
"approach_sensor_enabled": your_boolean,
"main_material": "your_choice"
}}
}}
Please provide your robot design now:"""
def generate_initial_drone_design_prompt_with_criteria(task_description, success_criteria):
    """Build the first-iteration drone design prompt.

    Mirrors the robot variant: the user task plus interpreted success
    criteria are embedded, and the drone parameter space and required
    JSON response schema (still keyed 'robot_specs') are laid out.
    """
    bullets = "\n".join(f"- {item}" for item in success_criteria)
    return f"""You are an expert drone design AI. Your task is to design a drone based on the following user requirements:
USER TASK: {task_description}
USER SUCCESS CRITERIA (as interpreted by the system):
{bullets}
ENVIRONMENT:
Obstacle: Rectangular block (5cm high, 50cm wide, 10cm deep) at x=0.75m
Drone starts at x=0m and must fly over/around the obstacle
AVAILABLE DRONE PARAMETERS (provide in JSON format within 'robot_specs'):
- "propeller_size": ["small_agile", "medium", "large_stable"]
- "flight_height_cm": integer 10-50 (target flight altitude)
- "stability_mode": ["auto_hover", "manual_control"]
- "main_material": ["light_carbon_fiber", "sturdy_aluminum"]
REQUIRED OUTPUT FORMAT:
{{
"robot_design_iteration": 1,
"design_reasoning": "Your detailed explanation of design choices",
"llm_interpreted_success_conditions": ["condition 1", "condition 2", ...],
"robot_specs": {{
"propeller_size": "your_choice",
"flight_height_cm": your_number,
"stability_mode": "your_choice",
"main_material": "your_choice"
}}
}}
Please provide your drone design now:"""
# Initialize global designer instance.
# Shared mutable session state: design_vehicle_task resets it on every run.
designer = HackathonVehicleDesigner()
def design_vehicle_task(vehicle_type, task_description, progress=gr.Progress()):
    """Main function for Gradio interface - enhanced for hackathon.

    This is a *generator* event handler: it yields one tuple of output values
    per iteration so Gradio can stream the live log, then yields the final
    results as its last item.

    Args:
        vehicle_type: "robot" or "drone".
        task_description: free-text task whose success criteria are interpreted.
        progress: Gradio progress tracker (default injected at definition time,
            as is conventional for Gradio).

    Yields:
        8-tuples matching the click() outputs:
        (process_log, overall_status, best_design_specs, simulation_gif,
         performance_summary, llm_rationale, download_specs, readme_content)
    """
    global designer
    # Reset designer for new task
    designer.reset_design_session()
    designer.vehicle_type = vehicle_type
    designer.user_task_description = task_description
    # Parse user criteria
    designer.log_process_step("🎯 Analyzing user task and success criteria...")
    criteria = designer.parse_user_task_for_criteria(task_description)
    designer.log_process_step(f"πŸ“‹ Interpreted success criteria:")
    for criterion in criteria:
        designer.log_process_step(f" β€’ {criterion}")
    # Start design process
    designer.log_process_step(f"πŸš€ Starting {vehicle_type} design process...")
    designer.log_process_step(f"🎯 Target: {task_description}")
    # Run iterations, streaming the log after each one
    for iteration in range(1, MAX_ITERATIONS + 1):
        if progress:
            progress((iteration - 1) / MAX_ITERATIONS, f"Running iteration {iteration}/{MAX_ITERATIONS}")
        success = designer.run_single_iteration(iteration)
        # Yield current progress; result panels stay empty until the end
        current_log = "\n".join(designer.process_log)
        yield (
            current_log,  # process_log
            None,         # overall_status (placeholder)
            None,         # best_design_specs (placeholder)
            None,         # simulation_gif (placeholder)
            None,         # performance_summary (placeholder)
            None,         # llm_rationale (placeholder)
            None,         # download_specs (placeholder)
            None          # readme_content (placeholder)
        )
        if success:
            break
    # Generate final results
    designer.log_process_step("πŸ“Š Generating final results and visualizations...")
    # Create overall status
    if designer.overall_success:
        overall_status = "## πŸŽ‰ SUCCESS!\n\nThe LLM agent successfully designed a vehicle that meets all criteria!"
    else:
        overall_status = "## ❌ PROCESS COMPLETED\n\nThe agent completed all iterations but did not achieve full success. Best attempt is shown below."
    # Get best design specs
    best_specs = designer.best_attempt['vehicle_specs'] if designer.best_attempt else {}
    # Create visualization
    simulation_gif = designer.create_final_visualization()
    # Format performance summary
    if designer.best_attempt:
        eval_results = designer.best_attempt['evaluation_results']
        performance_summary = f"""## πŸ“Š Performance Summary of Best Design
**Final Position**: {eval_results.get('final_robot_x_position', 0.0):.3f}m
**Crossed Obstacle**: {'βœ… Yes' if eval_results.get('robot_crossed_obstacle', False) else '❌ No'}
**Remained Stable**: {'βœ… Yes' if eval_results.get('robot_remains_upright', False) else '❌ No'}
**Clean Pass**: {'βœ… Yes' if eval_results.get('no_significant_collision_with_obstacle_during_pass', False) else '❌ No'}
**Overall Success**: {'βœ… ACHIEVED' if eval_results.get('overall_success', False) else '❌ NOT ACHIEVED'}
**Target Distance**: 0.8m
**Achieved Distance**: {eval_results.get('final_robot_x_position', 0.0):.3f}m
**Success Rate**: {'100%' if eval_results.get('overall_success', False) else '0%'}
"""
    else:
        performance_summary = "## ❌ No successful attempts recorded"
    # Get LLM rationale
    llm_rationale = designer.best_attempt['design_reasoning'] if designer.best_attempt else "No design reasoning available"
    # Create downloadable specs
    download_specs = designer.save_design_specs_json()
    # Generate README content
    readme_content = designer.generate_readme_content()
    # Final log
    final_log = "\n".join(designer.process_log)
    final_log += f"\n\n🏁 DESIGN PROCESS COMPLETED"
    final_log += f"\nπŸ“Š Total iterations: {len(designer.all_attempts)}"
    final_log += f"\nπŸ† Best iteration: {designer.best_iteration}"
    final_log += f"\nβœ… Overall success: {designer.overall_success}"
    # BUG FIX: this function is a generator (it yields progress above), so a
    # plain `return (tuple)` would be discarded by Gradio and the final results
    # would never reach the UI. Yield the final tuple instead.
    yield (
        final_log,            # process_log
        overall_status,       # overall_status
        best_specs,           # best_design_specs
        simulation_gif,       # simulation_gif
        performance_summary,  # performance_summary
        llm_rationale,        # llm_rationale
        download_specs,       # download_specs
        readme_content        # readme_content
    )
def create_hackathon_gradio_interface():
    """Create enhanced Gradio interface for hackathon submission.

    Builds the Blocks layout (inputs, live log, results, README panel), wires
    the submit button to design_vehicle_task, and returns the Blocks object
    for the caller to launch().
    """
    # Custom CSS for better appearance (classes referenced by the HTML header)
    custom_css = """
    .main-header {
        text-align: center;
        background: linear-gradient(90deg, #667eea 0%, #764ba2 100%);
        color: white;
        padding: 20px;
        border-radius: 10px;
        margin-bottom: 20px;
    }
    .success-box {
        background-color: #d4edda;
        border: 1px solid #c3e6cb;
        color: #155724;
        padding: 15px;
        border-radius: 5px;
        margin: 10px 0;
    }
    .failure-box {
        background-color: #f8d7da;
        border: 1px solid #f5c6cb;
        color: #721c24;
        padding: 15px;
        border-radius: 5px;
        margin: 10px 0;
    }
    """
    with gr.Blocks(
        title="πŸ€–πŸš LLM Vehicle Designer - Hackathon Demo",
        theme=gr.themes.Soft(),
        css=custom_css
    ) as iface:
        # Header
        gr.HTML("""
        <div class="main-header">
        <h1>πŸ€–πŸš LLM-Agent-Designed Obstacle-Passing Vehicle System</h1>
        <h3>Hackathon Submission - Track 3: Agentic Demo Showcase</h3>
        <p>An intelligent system where an LLM agent iteratively designs robots and drones to meet your custom criteria!</p>
        </div>
        """)
        # User Input Section: task definition (left) and static process info (right)
        with gr.Row():
            with gr.Column(scale=2):
                gr.Markdown("## 🎯 Define Your Challenge")
                vehicle_type = gr.Dropdown(
                    label="Select Vehicle Type",
                    choices=["robot", "drone"],
                    value="robot",
                    info="Choose between ground robot or flying drone"
                )
                task_description = gr.Textbox(
                    label="Describe the Vehicle's Task & Success Criteria",
                    placeholder="e.g., 'Robot to cross a 5cm high box quickly and without falling over, then stop.' or 'Drone to fly over a 10cm wall, land 1m beyond it, and stay stable.'",
                    lines=3,
                    value="Design a robot that can cross the 5cm high obstacle smoothly and come to a controlled stop."
                )
                submit_btn = gr.Button(
                    "πŸš€ Start LLM Agent Design Process",
                    variant="primary",
                    size="lg"
                )
            with gr.Column(scale=1):
                gr.Markdown("## πŸ“‹ Process Info")
                gr.Markdown("""
                **Environment Setup:**
                - πŸ“¦ Obstacle: 5cm high Γ— 50cm wide Γ— 10cm deep
                - πŸ“ Position: x = 0.75m
                - 🎯 Success: Vehicle must reach x > 0.8m
                **Agent Capabilities:**
                - πŸ€– **Robot**: Wheel types, clearance, materials
                - 🚁 **Drone**: Propellers, flight height, stability
                - πŸ”„ **Max Iterations**: 5
                - 🧠 **LLM-Driven**: AI interprets your criteria
                """)
        gr.Markdown("---")
        # Real-time Process Section: streamed log plus simulation recording
        with gr.Row():
            with gr.Column(scale=3):
                gr.Markdown("## πŸ”„ Live Agent Process")
                process_log = gr.Textbox(
                    label="Full Process Log - Real-time Agent Activity",
                    lines=25,
                    max_lines=40,
                    show_copy_button=True,
                    interactive=False,
                    placeholder="Agent process log will appear here in real-time..."
                )
            with gr.Column(scale=2):
                gr.Markdown("## 🎬 Current Simulation")
                current_iteration_info = gr.Markdown("Ready to start...")
                simulation_gif = gr.Image(
                    label="Simulation Recording of Best Design's Trial",
                    type="filepath",
                    interactive=False
                )
        gr.Markdown("---")
        # Results Section
        gr.Markdown("## πŸ† Final Results & Analysis")
        overall_status = gr.Markdown(
            label="Overall Run Status",
            value="Waiting for process to complete..."
        )
        gr.Markdown("### --- Best Design Found ---")
        with gr.Row():
            with gr.Column(scale=2):
                best_design_specs = gr.JSON(
                    label="Best Vehicle Design Specifications (JSON)",
                    show_label=True
                )
                performance_summary = gr.Markdown(
                    label="Performance Summary of Best Design"
                )
            with gr.Column(scale=1):
                download_specs = gr.File(
                    label="πŸ“„ Download Design Specs (JSON)",
                    file_count="single",
                    type="filepath",
                    interactive=False
                )
                llm_rationale = gr.Textbox(
                    label="🧠 LLM's Rationale for Best Design",
                    lines=8,
                    interactive=False
                )
        gr.Markdown("---")
        # Hackathon Submission Section
        gr.Markdown("## πŸ“ Hackathon Submission Materials")
        readme_content = gr.Textbox(
            label="πŸ“‹ Generated README.md Content",
            lines=15,
            show_copy_button=True,
            interactive=False,
            placeholder="README content will be generated after process completion..."
        )
        # Set up interface interaction; outputs must match the 8-tuples
        # produced by design_vehicle_task, in order.
        submit_btn.click(
            fn=design_vehicle_task,
            inputs=[vehicle_type, task_description],
            outputs=[
                process_log,
                overall_status,
                best_design_specs,
                simulation_gif,
                performance_summary,
                llm_rationale,
                download_specs,
                readme_content
            ],
            show_progress=True
        )
        gr.Markdown("---")
        # Footer Information
        gr.Markdown("""
        ## 🎯 How the LLM Agent Works
        1. **🎯 Criteria Interpretation**: Agent analyzes your task description and defines success conditions
        2. **πŸ”§ Initial Design**: LLM proposes vehicle specifications based on requirements
        3. **βš—οΈ Physics Simulation**: Design tested in PyBullet with real physics
        4. **πŸ“Š Performance Analysis**: Results evaluated against interpreted criteria
        5. **πŸ”„ Iterative Refinement**: Agent uses feedback to improve design
        6. **πŸ† Best Design Selection**: System tracks and presents optimal solution
        **Key Innovation**: This demonstrates an autonomous AI agent that can design physical systems to meet user-defined functional requirements through simulation-based optimization.
        """)
    return iface
if __name__ == "__main__":
print("πŸ€–πŸš LLM-Agent-Designed Vehicle System - Hackathon Edition")
print("=" * 70)
if GRADIO_AVAILABLE:
print("πŸš€ Starting enhanced Gradio interface for hackathon...")
try:
# Create and launch enhanced interface
interface = create_hackathon_gradio_interface()
interface.launch(
server_name="0.0.0.0",
server_port=7860,
share=True,
show_error=True,
inbrowser=True
)
except Exception as e:
print(f"❌ Failed to start Gradio interface: {e}")
print("Please check your installation and try again.")
else:
print("❌ Gradio not available. Please install requirements:")
print("pip install -r requirements.txt")