sam133 commited on
Commit
2fbee2e
Β·
1 Parent(s): b3e9800

Finalize the rest of the files

Browse files
demo_mcp_usage.py ADDED
@@ -0,0 +1,357 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Demo script showcasing MCP server integration for LLM-Agent-Designed Vehicle System
4
+
5
+ This script demonstrates how to interact with our MCP server to design robots and drones
6
+ for obstacle crossing using the Model Context Protocol.
7
+ """
8
+
9
+ import asyncio
10
+ import json
11
+ import time
12
+ from typing import Dict, Any
13
+
14
+ # Note: In a real implementation, you would use the actual MCP client
15
+ # For demo purposes, we'll simulate the MCP calls by importing our server directly
16
+ import mcp_server
17
+
18
class MCPDemoClient:
    """Stand-in MCP client used for demonstrations.

    A real deployment would speak the MCP wire protocol to a running
    server; here the server module is imported directly and tool names
    are resolved as attributes on it.
    """

    def __init__(self):
        # Direct module reference instead of a network connection (demo only).
        self.server = mcp_server

    async def call_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Simulate MCP tool call"""
        print(f"πŸ”§ Calling MCP tool: {tool_name}")
        print(f"πŸ“Š Arguments: {json.dumps(arguments, indent=2)}")
        print("─" * 50)

        # Resolve the tool on the server module; unknown names produce an
        # error payload rather than raising, mirroring an MCP error response.
        handler = getattr(self.server, tool_name, None)
        if not handler:
            return {"error": f"Tool {tool_name} not found"}
        return handler(**arguments)
37
+
38
async def demo_obstacle_info():
    """Demo 1: fetch and display the obstacle specification via MCP."""
    print("🎯 DEMO 1: Getting Obstacle Information")
    print("=" * 60)

    info = await MCPDemoClient().call_tool("get_obstacle_info", {})

    print("βœ… Obstacle Information:")
    print(json.dumps(info, indent=2))
    print("\n")
49
+
50
async def demo_design_parameters():
    """Demo 2: list the tunable design parameters for each vehicle type."""
    print("πŸ”§ DEMO 2: Getting Design Parameters")
    print("=" * 60)

    client = MCPDemoClient()

    print("πŸ€– Robot Design Parameters:")
    params = await client.call_tool("get_design_parameters", {"vehicle_type": "robot"})
    print(json.dumps(params, indent=2))
    print()

    print("🚁 Drone Design Parameters:")
    params = await client.call_tool("get_design_parameters", {"vehicle_type": "drone"})
    print(json.dumps(params, indent=2))
    print("\n")
68
+
69
async def demo_robot_design():
    """Demo 3: design -> simulate -> evaluate pipeline for a robot.

    Each stage runs only if the previous stage reported success, so a
    failed design validation skips simulation and evaluation entirely.
    """
    print("πŸ€– DEMO 3: Robot Design and Simulation")
    print("=" * 60)

    client = MCPDemoClient()

    # Fixed example specification; valid values come from get_design_parameters.
    robot_specs = {
        "wheel_type": "large_smooth",
        "body_clearance_cm": 7,
        "approach_sensor_enabled": True,
        "main_material": "light_plastic"
    }

    design_result = await client.call_tool("design_vehicle", {
        "vehicle_type": "robot",
        "specifications": robot_specs,
        "design_reasoning": "Large smooth wheels for obstacle climbing, moderate clearance for 5cm obstacle, light material for mobility"
    })

    print("βœ… Robot Design Result:")
    print(json.dumps(design_result, indent=2))
    print()

    if design_result.get("success"):
        # Design validated -- run the physics simulation.
        print("πŸƒ Running robot simulation...")
        sim_result = await client.call_tool("simulate_vehicle", {
            "vehicle_type": "robot",
            "specifications": robot_specs,
            "simulation_duration": 5.0
        })

        print("βœ… Simulation Result:")
        print(json.dumps(sim_result, indent=2))
        print()

        if sim_result.get("success"):
            # Score the simulation telemetry against the success criteria.
            print("πŸ“Š Evaluating robot performance...")
            eval_result = await client.call_tool("evaluate_performance", {
                "simulation_feedback": sim_result["simulation_feedback"]
            })

            print("βœ… Evaluation Result:")
            print(json.dumps(eval_result, indent=2))

            if eval_result.get("success"):
                success_criteria = eval_result["success_criteria"]
                print(f"\n🎯 Performance Summary:")
                print(f"   Crossed Obstacle: {'βœ…' if success_criteria['crossed_obstacle'] else '❌'}")
                print(f"   Remained Stable: {'βœ…' if success_criteria['remained_stable'] else '❌'}")
                print(f"   Minimal Collision: {'βœ…' if success_criteria['minimal_collision'] else '❌'}")
                print(f"   Overall Success: {'βœ…' if success_criteria['overall_success'] else '❌'}")

    print("\n")
126
+
127
async def demo_drone_design():
    """Demo 4: design -> simulate -> evaluate pipeline for a drone.

    Structurally parallel to demo_robot_design; only the specification
    keys and the reasoning string differ.
    """
    print("🚁 DEMO 4: Drone Design and Simulation")
    print("=" * 60)

    client = MCPDemoClient()

    # Fixed example specification; valid values come from get_design_parameters.
    drone_specs = {
        "propeller_size": "medium",
        "flight_height_cm": 25,
        "stability_mode": "auto_hover",
        "main_material": "light_carbon_fiber"
    }

    design_result = await client.call_tool("design_vehicle", {
        "vehicle_type": "drone",
        "specifications": drone_specs,
        "design_reasoning": "Medium propellers for balanced thrust, 25cm flight height to safely clear obstacle, auto-hover for stability, carbon fiber for lightweight performance"
    })

    print("βœ… Drone Design Result:")
    print(json.dumps(design_result, indent=2))
    print()

    if design_result.get("success"):
        # Design validated -- run the physics simulation.
        print("πŸ›Έ Running drone simulation...")
        sim_result = await client.call_tool("simulate_vehicle", {
            "vehicle_type": "drone",
            "specifications": drone_specs,
            "simulation_duration": 5.0
        })

        print("βœ… Simulation Result:")
        print(json.dumps(sim_result, indent=2))
        print()

        if sim_result.get("success"):
            # Score the simulation telemetry against the success criteria.
            print("πŸ“Š Evaluating drone performance...")
            eval_result = await client.call_tool("evaluate_performance", {
                "simulation_feedback": sim_result["simulation_feedback"]
            })

            print("βœ… Evaluation Result:")
            print(json.dumps(eval_result, indent=2))

            if eval_result.get("success"):
                success_criteria = eval_result["success_criteria"]
                print(f"\n🎯 Performance Summary:")
                print(f"   Crossed Obstacle: {'βœ…' if success_criteria['crossed_obstacle'] else '❌'}")
                print(f"   Remained Stable: {'βœ…' if success_criteria['remained_stable'] else '❌'}")
                print(f"   Minimal Collision: {'βœ…' if success_criteria['minimal_collision'] else '❌'}")
                print(f"   Overall Success: {'βœ…' if success_criteria['overall_success'] else '❌'}")

    print("\n")
184
+
185
async def demo_iterative_design():
    """Demo 5: run the server-driven iterative design loop for a robot.

    Unlike demos 3/4 this delegates the whole design-simulate-evaluate
    cycle to a single MCP tool with an iteration budget.
    """
    print("πŸ”„ DEMO 5: Complete Iterative Design Process")
    print("=" * 60)

    client = MCPDemoClient()

    print("πŸ€– Running iterative robot design process...")
    iterative_result = await client.call_tool("iterative_design_process", {
        "vehicle_type": "robot",
        "task_description": "Design a robot that can efficiently cross the 5cm obstacle",
        "max_iterations": 3
    })

    print("βœ… Iterative Design Result:")
    print(json.dumps(iterative_result, indent=2))

    if iterative_result.get("success"):
        print(f"\nπŸŽ‰ SUCCESS! Robot design completed in {iterative_result['iterations_needed']} iterations")
        print(f"πŸ”§ Final Design: {iterative_result['successful_design']}")
    else:
        print(f"\n❌ Design process did not achieve success within max iterations")
        # best_attempt is only present when at least one iteration produced a design.
        if "best_attempt" in iterative_result:
            print(f"πŸ”§ Best Attempt: {iterative_result['best_attempt'].get('specifications', {})}")

    print("\n")
212
+
213
async def demo_comparison():
    """Demo 6: simulate one robot and one drone design and compare results.

    Both vehicles get the same 8-second simulation budget; each branch is
    evaluated independently and prints its own success/position summary.

    Fix: the drone failure marker was garbled replacement characters
    instead of the '❌' glyph used by the parallel robot branch.
    """
    print("βš”οΈ DEMO 6: Robot vs Drone Comparison")
    print("=" * 60)

    client = MCPDemoClient()

    # Design specifications for comparison
    robot_specs = {
        "wheel_type": "tracked_base",
        "body_clearance_cm": 8,
        "approach_sensor_enabled": True,
        "main_material": "sturdy_metal_alloy"
    }

    drone_specs = {
        "propeller_size": "large_stable",
        "flight_height_cm": 20,
        "stability_mode": "auto_hover",
        "main_material": "sturdy_aluminum"
    }

    print("πŸ€– Testing Robust Robot Design...")
    robot_sim = await client.call_tool("simulate_vehicle", {
        "vehicle_type": "robot",
        "specifications": robot_specs,
        "simulation_duration": 8.0
    })

    print("🚁 Testing Stable Drone Design...")
    drone_sim = await client.call_tool("simulate_vehicle", {
        "vehicle_type": "drone",
        "specifications": drone_specs,
        "simulation_duration": 8.0
    })

    # Evaluate both runs; a failed simulation simply skips its summary.
    if robot_sim.get("success"):
        robot_eval = await client.call_tool("evaluate_performance", {
            "simulation_feedback": robot_sim["simulation_feedback"]
        })

        print("\nπŸ€– Robot Performance:")
        if robot_eval.get("success"):
            robot_success = robot_eval["success_criteria"]["overall_success"]
            print(f"   Overall Success: {'βœ…' if robot_success else '❌'}")
            print(f"   Final Position: {robot_eval['evaluation_results'].get('final_robot_x_position', 0):.2f}m")

    if drone_sim.get("success"):
        drone_eval = await client.call_tool("evaluate_performance", {
            "simulation_feedback": drone_sim["simulation_feedback"]
        })

        print("\n🚁 Drone Performance:")
        if drone_eval.get("success"):
            drone_success = drone_eval["success_criteria"]["overall_success"]
            # Was corrupted to replacement characters; now matches the robot branch.
            print(f"   Overall Success: {'βœ…' if drone_success else '❌'}")
            print(f"   Final Position: {drone_eval['evaluation_results'].get('final_robot_x_position', 0):.2f}m")

    print("\n")
273
+
274
async def main():
    """Run all six demo scenarios in sequence.

    Any exception from a demo aborts the remaining ones; the handler
    prints a hint because the demos depend on the PyBullet-backed
    simulation environment being installed and functional.
    """
    print("πŸš€ MCP Vehicle Design System Demo")
    print("=" * 80)
    print("This demo showcases the Model Context Protocol integration")
    print("for our LLM-Agent-Designed Vehicle System.")
    print("=" * 80)
    print()

    try:
        # Run all demos
        await demo_obstacle_info()
        await demo_design_parameters()
        await demo_robot_design()
        await demo_drone_design()
        await demo_iterative_design()
        await demo_comparison()

        print("🎊 Demo completed successfully!")
        print("\n" + "=" * 80)
        print("πŸ’‘ MCP Integration Benefits Demonstrated:")
        print("   βœ… Standardized tool access for vehicle design")
        print("   βœ… Support for both robots and drones")
        print("   βœ… Complete design-simulate-evaluate loop")
        print("   βœ… Iterative optimization with LLM feedback")
        print("   βœ… Performance evaluation and comparison")
        print("   βœ… Type-safe parameter validation")
        print("=" * 80)

    except Exception as e:
        # Broad catch is intentional here: this is the top-level demo boundary.
        print(f"❌ Demo failed with error: {e}")
        print("\nNote: This demo requires the enhanced simulation environment.")
        print("Make sure all dependencies are installed and PyBullet is working.")
307
+
308
def run_cli_demo():
    """CLI version that can be run without MCP client"""
    print("πŸ”§ MCP CLI Demo")
    print("=" * 50)
    print("This demonstrates the MCP tools available in our system:")
    print()

    # Static catalogue of the tools exposed by mcp_server.
    tool_summaries = (
        "get_obstacle_info() - Get obstacle specifications",
        "get_design_parameters(vehicle_type) - Get available parameters",
        "design_vehicle(vehicle_type, specs, reasoning) - Design validation",
        "simulate_vehicle(vehicle_type, specs, duration) - Run simulation",
        "evaluate_performance(feedback) - Evaluate performance",
        "iterative_design_process(vehicle_type, task, max_iter) - Complete process",
    )

    print("πŸ“‹ Available MCP Tools:")
    for idx, summary in enumerate(tool_summaries, start=1):
        print(f"   {idx}. {summary}")

    print()
    print("🎯 Example Usage:")
    print("   # Get obstacle info")
    print("   mcp call get_obstacle_info")
    print()
    print("   # Design a robot")
    print("   mcp call design_vehicle \\")
    print("     --vehicle_type robot \\")
    print("     --specifications '{\"wheel_type\": \"large_smooth\", \"body_clearance_cm\": 7}' \\")
    print("     --design_reasoning 'Balanced design for obstacle crossing'")
    print()
    print("   # Run complete design process")
    print("   mcp call iterative_design_process \\")
    print("     --vehicle_type drone \\")
    print("     --max_iterations 3")
    print()
    print("πŸš€ To use with real MCP client:")
    print("   1. Start server: python mcp_server.py")
    print("   2. Connect client to our MCP server")
    print("   3. Use the tools listed above")
349
+
350
if __name__ == "__main__":
    import sys

    # `--cli` prints a static tool reference without touching the simulator;
    # otherwise run the full async demo suite.
    if len(sys.argv) > 1 and sys.argv[1] == "--cli":
        run_cli_demo()
    else:
        print("Starting full MCP demo...")
        asyncio.run(main())
main_orchestrator_enhanced.py ADDED
@@ -0,0 +1,702 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import ssl
3
+ import time
4
+ import imageio
5
+ import numpy as np
6
+ from PIL import Image
7
+ import json
8
+ from datetime import datetime
9
+ import simulation_env_enhanced as simulation_env
10
+ import llm_interface_enhanced as llm_interface
11
+ import evaluation
12
+
13
+ # SSL workaround for Gradio issues
14
+ try:
15
+ import certifi
16
+ os.environ['SSL_CERT_FILE'] = certifi.where()
17
+ except ImportError:
18
+ pass
19
+
20
+ # Try to disable SSL verification as a workaround
21
+ try:
22
+ ssl._create_default_https_context = ssl._create_unverified_context
23
+ except AttributeError:
24
+ pass
25
+
26
+ # Try to import Gradio with error handling
27
+ GRADIO_AVAILABLE = False
28
+ try:
29
+ import gradio as gr
30
+ GRADIO_AVAILABLE = True
31
+ print("βœ“ Gradio imported successfully")
32
+ except Exception as e:
33
+ print(f"⚠ Gradio import failed: {e}")
34
+ print("Will use console-based interface instead")
35
+ GRADIO_AVAILABLE = False
36
+
37
+ # Global configuration
38
+ MAX_ITERATIONS = 5
39
+ SIMULATION_DURATION_SEC = 10
40
+ OBSTACLE_FAR_EDGE_X = 0.8
41
+
42
def run_design_and_simulation_iteration(llm_prompt_func, previous_attempt_data, current_iteration, vehicle_type="robot"):
    """Run one complete design and simulation iteration for robot or drone.

    Args:
        llm_prompt_func: zero-argument callable returning the LLM prompt text.
        previous_attempt_data: prior attempt dict; currently unused here
            (the caller bakes history into the prompt), kept for interface
            compatibility.
        current_iteration: 1-based iteration number, reported in error payloads.
        vehicle_type: "robot" or "drone".

    Returns:
        Tuple of (llm_response_dict, evaluation_results, llm_feedback_string,
        frames_for_gif). On any failure a synthetic error tuple with an empty
        frame list is returned instead of raising.

    Fix: both `except:` clauses were bare, which also swallowed
    KeyboardInterrupt/SystemExit; narrowed to `except Exception:`.
    """

    try:
        # Call LLM for vehicle design
        prompt = llm_prompt_func()
        llm_response_dict = llm_interface.call_llm_api(prompt)

        if not llm_response_dict:
            raise Exception("Failed to get valid LLM response")

        # Extract vehicle specs and validate
        vehicle_specs = llm_response_dict.get('robot_specs', {})
        vehicle_specs["vehicle_type"] = vehicle_type  # Add vehicle type
        design_reasoning = llm_response_dict.get('design_reasoning', 'No reasoning provided')

        # Setup PyBullet environment
        obstacle_id, plane_id = simulation_env.setup_pybullet_environment()

        # Create vehicle (robot or drone); drones carry extra physics props.
        if vehicle_type == "robot":
            vehicle_id, joint_indices, v_type = simulation_env.create_robot(vehicle_specs)
            vehicle_props = None
        elif vehicle_type == "drone":
            vehicle_id, joint_indices, v_type, vehicle_props = simulation_env.create_drone(vehicle_specs)
        else:
            raise ValueError(f"Unknown vehicle type: {vehicle_type}")

        # Run simulation loop
        current_sim_time = 0
        frames_for_gif = []
        final_pybullet_feedback = {}
        start_time = time.time()

        # 240 steps per simulated second (PyBullet's default physics rate).
        simulation_steps = int(SIMULATION_DURATION_SEC * 240)
        for step in range(simulation_steps):
            simulation_env.run_simulation_step(
                vehicle_id, joint_indices, {}, vehicle_type,
                vehicle_props if vehicle_type == "drone" else None
            )

            # NOTE(review): this is wall-clock time, not simulated time — the
            # time-based exit below therefore depends on host speed; confirm
            # whether that is intentional.
            current_sim_time = time.time() - start_time

            # Capture frame every 24 steps (10 fps for GIF)
            if step % 24 == 0:
                try:
                    frame = simulation_env.capture_frame()
                    frames_for_gif.append(frame)
                except Exception:
                    pass  # Frame capture is best-effort; skip on failure.

            # Get current feedback
            pybullet_feedback_snapshot = simulation_env.get_simulation_feedback(
                vehicle_id, obstacle_id, start_time, current_sim_time, vehicle_type
            )
            final_pybullet_feedback = pybullet_feedback_snapshot

            # Check for early exit conditions
            vehicle_x_pos = pybullet_feedback_snapshot['robot_position'][0]  # Using robot_position for compatibility
            is_stable = pybullet_feedback_snapshot['is_robot_upright']  # Using is_robot_upright for compatibility

            # Exit if vehicle crossed well past obstacle or became unstable
            if vehicle_x_pos > OBSTACLE_FAR_EDGE_X + 0.1 or not is_stable:
                break

            # Exit if simulation time exceeded
            if current_sim_time > SIMULATION_DURATION_SEC:
                break

        # Evaluate results
        evaluation_results = evaluation.evaluate_simulation_outcome(
            final_pybullet_feedback, OBSTACLE_FAR_EDGE_X
        )
        llm_feedback_string = evaluation.format_feedback_for_llm(evaluation_results)

        # Cleanup PyBullet
        simulation_env.reset_simulation()

        return llm_response_dict, evaluation_results, llm_feedback_string, frames_for_gif

    except Exception as e:
        print(f"Error in simulation iteration: {e}")
        # Best-effort cleanup; reset may itself fail if setup never completed.
        try:
            simulation_env.reset_simulation()
        except Exception:
            pass

        # Synthetic payloads mirroring the success-path tuple shape.
        error_response = {
            'robot_design_iteration': current_iteration,
            'design_reasoning': f'Error occurred: {str(e)}',
            'robot_specs': {}
        }
        error_evaluation = {
            'robot_crossed_obstacle': False,
            'no_significant_collision_with_obstacle_during_pass': False,
            'robot_remains_upright': False,
            'overall_success': False,
            'specific_failure_point': 'simulation_error',
            'final_robot_x_position': 0.0
        }
        error_feedback = f"Simulation failed with error: {str(e)}"

        return error_response, error_evaluation, error_feedback, []
149
+
150
def create_gif_from_frames(frames, filename):
    """Write captured simulation frames to outputs/<filename> as a 10 fps GIF.

    Returns the GIF path on success, or None when there are no frames or
    when writing fails (GIF output is best-effort).
    """
    try:
        # Make sure the output directory exists before writing.
        outputs_dir = "outputs"
        if not os.path.exists(outputs_dir):
            os.makedirs(outputs_dir)

        gif_path = os.path.join(outputs_dir, filename)

        # Normalize PIL images to numpy arrays; pass raw arrays through as-is.
        frame_arrays = [
            np.array(frame) if isinstance(frame, Image.Image) else frame
            for frame in frames
        ]

        if not frame_arrays:
            return None

        imageio.mimsave(gif_path, frame_arrays, fps=10, loop=0)
        return gif_path

    except Exception as e:
        print(f"Error creating GIF: {e}")
        return None
178
+
179
def find_best_attempt(all_attempts_data):
    """Return the attempt that progressed furthest along x, or None if empty."""
    if not all_attempts_data:
        return None

    # Ranking criterion: final x position reached in the simulation.
    def final_x(attempt):
        return attempt['evaluation_results']['final_robot_x_position']

    return max(all_attempts_data, key=final_x)
188
+
189
def design_vehicle_for_obstacle(vehicle_type, user_task_description):
    """Generator driving the iterative design loop for the Gradio UI.

    Yields (process_log_text, gif_path_or_None, specs_or_None) after each
    iteration so the UI can stream progress; returns early on the first
    successful design, otherwise yields the best attempt after
    MAX_ITERATIONS.

    Args:
        vehicle_type: "robot" or "drone".
        user_task_description: free-text task shown in the log.
    """

    # Initialize
    iteration = 1
    all_attempts_data = []
    overall_process_log = []

    vehicle_name = "robot" if vehicle_type == "robot" else "drone"
    overall_process_log.append(f"Starting {vehicle_name} design process for task: {user_task_description}")
    overall_process_log.append(f"Target: Cross 5cm high obstacle at x=0.75m")
    overall_process_log.append(f"Success criteria: Cross obstacle (x>0.8m), stay upright/stable, minimal collision")
    overall_process_log.append("")

    # Main iteration loop
    while iteration <= MAX_ITERATIONS:
        overall_process_log.append(f"--- Iteration {iteration} ---")

        # Yield current progress (no GIF/specs yet for this iteration).
        yield "\n".join(overall_process_log), None, None

        try:
            # First iteration gets the initial prompt; later ones get an
            # iterative prompt seeded with the previous attempt's feedback.
            if iteration == 1:
                if vehicle_type == "robot":
                    prompt_func = lambda: llm_interface.generate_initial_robot_design_prompt()
                else:  # drone
                    prompt_func = lambda: llm_interface.generate_initial_drone_design_prompt()
                prev_attempt = None
            else:
                last_attempt = all_attempts_data[-1]
                if vehicle_type == "robot":
                    prompt_func = lambda: llm_interface.generate_iterative_robot_design_prompt(
                        last_attempt, iteration
                    )
                else:  # drone
                    prompt_func = lambda: llm_interface.generate_iterative_drone_design_prompt(
                        last_attempt, iteration
                    )
                prev_attempt = last_attempt

            # Run design and simulation iteration
            llm_design, eval_results, feedback_str, frames = run_design_and_simulation_iteration(
                prompt_func, prev_attempt, iteration, vehicle_type
            )

            # Store attempt data ('robot_specs' key is reused for drones too)
            current_attempt_data = {
                "llm_design": llm_design,
                "robot_specs": llm_design.get('robot_specs', {}),
                "design_reasoning": llm_design.get('design_reasoning', ''),
                "evaluation_results": eval_results,
                "feedback_from_simulation": feedback_str
            }
            all_attempts_data.append(current_attempt_data)

            # Update log
            overall_process_log.append(f"LLM Design ({iteration}): {llm_design.get('robot_specs')}")
            overall_process_log.append(f"Design Reasoning: {llm_design.get('design_reasoning', 'N/A')}")
            overall_process_log.append(f"Simulation Feedback: {feedback_str}")
            overall_process_log.append("")

            # Create GIF from frames
            gif_path = None
            if frames:
                gif_filename = f"{vehicle_name}_iteration_{iteration}.gif"
                gif_path = create_gif_from_frames(frames, gif_filename)

            # Check for success
            if eval_results.get('overall_success', False):
                overall_process_log.append(f"πŸŽ‰ SUCCESS! {vehicle_name.title()} passed the obstacle in {iteration} iterations.")
                overall_process_log.append(f"Final {vehicle_name} specs: {llm_design.get('robot_specs')}")

                # Return final results
                final_log = "\n".join(overall_process_log)
                final_specs = llm_design.get('robot_specs', {})

                yield final_log, gif_path, final_specs
                return

            # Yield current progress with GIF
            yield "\n".join(overall_process_log), gif_path, llm_design.get('robot_specs', {})

        except Exception as e:
            # Log and continue to the next iteration rather than aborting.
            overall_process_log.append(f"Error in iteration {iteration}: {str(e)}")
            overall_process_log.append("")

        iteration += 1

    # Max iterations reached without success
    overall_process_log.append(f"❌ FAILURE: Max {MAX_ITERATIONS} iterations reached. Obstacle not passed.")

    # Find best attempt
    best_attempt = find_best_attempt(all_attempts_data)
    best_specs = best_attempt['robot_specs'] if best_attempt else "No valid designs generated"

    if best_attempt:
        overall_process_log.append(f"Best attempt reached x={best_attempt['evaluation_results']['final_robot_x_position']:.2f}m")
        overall_process_log.append(f"Best {vehicle_name} specs: {best_specs}")

    # Create final GIF from last attempt.
    # NOTE(review): `'frames' in locals()` leaks the loop variable from the
    # final iteration — if the last iteration raised before assignment this
    # silently reuses an older iteration's frames; consider tracking frames
    # explicitly per attempt.
    final_gif_path = None
    if all_attempts_data and 'frames' in locals():
        final_gif_path = create_gif_from_frames(frames, f"final_{vehicle_name}_attempt.gif")

    final_log = "\n".join(overall_process_log)
    yield final_log, final_gif_path, best_specs
296
+
297
def console_design_vehicle_for_obstacle(vehicle_type, user_task_description):
    """Console-based version of the vehicle design system.

    Same iterative loop as the Gradio generator, but prints progress,
    tracks the best design via is_current_better(), and persists it with
    save_best_design_json().

    Returns:
        (True, final_specs) on success, otherwise (False, best_specs) or
        (False, "No valid designs generated").
    """

    vehicle_name = "robot" if vehicle_type == "robot" else "drone"

    print("=" * 60)
    print(f"LLM {vehicle_name.title()} Design System - Console Interface")
    print("=" * 60)
    print(f"Task: {user_task_description}")
    print(f"Target: Cross 5cm high obstacle at x=0.75m")
    print(f"Success criteria: Cross obstacle (x>0.8m), stay upright/stable, minimal collision")
    print("")

    # Initialize best design tracking
    iteration = 1
    all_attempts_data = []
    best_attempt_data = None
    best_iteration = 0

    # Main iteration loop
    while iteration <= MAX_ITERATIONS:
        print(f"--- Iteration {iteration} ---")

        try:
            # First iteration gets the initial prompt; later ones get an
            # iterative prompt seeded with the previous attempt's feedback.
            if iteration == 1:
                if vehicle_type == "robot":
                    prompt_func = lambda: llm_interface.generate_initial_robot_design_prompt()
                else:  # drone
                    prompt_func = lambda: llm_interface.generate_initial_drone_design_prompt()
                prev_attempt = None
            else:
                last_attempt = all_attempts_data[-1]
                if vehicle_type == "robot":
                    prompt_func = lambda: llm_interface.generate_iterative_robot_design_prompt(
                        last_attempt, iteration
                    )
                else:  # drone
                    prompt_func = lambda: llm_interface.generate_iterative_drone_design_prompt(
                        last_attempt, iteration
                    )
                prev_attempt = last_attempt

            # Run design and simulation iteration
            llm_design, eval_results, feedback_str, frames = run_design_and_simulation_iteration(
                prompt_func, prev_attempt, iteration, vehicle_type
            )

            # Store attempt data ('robot_specs' key is reused for drones too)
            current_attempt_data = {
                "llm_design": llm_design,
                "robot_specs": llm_design.get('robot_specs', {}),
                "design_reasoning": llm_design.get('design_reasoning', ''),
                "evaluation_results": eval_results,
                "feedback_from_simulation": feedback_str,
                "iteration": iteration
            }
            all_attempts_data.append(current_attempt_data)

            # Check if this is the best design so far
            if best_attempt_data is None or is_current_better(
                eval_results, iteration,
                best_attempt_data['evaluation_results'], best_iteration
            ):
                best_attempt_data = current_attempt_data
                best_iteration = iteration
                print(f"πŸ† New best design found in iteration {iteration}!")

            # Display results
            print(f"LLM Design ({iteration}): {llm_design.get('robot_specs')}")
            print(f"Design Reasoning: {llm_design.get('design_reasoning', 'N/A')}")
            print(f"Simulation Feedback: {feedback_str}")
            print("")

            # Create GIF from frames
            if frames:
                gif_filename = f"{vehicle_name}_iteration_{iteration}.gif"
                gif_path = create_gif_from_frames(frames, gif_filename)
                if gif_path:
                    print(f"Simulation GIF saved: {gif_path}")

            # Check for success
            if eval_results.get('overall_success', False):
                print(f"πŸŽ‰ SUCCESS! {vehicle_name.title()} passed the obstacle in {iteration} iterations.")
                print(f"Final {vehicle_name} specs: {llm_design.get('robot_specs')}")

                # Save best design JSON
                json_file = save_best_design_json(best_attempt_data, vehicle_type, iteration)

                return True, llm_design.get('robot_specs')

        except Exception as e:
            # Log and continue to the next iteration rather than aborting.
            print(f"Error in iteration {iteration}: {str(e)}")

        iteration += 1

    # Max iterations reached without success
    print(f"❌ FAILURE: Max {MAX_ITERATIONS} iterations reached. Obstacle not passed.")

    # Display best design summary
    if best_attempt_data:
        print(f"\nπŸ† BEST DESIGN SUMMARY (from iteration {best_iteration}):")
        print(f"   β€’ Final Position: {best_attempt_data['evaluation_results']['final_robot_x_position']:.3f}m")
        print(f"   β€’ Success: {best_attempt_data['evaluation_results']['overall_success']}")

        best_specs = best_attempt_data['robot_specs']
        if vehicle_type == "robot":
            print(f"   β€’ Wheel Type: {best_specs.get('wheel_type', 'unknown')}")
            print(f"   β€’ Body Clearance: {best_specs.get('body_clearance_cm', 0)}cm")
            print(f"   β€’ Material: {best_specs.get('main_material', 'unknown')}")
        else:  # drone
            print(f"   β€’ Propeller Size: {best_specs.get('propeller_size', 'unknown')}")
            print(f"   β€’ Flight Height: {best_specs.get('flight_height_cm', 0)}cm")
            print(f"   β€’ Material: {best_specs.get('main_material', 'unknown')}")

        print(f"   β€’ Design Reasoning: {best_attempt_data['design_reasoning']}")

        # Save best design JSON
        json_file = save_best_design_json(best_attempt_data, vehicle_type, MAX_ITERATIONS)

        return False, best_specs
    else:
        print("No valid designs generated")
        return False, "No valid designs generated"
421
+
422
+ # Gradio Interface Setup
423
def create_gradio_interface():
    """Create and configure Gradio interface with vehicle type selection.

    Builds a Blocks layout: vehicle-type radio + task textbox on the left,
    static process info on the right, then a streaming log, GIF viewer and
    JSON spec panel wired to the design_vehicle_for_obstacle generator.

    Returns:
        The configured gr.Blocks instance (caller launches it).
    """

    with gr.Blocks(title="LLM Vehicle Designer - Robots & Drones", theme=gr.themes.Soft()) as iface:
        gr.Markdown("# πŸ€–πŸš LLM-Agent-Designed Obstacle-Passing Vehicle System")
        gr.Markdown("This system uses an LLM agent to iteratively design **robots** or **drones** that can pass a 5cm high obstacle.")

        with gr.Row():
            with gr.Column(scale=2):
                vehicle_type = gr.Radio(
                    choices=["robot", "drone"],
                    label="Vehicle Type",
                    value="robot",
                    info="Choose between a ground robot or flying drone"
                )

                task_input = gr.Textbox(
                    label="Task Description",
                    value="Design a vehicle that can pass the 5cm high obstacle",
                    placeholder="Describe what you want the vehicle to accomplish...",
                    lines=2
                )

                submit_btn = gr.Button("πŸš€ Start Vehicle Design Process", variant="primary", size="lg")

            with gr.Column(scale=1):
                gr.Markdown("### Process Info")
                gr.Markdown("- **Obstacle**: 5cm high, 50cm wide, 10cm deep")
                gr.Markdown("- **Success**: Vehicle crosses (x > 0.8m), stays stable")
                gr.Markdown("- **Max Iterations**: 5")
                gr.Markdown("- **Robot**: Wheels, clearance, materials")
                gr.Markdown("- **Drone**: Propellers, flight height, materials")

        with gr.Row():
            with gr.Column(scale=2):
                process_log = gr.Textbox(
                    label="πŸ”„ Design Process Log",
                    lines=20,
                    max_lines=30,
                    show_copy_button=True
                )

            with gr.Column(scale=1):
                # type="filepath" so the generator can yield GIF paths directly.
                simulation_gif = gr.Image(
                    label="🎬 Simulation Visualization",
                    type="filepath"
                )

                vehicle_specs = gr.JSON(
                    label="πŸ”§ Final Vehicle Specifications"
                )

        # Wire the button to the generator; each yield streams
        # (log, gif, specs) into the three output components.
        submit_btn.click(
            fn=design_vehicle_for_obstacle,
            inputs=[vehicle_type, task_input],
            outputs=[process_log, simulation_gif, vehicle_specs]
        )

        gr.Markdown("---")
        gr.Markdown("### How it works:")
        gr.Markdown("1. **Vehicle Selection**: Choose robot (ground) or drone (flying)")
        gr.Markdown("2. **LLM Design**: AI agent proposes vehicle parameters")
        gr.Markdown("3. **Simulation**: Vehicle tested in PyBullet physics")
        gr.Markdown("4. **Evaluation**: Performance measured against success criteria")
        gr.Markdown("5. **Iteration**: Feedback used to improve design")
        gr.Markdown("6. **Success/Failure**: Process continues until success or max iterations")

    return iface
492
+
493
def is_current_better(current_eval, current_iteration, best_eval, best_iteration):
    """
    Decide whether the current design should replace the best design so far.

    Priority order:
      1. A design achieving overall_success == True beats a failed one.
      2. Among successful designs, the one needing fewer iterations wins.
      3. Among failed designs, crossing the obstacle beats not crossing it.
      4. Otherwise, the furthest final_robot_x_position wins.
      5. On a complete tie, the current (most recent) design wins.

    Args:
        current_eval (dict): evaluation results of the current attempt.
        current_iteration (int): iteration number of the current attempt.
        best_eval (dict): evaluation results of the best attempt so far.
        best_iteration (int): iteration number of the best attempt so far.

    Returns:
        bool: True if the current design should become the new best.
    """
    cur_ok = bool(current_eval.get('overall_success', False))
    prev_ok = bool(best_eval.get('overall_success', False))

    # Priorities 1 and 2: success dominates; ties on success fall back to
    # iteration count (fewer is better).
    if cur_ok != prev_ok:
        return cur_ok
    if cur_ok:
        return current_iteration < best_iteration

    # Priority 3: neither succeeded -- prefer the one that crossed anyway.
    cur_crossed = bool(current_eval.get('robot_crossed_obstacle', False))
    prev_crossed = bool(best_eval.get('robot_crossed_obstacle', False))
    if cur_crossed != prev_crossed:
        return cur_crossed

    # Priorities 4 and 5: furthest travel wins; an exact tie keeps the
    # most recent design (>= rather than >).
    cur_x = current_eval.get('final_robot_x_position', 0.0)
    prev_x = best_eval.get('final_robot_x_position', 0.0)
    return cur_x >= prev_x
535
+
536
def format_best_design_performance(eval_results):
    """Format evaluation results into a user-friendly summary string.

    Args:
        eval_results (dict): evaluation dict (keys like 'overall_success',
            'final_robot_x_position', ...).  Missing keys fall back to
            failure / 0.0 defaults via .get().

    Returns:
        str: multi-line, emoji-decorated report with leading/trailing
        whitespace stripped.
    """

    success_status = "βœ… SUCCESS" if eval_results.get('overall_success', False) else "❌ FAILED"

    # NOTE: the 0.8m threshold below mirrors the obstacle far-edge constant
    # used by the success criteria elsewhere in this system.
    performance_text = f"""
{success_status}

πŸ“Š **Performance Metrics:**
β€’ Final Position: {eval_results.get('final_robot_x_position', 0.0):.3f}m
β€’ Crossed Obstacle: {'βœ… Yes' if eval_results.get('robot_crossed_obstacle', False) else '❌ No'}
β€’ Remained Upright: {'βœ… Yes' if eval_results.get('robot_remains_upright', False) else '❌ No'}
β€’ Clean Pass: {'βœ… Yes' if eval_results.get('no_significant_collision_with_obstacle_during_pass', False) else '❌ No'}

🎯 **Success Criteria:**
β€’ Target: Cross obstacle (reach x > 0.8m)
β€’ Current: {eval_results.get('final_robot_x_position', 0.0):.3f}m
β€’ Status: {'ACHIEVED' if eval_results.get('final_robot_x_position', 0.0) > 0.8 else 'NOT ACHIEVED'}

⚠️ **Failure Analysis:**
{eval_results.get('specific_failure_point', 'No specific failure identified')}
"""

    return performance_text.strip()
560
+
561
def save_best_design_json(best_attempt_data, vehicle_type, iteration_count):
    """Persist the best design of a run as a comprehensive JSON report.

    Args:
        best_attempt_data (dict): attempt record holding 'robot_specs'
            (used for both vehicle kinds), 'design_reasoning' and
            'evaluation_results'.
        vehicle_type (str): "robot" or "drone"; selects the output filename
            and which specification section is written.
        iteration_count (int): total number of design iterations performed.

    Returns:
        str | None: the filename written, or None when there is nothing to
        save or the write fails.
    """

    if not best_attempt_data:
        return None

    # Timestamped filename so repeated runs never overwrite each other.
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")

    # Both vehicle kinds keep their parameters under 'robot_specs' for
    # compatibility; only the filename differs by type.
    if vehicle_type == "robot":
        out_name = f"best_robot_design_{stamp}.json"
    else:
        out_name = f"best_drone_design_{stamp}.json"
    specs = best_attempt_data.get("robot_specs", {})

    eval_results = best_attempt_data['evaluation_results']
    succeeded = eval_results.get('overall_success', False)
    final_x = eval_results.get('final_robot_x_position', 0.0)

    report = {
        "timestamp": datetime.now().isoformat(),
        "vehicle_type": vehicle_type,
        "total_iterations": iteration_count,
        "design_process_summary": {
            "success": succeeded,
            "best_iteration": "Final attempt",  # Could be enhanced to track actual best iteration
            "total_attempts": iteration_count
        }
    }

    # Vehicle-specific specification section.
    if vehicle_type == "robot":
        report["robot_specifications"] = {
            "wheel_type": specs.get('wheel_type', 'unknown'),
            "body_clearance_cm": specs.get('body_clearance_cm', 0),
            "approach_sensor_enabled": specs.get('approach_sensor_enabled', False),
            "main_material": specs.get('main_material', 'unknown'),
            "vehicle_type": specs.get('vehicle_type', 'robot')
        }
    else:  # drone
        report["drone_specifications"] = {
            "propeller_size": specs.get('propeller_size', 'unknown'),
            "flight_height_cm": specs.get('flight_height_cm', 0),
            "stability_mode": specs.get('stability_mode', 'unknown'),
            "main_material": specs.get('main_material', 'unknown'),
            "vehicle_type": specs.get('vehicle_type', 'drone')
        }

    report["design_reasoning"] = best_attempt_data.get('design_reasoning', 'No reasoning provided')

    report["performance_results"] = {
        "overall_success": succeeded,
        "robot_crossed_obstacle": eval_results.get('robot_crossed_obstacle', False),
        "robot_remains_upright": eval_results.get('robot_remains_upright', False),
        "no_significant_collision_with_obstacle_during_pass": eval_results.get('no_significant_collision_with_obstacle_during_pass', False),
        "final_robot_x_position": final_x,
        "specific_failure_point": eval_results.get('specific_failure_point', 'none')
    }

    # Per-criterion breakdown against the 0.8m far-edge target.
    report["success_criteria_analysis"] = {
        "target_distance": 0.8,
        "achieved_distance": final_x,
        "distance_success": final_x > 0.8,
        "stability_success": eval_results.get('robot_remains_upright', False),
        "collision_success": eval_results.get('no_significant_collision_with_obstacle_during_pass', False),
        "overall_success": succeeded
    }

    report["performance_summary"] = {
        "distance_traveled": f"{final_x:.3f}m",
        "success_rate": "100%" if succeeded else "0%",
        "primary_failure": eval_results.get('specific_failure_point', 'unknown') if not succeeded else None
    }

    try:
        with open(out_name, 'w', encoding='utf-8') as fh:
            json.dump(report, fh, indent=2, ensure_ascii=False)
        print(f"πŸ“„ Best {vehicle_type} design saved to: {out_name}")
        return out_name
    except Exception as e:
        print(f"❌ Error saving JSON file: {e}")
        return None
653
+
654
if __name__ == "__main__":
    # Entry point: prefer the Gradio web UI, fall back to a console flow
    # when Gradio is unavailable or fails to launch.
    print("πŸ€–πŸš LLM-Agent-Designed Vehicle System (Robots & Drones)")
    print("=" * 60)

    if GRADIO_AVAILABLE:
        print("Starting Gradio web interface...")
        try:
            # Create and launch Gradio interface
            interface = create_gradio_interface()
            interface.launch(
                server_name="0.0.0.0",
                server_port=7860,
                share=True,
                show_error=True
            )
        except Exception as e:
            print(f"Failed to start Gradio interface: {e}")
            print("Falling back to console interface...")
            # Plain assignment works here because this code runs at module
            # level, so it rebinds the module-global flag directly.
            GRADIO_AVAILABLE = False

    if not GRADIO_AVAILABLE:
        print("Using console interface...")
        print("Note: Simulation GIFs will be saved to the 'outputs' directory")
        print("")

        # Get user input; anything other than "robot"/"drone" defaults to robot.
        vehicle_type = input("Choose vehicle type (robot/drone) [robot]: ").strip().lower()
        if vehicle_type not in ["robot", "drone"]:
            vehicle_type = "robot"

        task_description = input("Enter task description (or press Enter for default): ").strip()
        if not task_description:
            task_description = f"Design a {vehicle_type} that can pass the 5cm high obstacle"

        print("")

        # Run the design process (console variant returns (success, specs)).
        success, final_specs = console_design_vehicle_for_obstacle(vehicle_type, task_description)

        print("\n" + "=" * 60)
        if success:
            print("πŸŽ‰ MISSION ACCOMPLISHED!")
            print(f"Final successful {vehicle_type} design: {final_specs}")
        else:
            print("❌ Mission failed, but valuable data collected.")
            print(f"Best attempt specs: {final_specs}")

        print("\nCheck the 'outputs' directory for simulation GIFs.")
        print("=" * 60)
mcp_server.py ADDED
@@ -0,0 +1,396 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ MCP Server for LLM-Agent-Designed Obstacle-Passing Vehicle System
3
+
4
+ This server exposes vehicle design, simulation, and evaluation tools via the Model Context Protocol.
5
+ Supports both robots and drones for crossing a 5cm obstacle.
6
+ """
7
+
8
+ from mcp.server.fastmcp import FastMCP
9
+ import json
10
+ import time
11
+ import os
12
+ import imageio
13
+ from typing import Dict, Any, List, Optional, Literal
14
+
15
+ # Import our existing modules
16
+ import simulation_env_enhanced as simulation_env
17
+ import llm_interface
18
+ import evaluation
19
+
20
+ # Create the MCP server
21
+ mcp = FastMCP("Robot-Drone-Design-Server")
22
+
23
+ # Global configuration
24
+ MAX_ITERATIONS = 5
25
+ SIMULATION_DURATION_SEC = 10
26
+ OBSTACLE_FAR_EDGE_X = 0.8
27
+
28
@mcp.tool()
def get_obstacle_info() -> Dict[str, Any]:
    """Return information about the obstacle that vehicles need to cross.

    Delegates to the simulation environment so the MCP client and the
    physics setup always agree on the course geometry.
    """
    info = simulation_env.get_obstacle_info()
    return info
32
+
33
@mcp.tool()
def design_vehicle(
    vehicle_type: Literal["robot", "drone"],
    specifications: Dict[str, Any],
    design_reasoning: str = ""
) -> Dict[str, Any]:
    """
    Design a robot or drone with specified parameters.

    Args:
        vehicle_type: Either "robot" or "drone"
        specifications: Vehicle parameters (depends on type)
        design_reasoning: Explanation of design choices

    Returns:
        On success: dict with the validated specifications (vehicle_type
        injected), the reasoning, and a status message.
        On failure: {"error": <reason>}.
    """
    try:
        # Work on a copy so the caller's dict is never mutated.
        specs = specifications.copy()
        specs["vehicle_type"] = vehicle_type

        # Table-driven validation rules per vehicle type.  The explicit
        # required_params order is preserved because the first missing
        # parameter is the one reported.
        if vehicle_type == "robot":
            required_params = ["wheel_type", "body_clearance_cm", "main_material"]
            enum_params = {
                "wheel_type": ["small_high_grip", "large_smooth", "tracked_base"],
                "main_material": ["light_plastic", "sturdy_metal_alloy"],
            }
            range_params = {"body_clearance_cm": (1, 10)}
        elif vehicle_type == "drone":
            required_params = ["propeller_size", "flight_height_cm", "main_material"]
            enum_params = {
                "propeller_size": ["small_agile", "medium", "large_stable"],
                "main_material": ["light_carbon_fiber", "sturdy_aluminum"],
            }
            range_params = {"flight_height_cm": (10, 50)}
        else:
            return {"error": f"Invalid vehicle_type: {vehicle_type}. Must be 'robot' or 'drone'"}

        # Presence checks first, then value checks, matching the original order.
        for param in required_params:
            if param not in specs:
                return {"error": f"Missing required parameter: {param}"}

        for param, options in enum_params.items():
            if specs[param] not in options:
                return {"error": f"Invalid {param}. Must be one of: {options}"}

        for param, (low, high) in range_params.items():
            # A non-numeric value raises TypeError here, which the outer
            # except turns into a "Design validation failed" response.
            if not (low <= specs[param] <= high):
                return {"error": f"{param} must be between {low} and {high}"}

        return {
            "success": True,
            "vehicle_type": vehicle_type,
            "specifications": specs,
            "design_reasoning": design_reasoning,
            "status": "Design validated and ready for simulation"
        }

    except Exception as e:
        return {"error": f"Design validation failed: {str(e)}"}
107
+
108
@mcp.tool()
def simulate_vehicle(
    vehicle_type: Literal["robot", "drone"],
    specifications: Dict[str, Any],
    simulation_duration: float = 10.0
) -> Dict[str, Any]:
    """
    Run physics simulation of a vehicle attempting to cross the obstacle.

    Args:
        vehicle_type: Either "robot" or "drone"
        specifications: Vehicle parameters
        simulation_duration: How long to run simulation (seconds)

    Returns:
        On success: dict with the last feedback snapshot and elapsed time.
        On failure: {"error": <reason>}.
    """
    try:
        # Add vehicle_type to a copy of the specifications.
        specs = specifications.copy()
        specs["vehicle_type"] = vehicle_type

        # Setup PyBullet environment
        obstacle_id, plane_id = simulation_env.setup_pybullet_environment()

        # Create vehicle; drones carry extra physical properties that the
        # per-step controller needs.
        if vehicle_type == "robot":
            vehicle_id, joint_indices, v_type = simulation_env.create_robot(specs)
            vehicle_props = None
        elif vehicle_type == "drone":
            vehicle_id, joint_indices, v_type, vehicle_props = simulation_env.create_drone(specs)
        else:
            return {"error": f"Invalid vehicle_type: {vehicle_type}"}

        # Run simulation.  current_sim_time is initialized up front so a
        # non-positive duration (zero loop iterations) still returns a valid
        # result instead of raising NameError at the final return.
        start_time = time.time()
        final_feedback = {}
        current_sim_time = 0.0
        simulation_steps = int(simulation_duration * 240)  # 240 Hz physics rate

        for step in range(simulation_steps):
            # Run one physics step.
            simulation_env.run_simulation_step(
                vehicle_id, joint_indices, {}, vehicle_type,
                vehicle_props if vehicle_type == "drone" else None
            )

            # NOTE(review): this is wall-clock time, not accumulated physics
            # time; the two can drift if stepping runs slower than real time.
            current_sim_time = time.time() - start_time

            # Get feedback every 60 steps (4 times per simulated second).
            if step % 60 == 0:
                feedback = simulation_env.get_simulation_feedback(
                    vehicle_id, obstacle_id, start_time, current_sim_time, vehicle_type
                )
                final_feedback = feedback

                # Check for early exit: past the obstacle far edge (plus a
                # margin) or the vehicle has tipped over.
                vehicle_x_pos = feedback['robot_position'][0]  # Using robot_position for compatibility
                is_stable = feedback['is_robot_upright']  # Using is_robot_upright for compatibility

                if vehicle_x_pos > OBSTACLE_FAR_EDGE_X + 0.1 or not is_stable:
                    break

            if current_sim_time > simulation_duration:
                break

        # Cleanup
        simulation_env.reset_simulation()

        return {
            "success": True,
            "vehicle_type": vehicle_type,
            "specifications": specs,
            "simulation_feedback": final_feedback,
            "simulation_duration": current_sim_time
        }

    except Exception as e:
        # Best-effort cleanup so a crashed run does not leak the PyBullet
        # session into the next one; narrowed from a bare except.
        try:
            simulation_env.reset_simulation()
        except Exception:
            pass

        return {"error": f"Simulation failed: {str(e)}"}
192
+
193
@mcp.tool()
def evaluate_performance(simulation_feedback: Dict[str, Any]) -> Dict[str, Any]:
    """
    Evaluate vehicle performance based on simulation feedback.

    Args:
        simulation_feedback: feedback snapshot produced by simulate_vehicle.

    Returns:
        On success: raw evaluation metrics, an LLM-ready textual summary and
        a per-criterion breakdown.  On failure: {"error": <reason>}.
    """
    try:
        # Delegate scoring to the shared evaluation module so every code
        # path judges runs against the same OBSTACLE_FAR_EDGE_X target.
        metrics = evaluation.evaluate_simulation_outcome(
            simulation_feedback, OBSTACLE_FAR_EDGE_X
        )
        summary_text = evaluation.format_feedback_for_llm(metrics)

        criteria = {
            "crossed_obstacle": metrics["robot_crossed_obstacle"],
            "remained_stable": metrics["robot_remains_upright"],
            "minimal_collision": metrics["no_significant_collision_with_obstacle_during_pass"],
            "overall_success": metrics["overall_success"]
        }

        return {
            "success": True,
            "evaluation_results": metrics,
            "feedback_summary": summary_text,
            "success_criteria": criteria
        }

    except Exception as e:
        return {"error": f"Evaluation failed: {str(e)}"}
225
+
226
@mcp.tool()
def iterative_design_process(
    vehicle_type: Literal["robot", "drone"],
    task_description: str = "Cross the 5cm obstacle",
    max_iterations: int = 5
) -> Dict[str, Any]:
    """
    Run complete iterative LLM-driven vehicle design process.

    Loop: prompt LLM -> simulate -> evaluate -> feed the attempt back into
    the next prompt, until a design succeeds or max_iterations is reached.

    Args:
        vehicle_type: Either "robot" or "drone"
        task_description: Description of the task (NOTE(review): not
            forwarded to the prompt builders below; kept for API shape)
        max_iterations: Maximum number of design iterations

    Returns:
        Success: dict with the winning specs and all attempts.
        Exhausted: dict with the best attempt (by distance) and all attempts.
        Error: {"error": <reason>}.
    """
    try:
        all_attempts = []
        iteration = 1

        while iteration <= max_iterations:
            # Generate design prompt: round 1 gets a cold-start prompt,
            # later rounds fold in the previous attempt's results.
            if iteration == 1:
                if vehicle_type == "robot":
                    prompt = llm_interface.generate_initial_robot_design_prompt()
                else:  # drone
                    prompt = llm_interface.generate_initial_drone_design_prompt()
            else:
                last_attempt = all_attempts[-1]
                if vehicle_type == "robot":
                    prompt = llm_interface.generate_iterative_robot_design_prompt(
                        last_attempt, iteration
                    )
                else:  # drone
                    prompt = llm_interface.generate_iterative_drone_design_prompt(
                        last_attempt, iteration
                    )

            # Get LLM design
            llm_response = llm_interface.call_llm_api(prompt)
            vehicle_specs = llm_response.get('robot_specs', {})  # robot_specs used for both
            design_reasoning = llm_response.get('design_reasoning', '')

            # Add vehicle type
            vehicle_specs["vehicle_type"] = vehicle_type

            # Run simulation
            sim_result = simulate_vehicle(vehicle_type, vehicle_specs, SIMULATION_DURATION_SEC)

            # A failed simulation is recorded but does not stop the loop;
            # the next prompt is built from this error attempt.
            if sim_result.get("error"):
                all_attempts.append({
                    "iteration": iteration,
                    "error": sim_result["error"],
                    "specifications": vehicle_specs
                })
                iteration += 1
                continue

            # Evaluate performance
            eval_result = evaluate_performance(sim_result["simulation_feedback"])

            # Store attempt
            attempt_data = {
                "iteration": iteration,
                "vehicle_type": vehicle_type,
                "specifications": vehicle_specs,
                "design_reasoning": design_reasoning,
                "simulation_result": sim_result,
                "evaluation_result": eval_result,
                "success": eval_result.get("evaluation_results", {}).get("overall_success", False)
            }
            all_attempts.append(attempt_data)

            # Check for success
            if attempt_data["success"]:
                return {
                    "success": True,
                    "vehicle_type": vehicle_type,
                    "iterations_needed": iteration,
                    "successful_design": vehicle_specs,
                    "all_attempts": all_attempts,
                    "final_evaluation": eval_result
                }

            iteration += 1

        # Max iterations reached: pick the attempt that traveled furthest.
        # Error-only attempts score 0 through the nested .get chain.
        best_attempt = max(all_attempts,
                          key=lambda x: x.get("evaluation_result", {}).get("evaluation_results", {}).get("final_robot_x_position", 0),
                          default={})

        return {
            "success": False,
            "vehicle_type": vehicle_type,
            "max_iterations_reached": True,
            "best_attempt": best_attempt,
            "all_attempts": all_attempts
        }

    except Exception as e:
        return {"error": f"Iterative design process failed: {str(e)}"}
+
329
+ @mcp.tool()
330
+ def get_design_parameters(vehicle_type: Literal["robot", "drone"]) -> Dict[str, Any]:
331
+ """
332
+ Get available design parameters for robots or drones
333
+
334
+ Args:
335
+ vehicle_type: Either "robot" or "drone"
336
+
337
+ Returns:
338
+ Available parameters and their valid ranges/options
339
+ """
340
+ if vehicle_type == "robot":
341
+ return {
342
+ "vehicle_type": "robot",
343
+ "parameters": {
344
+ "wheel_type": {
345
+ "type": "enum",
346
+ "options": ["small_high_grip", "large_smooth", "tracked_base"],
347
+ "description": "Type of wheels for traction and obstacle climbing"
348
+ },
349
+ "body_clearance_cm": {
350
+ "type": "integer",
351
+ "range": [1, 10],
352
+ "description": "Ground clearance in centimeters (obstacle is 5cm high)"
353
+ },
354
+ "approach_sensor_enabled": {
355
+ "type": "boolean",
356
+ "description": "Enable obstacle detection sensors (conceptual for MVP)"
357
+ },
358
+ "main_material": {
359
+ "type": "enum",
360
+ "options": ["light_plastic", "sturdy_metal_alloy"],
361
+ "description": "Material affects weight and durability"
362
+ }
363
+ }
364
+ }
365
+ elif vehicle_type == "drone":
366
+ return {
367
+ "vehicle_type": "drone",
368
+ "parameters": {
369
+ "propeller_size": {
370
+ "type": "enum",
371
+ "options": ["small_agile", "medium", "large_stable"],
372
+ "description": "Propeller size affects thrust and maneuverability"
373
+ },
374
+ "flight_height_cm": {
375
+ "type": "integer",
376
+ "range": [10, 50],
377
+ "description": "Target flight height in centimeters (obstacle is 5cm high)"
378
+ },
379
+ "stability_mode": {
380
+ "type": "enum",
381
+ "options": ["auto_hover", "manual_control"],
382
+ "description": "Flight stability control mode"
383
+ },
384
+ "main_material": {
385
+ "type": "enum",
386
+ "options": ["light_carbon_fiber", "sturdy_aluminum"],
387
+ "description": "Material affects weight and flight characteristics"
388
+ }
389
+ }
390
+ }
391
+ else:
392
+ return {"error": f"Invalid vehicle_type: {vehicle_type}. Must be 'robot' or 'drone'"}
393
+
394
+ # Run the MCP server
395
+ if __name__ == "__main__":
396
+ mcp.run()
record_trial_videos.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Record trial videos with timestamps to avoid overriding existing files
4
+ """
5
+
6
+ import os
7
+ import time
8
+ from datetime import datetime
9
+ import main_orchestrator_enhanced
10
+
11
def record_trial_videos():
    """Run trial and record videos with unique timestamps.

    Runs one robot and one drone design session via the enhanced
    orchestrator, then reports which simulation GIFs and JSON result files
    were produced.  Pre-existing GIFs in outputs/ are never overwritten
    because each session tags its artifacts with a fresh timestamp.

    Returns:
        dict | None: session summary (timestamp, per-vehicle success flags,
        new-video count and names), or None if any step raised.
    """

    # Create timestamp for this session
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    print(f"🎬 Recording Trial Videos - Session: {timestamp}")
    print("=" * 60)

    # Ensure outputs directory exists
    os.makedirs("outputs", exist_ok=True)

    # Snapshot the GIFs that exist *before* the run so new ones can be
    # identified afterwards by set difference.
    existing_videos = [f for f in os.listdir("outputs") if f.endswith('.gif')]
    print(f"πŸ“ Existing videos: {len(existing_videos)} files")
    for video in existing_videos[:5]:  # Show first 5
        print(f" β€’ {video}")
    if len(existing_videos) > 5:
        print(f" β€’ ... and {len(existing_videos) - 5} more")

    print(f"\nπŸš€ Starting Enhanced Trial with Video Recording...")
    print(f"πŸ“Ή New videos will be saved with timestamp: {timestamp}")

    try:
        # Test Robot Design
        print(f"\nπŸ€– Testing Robot Design...")
        robot_success, robot_specs = main_orchestrator_enhanced.console_design_vehicle_for_obstacle(
            vehicle_type='robot',
            user_task_description=f'Design a robot that can pass the 5cm high obstacle - Session {timestamp}'
        )

        print(f"βœ… Robot trial completed!")
        print(f" Success: {robot_success}")
        print(f" Final specs: {robot_specs}")

        # Test Drone Design
        print(f"\n🚁 Testing Drone Design...")
        drone_success, drone_specs = main_orchestrator_enhanced.console_design_vehicle_for_obstacle(
            vehicle_type='drone',
            user_task_description=f'Design a drone that can pass the 5cm high obstacle - Session {timestamp}'
        )

        print(f"βœ… Drone trial completed!")
        print(f" Success: {drone_success}")
        print(f" Final specs: {drone_specs}")

        # List new videos created
        print(f"\nπŸ“Ή VIDEO RECORDING RESULTS:")
        new_videos = [f for f in os.listdir("outputs") if f.endswith('.gif')]
        new_count = len(new_videos) - len(existing_videos)

        print(f"Total videos now: {len(new_videos)} files (+{new_count} new)")

        # Show newest videos (those created in this session)
        newest_videos = sorted([f for f in new_videos if f not in existing_videos])
        if newest_videos:
            print(f"\n🎬 NEW VIDEOS CREATED:")
            for video in newest_videos:
                file_path = os.path.join("outputs", video)
                file_size = os.path.getsize(file_path)
                print(f" β€’ {video} ({file_size:,} bytes)")

        # Check JSON files.  NOTE(review): lexicographic sort is only valid
        # because the filenames embed a zero-padded YYYYMMDD_HHMMSS stamp.
        json_files = [f for f in os.listdir(".") if f.startswith("best_") and f.endswith(".json")]
        latest_json = sorted(json_files)[-2:] if len(json_files) >= 2 else json_files

        if latest_json:
            print(f"\nπŸ“„ LATEST JSON RESULTS:")
            for json_file in latest_json:
                print(f" β€’ {json_file}")

        print(f"\n🎯 TRIAL SESSION {timestamp} COMPLETED SUCCESSFULLY!")
        print(f" Robot moved: {robot_success}")
        print(f" Drone moved: {drone_success}")
        print(f" Videos created: {new_count}")
        print(f" No existing videos were overridden βœ…")

        return {
            "timestamp": timestamp,
            "robot_success": robot_success,
            "drone_success": drone_success,
            "videos_created": new_count,
            "newest_videos": newest_videos
        }

    except Exception as e:
        print(f"❌ Error during trial recording: {e}")
        import traceback
        traceback.print_exc()
        return None
100
+
101
if __name__ == "__main__":
    # Script entry point: run both trials, then print a compact summary.
    result = record_trial_videos()

    if result:
        print(f"\nπŸ† FINAL SUMMARY:")
        print(f"Session: {result['timestamp']}")
        print(f"Robot performance: {'βœ… Success' if result['robot_success'] else '❌ Failed'}")
        print(f"Drone performance: {'βœ… Success' if result['drone_success'] else '❌ Failed'}")
        print(f"Videos recorded: {result['videos_created']}")
        print(f"All videos preserved! 🎬")
    else:
        # record_trial_videos returns None when any step raised.
        print("❌ Trial recording failed")
run_trial.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Trial script for testing the enhanced orchestrator with multiple iterations
4
+ """
5
+
6
+ import main_orchestrator_enhanced
7
+ import json
8
+ import os
9
+ from datetime import datetime
10
+
11
def run_trial():
    """Run obstacle-crossing design trials for a robot and then a drone.

    For each vehicle type the enhanced orchestrator is invoked (it performs
    up to 5 design iterations internally), the outcome is printed, and the
    best design recorded in the newest ``best_<type>_design_*.json`` file is
    summarized.  A failure in one vehicle's trial is reported with a
    traceback but does not stop the other trial.
    """
    print("πŸš€ Starting Enhanced Orchestrator Trial...")
    print("=" * 60)

    _run_vehicle_trial(
        vehicle_type='robot',
        banner="\nπŸ€– Testing Robot Design (up to 5 iterations)...",
        task='Design a robot that can pass the 5cm high obstacle',
        spec_fields=[
            # (display label, key in <type>_specifications, unit suffix)
            ('Wheel Type', 'wheel_type', ''),
            ('Body Clearance', 'body_clearance_cm', 'cm'),
            ('Material', 'main_material', ''),
        ],
    )

    print("\n" + "=" * 60)

    _run_vehicle_trial(
        vehicle_type='drone',
        banner="\n🚁 Testing Drone Design (up to 5 iterations)...",
        task='Design a drone that can pass the 5cm high obstacle',
        spec_fields=[
            ('Propeller Size', 'propeller_size', ''),
            ('Flight Height', 'flight_height_cm', 'cm'),
            ('Material', 'main_material', ''),
        ],
    )

    print("\n🎯 Trial completed! Check the generated JSON files and GIF files for detailed results.")


def _run_vehicle_trial(vehicle_type, banner, task, spec_fields):
    """Run one vehicle's design trial and print its results.

    Args:
        vehicle_type: 'robot' or 'drone'; forwarded to the orchestrator and
            used to locate the matching best-design JSON file.
        banner: Heading printed before the trial starts.
        task: Natural-language task description for the orchestrator.
        spec_fields: (label, spec_key, suffix) tuples describing which
            specification entries to show in the best-design summary.

    Exceptions raised by the orchestrator (or while summarizing the saved
    design) are caught and reported so the caller can continue with the
    next vehicle.
    """
    label = vehicle_type.capitalize()
    print(banner)
    try:
        success, final_specs = main_orchestrator_enhanced.console_design_vehicle_for_obstacle(
            vehicle_type=vehicle_type,
            user_task_description=task
        )

        print(f"\nβœ… {label} trial completed!")
        print(f"Success: {success}")
        print(f"Final specs: {final_specs}")

        _summarize_best_design(vehicle_type, spec_fields)

    except Exception as e:
        print(f"❌ {label} trial failed: {e}")
        import traceback
        traceback.print_exc()


def _summarize_best_design(vehicle_type, spec_fields):
    """Print a summary of the newest best_<vehicle_type>_design_*.json, if any."""
    prefix = f'best_{vehicle_type}_design_'
    json_files = [f for f in os.listdir('.') if f.startswith(prefix) and f.endswith('.json')]
    if not json_files:
        return  # orchestrator did not persist a design this run

    # Pick the newest file by filesystem creation time.
    latest_json = max(json_files, key=os.path.getctime)
    print(f"\nπŸ“„ Best design saved to: {latest_json}")

    with open(latest_json, 'r') as f:
        best_design = json.load(f)

    perf = best_design['performance_results']
    specs = best_design[f'{vehicle_type}_specifications']

    print(f"\nπŸ† BEST {vehicle_type.upper()} DESIGN SUMMARY:")
    print(f" β€’ Final Position: {perf['final_robot_x_position']:.3f}m")
    print(f" β€’ Success: {perf['overall_success']}")
    for field_label, spec_key, suffix in spec_fields:
        print(f" β€’ {field_label}: {specs[spec_key]}{suffix}")
86
+
87
if __name__ == "__main__":
    # Allow the trial to be launched directly: `python run_trial.py`.
    run_trial()