File size: 2,978 Bytes
872ad5b
fa668db
5b91549
 
 
872ad5b
5b91549
872ad5b
 
 
 
 
85dc538
872ad5b
225a300
fa668db
872ad5b
fa668db
 
872ad5b
5b91549
 
872ad5b
5b91549
 
fa668db
 
872ad5b
5b91549
 
872ad5b
5b91549
 
fa668db
 
225a300
872ad5b
 
5b91549
 
872ad5b
5b91549
 
872ad5b
 
 
 
5b91549
 
872ad5b
 
 
225a300
872ad5b
fa668db
 
 
 
872ad5b
fa668db
 
5b91549
 
872ad5b
 
5b91549
872ad5b
fa668db
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
import os
import gradio as gr
from coordinator.task_parser import parse_brief_with_reasoning
from coordinator.task_assigner import assign_tasks_with_reasoning
from frontend_agent.ui_generator import generate_react_component_llm
from frontend_agent.live_frontend import save_react_component
from backend_agent.api_generator import generate_backend_code_llm
from backend_agent.live_api import app as fastapi_app, add_dynamic_route, run_server
import threading

# Ensure the target directories for generated artifacts exist before the
# first brief is processed.
for live_dir in ("frontend_live", "backend_live"):
    os.makedirs(live_dir, exist_ok=True)

# Run the FastAPI backend alongside the Gradio UI; daemon=True lets the
# process exit without waiting on the server thread.
api_thread = threading.Thread(target=run_server, daemon=True)
api_thread.start()

def process_brief_live(project_brief):
    """Run the coordinator pipeline for a project brief and return a markdown log.

    Three steps: (1) parse the brief into tasks, (2) assign each task to an
    agent, (3) generate live frontend/backend code per assignment.

    Args:
        project_brief: Free-form text describing the desired project.

    Returns:
        Markdown-formatted string containing the LLM reasoning and the
        artifacts produced at each step.
    """
    output_text = ""

    # Step 1: Parsing Tasks
    output_text += "### Step 1: Parsing Tasks\n"
    parsing_result = parse_brief_with_reasoning(project_brief)
    output_text += "**LLM Reasoning:**\n" + parsing_result["reasoning"] + "\n\n"
    output_text += "**Parsed Tasks:**\n"
    for t in parsing_result["tasks"]:
        output_text += f"- {t}\n"

    # Step 2: Assigning Tasks
    output_text += "\n### Step 2: Assigning Tasks\n"
    assignment_result = assign_tasks_with_reasoning(parsing_result["tasks"])
    output_text += "**LLM Reasoning:**\n" + assignment_result["reasoning"] + "\n\n"
    output_text += "**Assigned Tasks:**\n"
    for task, agent in assignment_result["assignments"].items():
        output_text += f"- {task} -> {agent}\n"

    # Step 3: Generate Live Frontend & Backend
    output_text += "\n### Step 3: Live Code Generation\n"

    for task, agent in assignment_result["assignments"].items():
        output_text += f"\n#### {task} ({agent})\n"
        # Filesystem/URL-safe task name, computed once per task.
        slug = task.replace(" ", "_")

        if "frontend" in agent.lower():
            code = generate_react_component_llm(task)
            filename = save_react_component(task, code)
            # FIX: the saved component's filename was never interpolated into
            # the log or the iframe src (the f-strings held a literal
            # placeholder), so the preview pointed nowhere.
            output_text += f"**Frontend Component Generated:** {filename}\n"
            output_text += f"Preview: <iframe src='/{filename}' width='100%' height='300'></iframe>\n"

        elif "backend" in agent.lower():
            code = generate_backend_code_llm(task)
            backend_file = f"backend_live/{slug}.py"
            # Explicit encoding so generated source is written identically on
            # every platform (default encoding is platform-dependent).
            with open(backend_file, "w", encoding="utf-8") as f:
                f.write(code)
            add_dynamic_route(task, "pass")  # placeholder handler body
            output_text += f"**Backend API Created:** http://127.0.0.1:8000/{slug.lower()}\n"

    return output_text

# Gradio UI: a brief input, a submit button, and a large read-only log pane
# that receives the pipeline's markdown output.
with gr.Blocks() as demo:
    gr.Markdown("## AI Project Coordinator - Live Frontend & Backend")
    brief_box = gr.Textbox(lines=3, placeholder="Enter your project brief here...")
    run_button = gr.Button("Submit")
    log_box = gr.Textbox(
        label="Processing Log",
        placeholder="LLM reasoning and live code output will appear here...",
        lines=30,
    )
    # Wire the button to the pipeline: brief text in, markdown log out.
    run_button.click(fn=process_brief_live, inputs=brief_box, outputs=log_box)

if __name__ == "__main__":
    demo.launch()