Spaces:
Runtime error
Runtime error
File size: 5,214 Bytes
0ee2ec4 71f1dd5 0ee2ec4 71f1dd5 0ee2ec4 71f1dd5 0ee2ec4 71f1dd5 0ee2ec4 71f1dd5 0ee2ec4 71f1dd5 0ee2ec4 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 |
import asyncio
from typing import Any, AsyncGenerator, Dict, Generator, List

import gradio as gr

from utils.langgraph_pipeline import run_pipeline_and_save
class AgentInference:
    """Runs a simulated multi-agent UI-generation pipeline with streaming output."""

    def __init__(self):
        # Name of the agent currently streaming, or None when idle.
        self.current_agent = None
        # Accumulated conversation entries: {"role": agent_name, "content": text}.
        self.chat_log = []
        # Cooperative stop flag; cleared by the UI's stop button to abort a run.
        self.is_running = False

    async def stream_agent_output(self, agent_name: str, prompt: str) -> AsyncGenerator[str, None]:
        """Stream one agent's output word by word.

        Yields a "thinking" banner, then the agent's response one word at a
        time, then a trailing separator. Appends the complete response to
        ``self.chat_log`` once streaming finishes.

        NOTE: this is an ``async def`` generator, so the correct return
        annotation is ``AsyncGenerator`` (the original ``Generator`` hint
        described a synchronous generator, which this is not).
        """
        self.current_agent = agent_name
        # Simulate "thinking" latency before any content arrives.
        yield f"๐ค {agent_name} is thinking..."
        await asyncio.sleep(1)
        result = await self.get_agent_output(agent_name, prompt)
        # Stream the output word by word to mimic token-level streaming.
        for word in result.split():
            yield f"{word} "
            await asyncio.sleep(0.1)
        self.chat_log.append({"role": agent_name, "content": result})
        yield "\n\n"

    async def get_agent_output(self, agent_name: str, prompt: str) -> str:
        """Return a canned response for *agent_name*.

        Placeholder for real agent calls; unknown agents get a generic reply.
        """
        agents = {
            "Product Manager": "Analyzing requirements and defining product specifications...",
            "Project Manager": "Creating project timeline and resource allocation...",
            "Software Architect": "Designing system architecture and technical specifications...",
            "UI Designer": "Creating beautiful and user-friendly interface designs...",
            "Software Engineer": "Implementing the UI components and functionality...",
            "Quality Assurance": "Reviewing and testing the implementation...",
        }
        return agents.get(agent_name, "Processing...")

    async def run_inference(self, prompt: str) -> AsyncGenerator[Dict[str, Any], None]:
        """Run every agent in sequence, yielding streaming updates for the UI.

        Each update dict carries "agent", "output", and "chat_log". The loop
        checks ``self.is_running`` between agents so a stop request takes
        effect at the next agent boundary.
        """
        self.is_running = True
        self.chat_log = []
        agents = [
            "Product Manager",
            "Project Manager",
            "Software Architect",
            "UI Designer",
            "Software Engineer",
            "Quality Assurance",
        ]
        for agent in agents:
            if not self.is_running:
                break
            async for output in self.stream_agent_output(agent, prompt):
                yield {
                    "agent": agent,
                    "output": output,
                    "chat_log": self.chat_log,
                }
            # Small pause between agents for readability.
            await asyncio.sleep(0.5)
        # BUG FIX: reset the flag so the object does not report a stale
        # "running" state after the pipeline has finished.
        self.is_running = False
        # Final completion notice.
        yield {
            "agent": "System",
            "output": "๐ UI Generation Complete!",
            "chat_log": self.chat_log,
        }
# Module-level singleton shared by the run and stop handlers below.
inference_engine = AgentInference()
def format_chat_log(chat_log: List[Dict[str, Any]]) -> List[Dict[str, str]]:
    """Convert internal chat entries into Gradio "messages" format.

    BUG FIX: the Chatbot component is created with ``type="messages"``, which
    requires ``{"role": ..., "content": ...}`` dicts whose role is "user" or
    "assistant" — the previous tuple format raises a validation error at
    runtime. Agent names are not valid Gradio roles, so they are folded into
    the message content instead.
    """
    return [
        {
            "role": "assistant",
            "content": f"**{entry['role']}**: {entry['content']}",
        }
        for entry in chat_log
    ]
async def handle_run(prompt: str) -> AsyncGenerator[tuple, None]:
    """Stream chat-log updates to the UI while the pipeline runs.

    Yields ``(chat_log, file)`` pairs; the file slot stays None until the
    pipeline produces a downloadable archive.

    NOTE: annotated ``AsyncGenerator`` because this is an ``async def``
    generator — the original ``Generator[tuple, None, None]`` hint described
    a synchronous generator.
    """
    async for update in inference_engine.run_inference(prompt):
        # Re-format on every update so the Chatbot always receives the
        # component's expected message structure.
        formatted_log = format_chat_log(update["chat_log"])
        yield formatted_log, None
# --- Gradio UI definition -------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("""
# ๐ง Multi-Agent UI Generator (Real-time Inference)
This system uses multiple AI agents working together to generate beautiful UI designs in real-time:
1. Product Manager: Defines requirements
2. Project Manager: Creates project plan
3. Software Architect: Designs system architecture
4. UI Designer: Creates beautiful UI design
5. Software Engineer: Implements the code
6. Quality Assurance: Reviews and suggests improvements
Watch as each agent contributes to the design in real-time!
""")

    with gr.Row():
        with gr.Column(scale=2):
            input_box = gr.Textbox(
                lines=4,
                label="Enter your product idea prompt",
                placeholder="Describe the website or UI you want to create...",
            )
            run_btn = gr.Button("Generate Website", variant="primary")
            stop_btn = gr.Button("Stop Generation", variant="stop")
        with gr.Column(scale=3):
            chatbox = gr.Chatbot(
                label="Agent Conversation Log",
                type="messages",
                height=600,
            )

    file_output = gr.File(label="Download UI ZIP")

    # Streaming generation: handle_run is an async generator, so the chatbox
    # updates live while agents produce output.
    run_btn.click(
        fn=handle_run,
        inputs=[input_box],
        outputs=[chatbox, file_output],
        api_name="generate",
    )

    def stop_generation():
        """Clear the run flag and show a stop notice in the chat panel."""
        inference_engine.is_running = False
        # BUG FIX: the Chatbot uses type="messages", so a bare string is
        # rejected by the component; wrap the notice in messages format.
        return [{"role": "assistant", "content": "Generation stopped by user"}]

    stop_btn.click(
        fn=stop_generation,
        outputs=[chatbox],
    )

# queue() is required for streaming/generator event handlers.
demo.queue()
demo.launch()
|