agents/product_manager_agent.py CHANGED
@@ -1,8 +1,6 @@
1
  from transformers import AutoTokenizer, AutoModelForCausalLM
2
  import torch
3
  from langchain_core.messages import AIMessage
4
- import asyncio
5
- from typing import Generator, Dict, Any
6
 
7
  MODEL_REPO = "Rahul-8799/product_manager_mistral"
8
 
@@ -13,43 +11,15 @@ model = AutoModelForCausalLM.from_pretrained(
13
  device_map="auto"
14
  )
15
 
16
- async def stream_inference(prompt: str) -> Generator[str, None, None]:
17
- """Stream the model's output token by token"""
18
- input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
19
-
20
- # Generate tokens one by one
21
- for _ in range(100): # Limit to 100 tokens for streaming demo
22
- output_ids = model.generate(
23
- input_ids,
24
- max_new_tokens=1,
25
- pad_token_id=tokenizer.eos_token_id
26
- )
27
-
28
- # Get the new token
29
- new_token = output_ids[0][-1]
30
- if new_token == tokenizer.eos_token_id:
31
- break
32
-
33
- # Decode and yield the token
34
- token_text = tokenizer.decode([new_token])
35
- yield token_text
36
-
37
- # Update input_ids for next iteration
38
- input_ids = output_ids
39
-
40
- # Small delay to simulate streaming
41
- await asyncio.sleep(0.05)
42
-
43
- async def run(state: Dict[str, Any]) -> Dict[str, Any]:
44
- """Product Manager generates structured product requirements with streaming output"""
45
  messages = state["messages"]
46
  prompt = messages[-1].content
47
-
48
- # Stream the output
49
- output = ""
50
- async for token in stream_inference(prompt):
51
- output += token
52
-
53
  return {
54
  "messages": [AIMessage(content=output)],
55
  "chat_log": state["chat_log"] + [{"role": "Product Manager", "content": output}],
 
1
  from transformers import AutoTokenizer, AutoModelForCausalLM
2
  import torch
3
  from langchain_core.messages import AIMessage
 
 
4
 
5
  MODEL_REPO = "Rahul-8799/product_manager_mistral"
6
 
 
11
  device_map="auto"
12
  )
13
 
14
+ def run(state: dict) -> dict:
15
+ """Generates structured product requirements from user input prompt."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  messages = state["messages"]
17
  prompt = messages[-1].content
18
+
19
+ input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
20
+ output_ids = model.generate(input_ids, max_new_tokens=3000)
21
+ output = tokenizer.decode(output_ids[0], skip_special_tokens=True)
22
+
 
23
  return {
24
  "messages": [AIMessage(content=output)],
25
  "chat_log": state["chat_log"] + [{"role": "Product Manager", "content": output}],
agents/software_engineer_agent.py CHANGED
@@ -1,8 +1,6 @@
1
  from transformers import AutoTokenizer, AutoModelForCausalLM
2
  import torch
3
  from langchain_core.messages import AIMessage
4
- import asyncio
5
- from typing import Generator, Dict, Any
6
 
7
  MODEL_REPO = "Rahul-8799/software_engineer_mellum"
8
 
@@ -13,79 +11,79 @@ model = AutoModelForCausalLM.from_pretrained(
13
  device_map="auto"
14
  )
15
 
16
- async def stream_inference(prompt: str) -> Generator[str, None, None]:
17
- """Stream the model's output token by token"""
18
- input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
19
-
20
- for _ in range(100):
21
- output_ids = model.generate(
22
- input_ids,
23
- max_new_tokens=1,
24
- pad_token_id=tokenizer.eos_token_id
25
- )
26
-
27
- new_token = output_ids[0][-1]
28
- if new_token == tokenizer.eos_token_id:
29
- break
30
-
31
- token_text = tokenizer.decode([new_token])
32
- yield token_text
33
-
34
- input_ids = output_ids
35
- await asyncio.sleep(0.05)
36
-
37
- async def run(state: Dict[str, Any]) -> Dict[str, Any]:
38
- """Software Engineer generates responsive and interactive UI code"""
39
  messages = state["messages"]
40
  prompt = messages[-1].content
41
 
42
- # Enhance the prompt with modern web development requirements
43
  enhanced_prompt = f"""
44
- Generate modern, responsive, and interactive UI code following these requirements:
45
- 1. Use Tailwind CSS for responsive design
46
- 2. Implement JavaScript for interactivity
47
- 3. Add smooth animations and transitions
48
- 4. Ensure mobile-first approach
49
- 5. Include proper error handling
50
- 6. Add loading states and feedback
51
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  Original requirements: {prompt}
53
-
54
- Generate the following files:
55
-
56
- 1. index.html - Main HTML structure
57
- 2. styles.css - Custom styles (if needed beyond Tailwind)
58
- 3. script.js - Interactive features
59
- 4. tailwind.config.js - Tailwind configuration
60
-
61
- Format the output as:
62
-
63
- ## HTML Structure
64
- ```html
65
- [HTML code]
66
- ```
67
-
68
- ## CSS Styles
69
- ```css
70
- [CSS code]
71
- ```
72
-
73
- ## JavaScript
74
- ```javascript
75
- [JavaScript code]
76
- ```
77
-
78
- ## Tailwind Config
79
- ```javascript
80
- [Tailwind configuration]
81
- ```
82
  """
83
 
84
- # Stream the output
85
- output = ""
86
- async for token in stream_inference(enhanced_prompt):
87
- output += token
88
-
89
  return {
90
  "messages": [AIMessage(content=output)],
91
  "chat_log": state["chat_log"] + [{"role": "Software Engineer", "content": output}],
 
1
  from transformers import AutoTokenizer, AutoModelForCausalLM
2
  import torch
3
  from langchain_core.messages import AIMessage
 
 
4
 
5
  MODEL_REPO = "Rahul-8799/software_engineer_mellum"
6
 
 
11
  device_map="auto"
12
  )
13
 
14
+ def run(state: dict) -> dict:
15
+ """Software Engineer generates clean, modern UI code using best practices"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  messages = state["messages"]
17
  prompt = messages[-1].content
18
 
19
+ # Enhance the prompt with UI implementation guidelines
20
  enhanced_prompt = f"""
21
+ Objective
22
+
23
+ Generate modern, responsive, and accessible UI code that is visually appealing and adheres to current frontend development best practices.
24
+
25
+ 1. Styling Framework: Tailwind CSS
26
+ • Use Tailwind CSS utility classes for styling all elements.
27
+ • Apply spacing, typography, sizing, and layout using Tailwind classes.
28
+ • Follow a mobile-first design approach.
29
+ • Use Tailwind’s built-in responsive breakpoints (sm, md, lg, xl, 2xl) to adapt layouts for different screen sizes.
30
+
31
+ 2. Layout Techniques
32
+ • Use CSS Grid for complex, multi-column or two-dimensional layouts.
33
+ • Use Flexbox for flexible alignment of components like navigation bars, cards, buttons, and modals.
34
+ • Maintain consistent spacing with utility classes such as gap, space-x, space-y, p-*, and m-*.
35
+
36
+ 3. Semantic HTML
37
+ • Use semantic HTML tags appropriately: <header>, <nav>, <main>, <section>, <article>, <footer>, etc.
38
+ • Avoid unnecessary <div> elements to prevent cluttered and unstructured markup.
39
+ • Ensure proper nesting and hierarchy of elements.
40
+
41
+ 4. Accessibility
42
+ • Add ARIA labels, role attributes, and alt text where needed for screen reader support.
43
+ • Ensure keyboard accessibility with tabindex, proper focus states, and interactive elements being navigable.
44
+ • Use <label> elements properly linked to form fields via the for attribute.
45
+
46
+ 5. Responsive Design
47
+ • Use Tailwind’s responsive utilities to adjust layouts across various screen sizes.
48
+ • Design components to be fully usable on both desktop and mobile devices.
49
+ • Use collapsible or toggleable UI patterns (e.g., hamburger menus) for smaller viewports.
50
+
51
+ 6. Theming and Styling Consistency
52
+ • Define and use CSS variables (--primary-color, --font-family, etc.) for theme consistency across components.
53
+ • Maintain a clear visual hierarchy with consistent font sizes, weights, and colors.
54
+ • Customize Tailwind’s theme configuration if needed for project-specific design tokens.
55
+
56
+ 7. JavaScript and Interactivity
57
+ • Add interactivity using plain JavaScript, Alpine.js, or React if specified.
58
+ • Implement common UI components such as modals, dropdowns, tooltips, accordions with appropriate open/close behavior.
59
+ • Provide user feedback through form validations, dynamic updates, and transitions.
60
+
61
+ 8. Loading and Error States
62
+ • Implement loading states using spinners, skeleton screens, or placeholders while data is being fetched or actions are processing.
63
+ • Show error states using alerts, banners, or toast messages when applicable.
64
+ • Use conditional rendering or state flags to handle visibility and transitions between states.
65
+
66
+ 9. Component Structure and Reusability
67
+ • Break down the UI into modular, reusable components (e.g., Button, Card, Modal, Form).
68
+ • Each component should:
69
+ • Be self-contained with a clear purpose.
70
+ • Accept inputs or props when necessary.
71
+ • Maintain responsive and accessible markup by default.
72
+
73
+ 10. Code Quality Standards
74
+ • Write clean, readable, and maintainable code.
75
+ • Remove unused classes, scripts, or markup.
76
+ • Follow consistent naming conventions and indentation rules.
77
+ • Add comments only when necessary for clarity.
78
+
79
+
80
  Original requirements: {prompt}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
  """
82
 
83
+ input_ids = tokenizer(enhanced_prompt, return_tensors="pt").input_ids.to(model.device)
84
+ output_ids = model.generate(input_ids, max_new_tokens=3000)
85
+ output = tokenizer.decode(output_ids[0], skip_special_tokens=True)
86
+
 
87
  return {
88
  "messages": [AIMessage(content=output)],
89
  "chat_log": state["chat_log"] + [{"role": "Software Engineer", "content": output}],
agents/ui_designer_agent.py CHANGED
@@ -1,10 +1,8 @@
1
  from transformers import AutoTokenizer, AutoModelForCausalLM
2
  import torch
3
  from langchain_core.messages import AIMessage
4
- import asyncio
5
- from typing import Generator, Dict, Any
6
 
7
- MODEL_REPO = "Rahul-8799/ui_designer_mistral"
8
 
9
  tokenizer = AutoTokenizer.from_pretrained(MODEL_REPO, trust_remote_code=True)
10
  model = AutoModelForCausalLM.from_pretrained(
@@ -13,64 +11,30 @@ model = AutoModelForCausalLM.from_pretrained(
13
  device_map="auto"
14
  )
15
 
16
- async def stream_inference(prompt: str) -> Generator[str, None, None]:
17
- """Stream the model's output token by token"""
18
- input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
19
-
20
- for _ in range(100):
21
- output_ids = model.generate(
22
- input_ids,
23
- max_new_tokens=1,
24
- pad_token_id=tokenizer.eos_token_id
25
- )
26
-
27
- new_token = output_ids[0][-1]
28
- if new_token == tokenizer.eos_token_id:
29
- break
30
-
31
- token_text = tokenizer.decode([new_token])
32
- yield token_text
33
-
34
- input_ids = output_ids
35
- await asyncio.sleep(0.05)
36
-
37
- async def run(state: Dict[str, Any]) -> Dict[str, Any]:
38
- """UI Designer creates responsive and interactive UI designs"""
39
  messages = state["messages"]
40
  prompt = messages[-1].content
41
 
42
- # Enhance the prompt with responsive design requirements
43
  enhanced_prompt = f"""
44
- Create a modern, responsive UI design following these requirements:
45
- 1. Mobile-first approach with responsive breakpoints
46
- 2. Modern CSS features (Flexbox, Grid, CSS Variables)
47
- 3. Interactive elements with JavaScript
48
- 4. Smooth animations and transitions
49
- 5. Accessibility features
50
- 6. Cross-browser compatibility
 
 
51
 
52
  Original requirements: {prompt}
53
-
54
- Provide the design in this format:
55
-
56
- ## Responsive Layout
57
- [Describe the responsive layout structure]
58
-
59
- ## CSS Framework
60
- [Specify CSS framework and custom styles]
61
-
62
- ## JavaScript Features
63
- [List interactive features and animations]
64
-
65
- ## Component Structure
66
- [Describe component hierarchy and relationships]
67
  """
68
 
69
- # Stream the output
70
- output = ""
71
- async for token in stream_inference(enhanced_prompt):
72
- output += token
73
-
74
  return {
75
  "messages": [AIMessage(content=output)],
76
  "chat_log": state["chat_log"] + [{"role": "UI Designer", "content": output}],
 
1
  from transformers import AutoTokenizer, AutoModelForCausalLM
2
  import torch
3
  from langchain_core.messages import AIMessage
 
 
4
 
5
# NOTE(review): this is the UI Designer agent, but the repo id below matches the
# Product Manager checkpoint; the previous revision used
# "Rahul-8799/ui_designer_mistral". Confirm the swap is intentional and not a
# copy-paste slip from product_manager_agent.py.
MODEL_REPO = "Rahul-8799/product_manager_mistral"
6
 
7
  tokenizer = AutoTokenizer.from_pretrained(MODEL_REPO, trust_remote_code=True)
8
  model = AutoModelForCausalLM.from_pretrained(
 
11
  device_map="auto"
12
  )
13
 
14
+ def run(state: dict) -> dict:
15
+ """UI Designer creates beautiful and structured UI designs with proper spacing and layout"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  messages = state["messages"]
17
  prompt = messages[-1].content
18
 
19
+ # Enhance the prompt with UI design principles
20
  enhanced_prompt = f"""
21
+ Create a beautiful and well-structured UI design following these principles:
22
+ 1. Use proper spacing and padding (recommended: 1rem/16px for padding, 2rem/32px for margins)
23
+ 2. Implement a consistent color scheme
24
+ 3. Ensure proper hierarchy with clear headings
25
+ 4. Use responsive design principles
26
+ 5. Implement proper grid system
27
+ 6. Add smooth transitions and hover effects
28
+ 7. Ensure proper contrast and readability
29
+ 8. Use modern UI components and patterns
30
 
31
  Original requirements: {prompt}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
  """
33
 
34
+ input_ids = tokenizer(enhanced_prompt, return_tensors="pt").input_ids.to(model.device)
35
+ output_ids = model.generate(input_ids, max_new_tokens=3000)
36
+ output = tokenizer.decode(output_ids[0], skip_special_tokens=True)
37
+
 
38
  return {
39
  "messages": [AIMessage(content=output)],
40
  "chat_log": state["chat_log"] + [{"role": "UI Designer", "content": output}],
app.py CHANGED
@@ -1,148 +1,21 @@
1
  import gradio as gr
2
- import asyncio
3
- from typing import Generator, List, Dict, Any
4
  from utils.langgraph_pipeline import run_pipeline_and_save
5
 
6
- class AgentInference:
7
- def __init__(self):
8
- self.current_agent = None
9
- self.chat_log = []
10
- self.is_running = False
11
-
12
- async def stream_agent_output(self, agent_name: str, prompt: str) -> Generator[str, None, None]:
13
- """Stream output from a single agent"""
14
- self.current_agent = agent_name
15
- # Simulate streaming output with delays
16
- yield f"🤖 {agent_name} is thinking..."
17
- await asyncio.sleep(1)
18
-
19
- # Get agent output
20
- result = await self.get_agent_output(agent_name, prompt)
21
-
22
- # Stream the output word by word
23
- words = result.split()
24
- for word in words:
25
- yield f"{word} "
26
- await asyncio.sleep(0.1)
27
-
28
- self.chat_log.append({"role": agent_name, "content": result})
29
- yield "\n\n"
30
-
31
- async def get_agent_output(self, agent_name: str, prompt: str) -> str:
32
- """Get output from a specific agent"""
33
- # This would be replaced with actual agent calls
34
- agents = {
35
- "Product Manager": "Analyzing requirements and defining product specifications...",
36
- "Project Manager": "Creating project timeline and resource allocation...",
37
- "Software Architect": "Designing system architecture and technical specifications...",
38
- "UI Designer": "Creating beautiful and user-friendly interface designs...",
39
- "Software Engineer": "Implementing the UI components and functionality...",
40
- "Quality Assurance": "Reviewing and testing the implementation..."
41
- }
42
- return agents.get(agent_name, "Processing...")
43
-
44
- async def run_inference(self, prompt: str) -> Generator[Dict[str, Any], None, None]:
45
- """Run inference through all agents with streaming output"""
46
- self.is_running = True
47
- self.chat_log = []
48
-
49
- agents = [
50
- "Product Manager",
51
- "Project Manager",
52
- "Software Architect",
53
- "UI Designer",
54
- "Software Engineer",
55
- "Quality Assurance"
56
- ]
57
-
58
- for agent in agents:
59
- if not self.is_running:
60
- break
61
-
62
- async for output in self.stream_agent_output(agent, prompt):
63
- yield {
64
- "agent": agent,
65
- "output": output,
66
- "chat_log": self.chat_log
67
- }
68
-
69
- # Add a small delay between agents
70
- await asyncio.sleep(0.5)
71
-
72
- # Generate final output
73
- yield {
74
- "agent": "System",
75
- "output": "🎉 UI Generation Complete!",
76
- "chat_log": self.chat_log
77
- }
78
-
79
- inference_engine = AgentInference()
80
-
81
- def format_chat_log(chat_log: List[Dict[str, Any]]) -> List[tuple]:
82
- """Format chat log for display"""
83
- formatted_log = []
84
- for entry in chat_log:
85
- role = entry["role"]
86
- content = entry["content"]
87
- formatted_log.append((f"**{role}**:", content))
88
- return formatted_log
89
-
90
- async def handle_run(prompt: str) -> Generator[tuple, None, None]:
91
- """Handle the run button click with streaming output"""
92
- async for update in inference_engine.run_inference(prompt):
93
- formatted_log = format_chat_log(update["chat_log"])
94
- yield formatted_log, None # None for file_output until complete
95
 
96
  with gr.Blocks() as demo:
97
- gr.Markdown("""
98
- # 🔧 Multi-Agent UI Generator (Real-time Inference)
99
-
100
- This system uses multiple AI agents working together to generate beautiful UI designs in real-time:
101
- 1. Product Manager: Defines requirements
102
- 2. Project Manager: Creates project plan
103
- 3. Software Architect: Designs system architecture
104
- 4. UI Designer: Creates beautiful UI design
105
- 5. Software Engineer: Implements the code
106
- 6. Quality Assurance: Reviews and suggests improvements
107
-
108
- Watch as each agent contributes to the design in real-time!
109
- """)
110
-
111
- with gr.Row():
112
- with gr.Column(scale=2):
113
- input_box = gr.Textbox(
114
- lines=4,
115
- label="Enter your product idea prompt",
116
- placeholder="Describe the website or UI you want to create..."
117
- )
118
- run_btn = gr.Button("Generate Website", variant="primary")
119
- stop_btn = gr.Button("Stop Generation", variant="stop")
120
-
121
- with gr.Column(scale=3):
122
- chatbox = gr.Chatbot(
123
- label="Agent Conversation Log",
124
- type="messages",
125
- height=600
126
- )
127
- file_output = gr.File(label="Download UI ZIP")
128
-
129
- # Handle run button click
130
  run_btn.click(
131
  fn=handle_run,
132
  inputs=[input_box],
133
  outputs=[chatbox, file_output],
134
- api_name="generate"
135
- )
136
-
137
- # Handle stop button click
138
- def stop_generation():
139
- inference_engine.is_running = False
140
- return "Generation stopped by user"
141
-
142
- stop_btn.click(
143
- fn=stop_generation,
144
- outputs=[chatbox]
145
  )
146
 
147
- demo.queue()
148
  demo.launch()
 
1
  import gradio as gr
 
 
2
  from utils.langgraph_pipeline import run_pipeline_and_save
3
 
4
def handle_run(prompt):
    """Gradio click handler: run the full agent pipeline for *prompt*.

    Returns the ``(chat_log, zip_path)`` pair produced by
    ``run_pipeline_and_save`` — the agent conversation log for the chatbot
    component and the generated site's ZIP path for the file component.
    """
    return run_pipeline_and_save(prompt)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
 
8
# ——— Gradio front-end ———
# Single-page UI: a prompt textbox in, the agent conversation log and a
# downloadable ZIP of the generated site out.
with gr.Blocks() as demo:
    gr.Markdown("# 🔧 Multi-Agent UI Generator")
    # Free-form stakeholder prompt describing the product/website to generate.
    input_box = gr.Textbox(lines=4, label="Enter your product idea prompt")
    run_btn = gr.Button("Generate Website")
    # NOTE(review): type="messages" expects entries shaped like
    # {"role": ..., "content": ...}; the pipeline's chat_log uses agent names
    # ("Product Manager", ...) as roles rather than "user"/"assistant" —
    # confirm Gradio renders these as intended.
    chatbox = gr.Chatbot(label="Agent Conversation Log", type="messages")
    file_output = gr.File(label="Download UI ZIP")

    # Blocking call: handle_run returns (chat_log, zip_path), mapped in order
    # onto the two outputs below.
    run_btn.click(
        fn=handle_run,
        inputs=[input_box],
        outputs=[chatbox, file_output],
    )

demo.launch()
utils/langgraph_pipeline.py CHANGED
@@ -1,4 +1,4 @@
1
- import uuid, zipfile, re, json
2
  from pathlib import Path
3
  from typing import TypedDict, List, Dict, Any, Tuple
4
 
@@ -21,8 +21,6 @@ from agents import (
21
  class InputState(TypedDict):
22
  messages: List[BaseMessage]
23
  chat_log: List[Dict[str, Any]]
24
- iteration: int
25
- feedback: str
26
 
27
  class OutputState(TypedDict):
28
  pm_output: str
@@ -32,8 +30,6 @@ class OutputState(TypedDict):
32
  dev_output: str
33
  qa_output: str
34
  chat_log: List[Dict[str, Any]]
35
- iteration: int
36
- feedback: str
37
 
38
  # ——————————————
39
  # 2) Wrap agents so they see full history
@@ -41,21 +37,12 @@ class OutputState(TypedDict):
41
  def wrap_agent(agent_run, output_key: str):
42
  def node(state: Dict[str, Any]) -> Dict[str, Any]:
43
  history = state["messages"]
44
- log = state["chat_log"]
45
- iteration = state.get("iteration", 0)
46
- feedback = state.get("feedback", "")
47
-
48
- # Add feedback to the prompt if it exists
49
- if feedback:
50
- history = history + [AIMessage(content=f"Previous feedback: {feedback}")]
51
-
52
- result = agent_run({"messages": history, "chat_log": log})
53
  return {
54
  "messages": history + result["messages"],
55
- "chat_log": result["chat_log"],
56
- output_key: result[output_key],
57
- "iteration": iteration,
58
- "feedback": feedback
59
  }
60
  return node
61
 
@@ -64,84 +51,50 @@ def wrap_agent(agent_run, output_key: str):
64
  # ——————————————
65
  def bridge_to_pm(state: Dict[str, Any]) -> Dict[str, Any]:
66
  history = state["messages"]
67
- log = state["chat_log"]
68
- iteration = state.get("iteration", 0)
69
- feedback = state.get("feedback", "")
70
-
71
  if not history or not isinstance(history[-1], HumanMessage):
72
  raise ValueError("bridge_to_pm expected a HumanMessage at history end")
73
-
74
  prompt = history[-1].content
75
  spec_prompt = (
76
- f"# Stakeholder Prompt (Iteration {iteration})\n\n"
77
  f"\"{prompt}\"\n\n"
78
- )
79
-
80
- if feedback:
81
- spec_prompt += f"Previous feedback to consider:\n{feedback}\n\n"
82
-
83
- spec_prompt += (
84
  "Generate a structured product specification including:\n"
85
  "- Goals\n"
86
  "- Key features\n"
87
  "- User stories\n"
88
  "- Success metrics\n"
89
  )
90
-
91
  return {
92
  "messages": [AIMessage(content=spec_prompt)],
93
  "chat_log": log + [{"role": "System", "content": spec_prompt}],
94
- "iteration": iteration,
95
- "feedback": feedback
96
  }
97
 
98
  # ——————————————
99
- # 4) Feedback Loop Handler
100
- # ——————————————
101
- def handle_feedback(state: Dict[str, Any]) -> Dict[str, Any]:
102
- qa_output = state["qa_output"]
103
- iteration = state.get("iteration", 0)
104
-
105
- # Check if we need another iteration
106
- if iteration < 3: # Maximum 3 iterations
107
- return {
108
- "messages": state["messages"],
109
- "chat_log": state["chat_log"],
110
- "iteration": iteration + 1,
111
- "feedback": f"Iteration {iteration + 1} feedback: {qa_output}"
112
- }
113
- return END
114
-
115
- # ——————————————
116
- # 5) Build & compile the LangGraph
117
  # ——————————————
118
  graph = StateGraph(input=InputState, output=OutputState)
119
 
120
- # Add nodes
121
- graph.add_node("BridgePM", bridge_to_pm)
122
- graph.add_node("ProductManager", wrap_agent(product_manager_agent.run, "pm_output"))
123
- graph.add_node("ProjectManager", wrap_agent(project_manager_agent.run, "proj_output"))
124
- graph.add_node("SoftwareArchitect", wrap_agent(software_architect_agent.run, "arch_output"))
125
- graph.add_node("UIDesigner", wrap_agent(ui_designer_agent.run, "ui_design_output"))
126
- graph.add_node("SoftwareEngineer", wrap_agent(software_engineer_agent.run, "dev_output"))
127
- graph.add_node("QualityAssurance", wrap_agent(quality_assurance_agent.run, "qa_output"))
128
- graph.add_node("FeedbackHandler", handle_feedback)
129
 
130
- # Add edges with feedback loop
131
  graph.set_entry_point("BridgePM")
132
- graph.add_edge("BridgePM", "ProductManager")
133
- graph.add_edge("ProductManager", "ProjectManager")
134
- graph.add_edge("ProjectManager", "SoftwareArchitect")
135
- graph.add_edge("SoftwareArchitect", "UIDesigner")
136
- graph.add_edge("UIDesigner", "SoftwareEngineer")
137
  graph.add_edge("SoftwareEngineer", "QualityAssurance")
138
- graph.add_edge("QualityAssurance", "FeedbackHandler")
139
- graph.add_edge("FeedbackHandler", "BridgePM") # Feedback loop back to start
140
 
141
  compiled_graph = graph.compile()
142
 
143
  # ——————————————
144
- # 6) Parse spec into sections
145
  # ——————————————
146
  def parse_spec(spec: str) -> Dict[str, List[str]]:
147
  sections: Dict[str, List[str]] = {}
@@ -152,115 +105,75 @@ def parse_spec(spec: str) -> Dict[str, List[str]]:
152
  return sections
153
 
154
  # ——————————————
155
- # 7) Run pipeline, generate site, zip, return (chat_log, zip_path)
156
  # ——————————————
157
  def run_pipeline_and_save(prompt: str) -> Tuple[List[Dict[str, Any]], str]:
158
  # a) invoke agents
159
- initial_state = {"messages": [HumanMessage(content=prompt)], "chat_log": [], "iteration": 0, "feedback": ""}
160
- final_state = compiled_graph.invoke(initial_state)
161
-
162
- chat_log = final_state["chat_log"]
163
- dev_output = final_state["dev_output"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
164
 
165
- # b) parse the developer output to extract code sections
166
- sections = parse_code_sections(dev_output)
167
-
168
- # c) write & zip
169
- site_id = uuid.uuid4().hex
170
- out_dir = Path("output")
171
  site_dir = out_dir / f"site_{site_id}"
172
  site_dir.mkdir(parents=True, exist_ok=True)
173
 
174
- # Write HTML file
175
- (site_dir / "index.html").write_text(sections.get("HTML Structure", ""), encoding="utf-8")
176
-
177
- # Write CSS file
178
- (site_dir / "styles.css").write_text(sections.get("CSS Styles", ""), encoding="utf-8")
179
-
180
- # Write JavaScript file
181
- (site_dir / "script.js").write_text(sections.get("JavaScript", ""), encoding="utf-8")
182
-
183
- # Write Tailwind config
184
- (site_dir / "tailwind.config.js").write_text(sections.get("Tailwind Config", ""), encoding="utf-8")
185
-
186
- # Create package.json for dependencies
187
- package_json = {
188
- "name": f"site_{site_id}",
189
- "version": "1.0.0",
190
- "description": "Generated responsive website",
191
- "scripts": {
192
- "build": "tailwindcss -i ./styles.css -o ./dist/output.css",
193
- "watch": "tailwindcss -i ./styles.css -o ./dist/output.css --watch"
194
- },
195
- "dependencies": {
196
- "tailwindcss": "^3.4.1",
197
- "alpinejs": "^3.13.3"
198
- }
199
- }
200
-
201
- (site_dir / "package.json").write_text(
202
- json.dumps(package_json, indent=2),
203
- encoding="utf-8"
204
- )
205
-
206
- # Create README
207
- readme_content = f"""# Generated Website
208
-
209
- This is a responsive website generated by the Multi-Agent UI Generator.
210
-
211
- ## Setup
212
-
213
- 1. Install dependencies:
214
- ```bash
215
- npm install
216
- ```
217
-
218
- 2. Build the CSS:
219
- ```bash
220
- npm run build
221
- ```
222
-
223
- 3. For development with live reload:
224
- ```bash
225
- npm run watch
226
- ```
227
 
228
- ## Features
229
-
230
- - Responsive design using Tailwind CSS
231
- - Interactive elements with JavaScript
232
- - Modern animations and transitions
233
- - Mobile-first approach
234
- """
235
-
236
- (site_dir / "README.md").write_text(readme_content, encoding="utf-8")
237
-
238
- # Create zip file
239
  zip_path = out_dir / f"site_{site_id}.zip"
240
  with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zf:
241
  for f in site_dir.iterdir():
242
  zf.write(f, arcname=f.name)
243
 
244
  return chat_log, str(zip_path)
245
-
246
- def parse_code_sections(output: str) -> Dict[str, str]:
247
- """Parse code sections from the developer output"""
248
- sections = {}
249
- current_section = None
250
- current_code = []
251
-
252
- for line in output.split("\n"):
253
- if line.startswith("## "):
254
- if current_section:
255
- sections[current_section] = "\n".join(current_code)
256
- current_section = line[3:].strip()
257
- current_code = []
258
- elif line.startswith("```"):
259
- continue
260
- elif current_section:
261
- current_code.append(line)
262
-
263
- if current_section:
264
- sections[current_section] = "\n".join(current_code)
265
-
266
- return sections
 
1
+ import uuid, zipfile, re
2
  from pathlib import Path
3
  from typing import TypedDict, List, Dict, Any, Tuple
4
 
 
21
  class InputState(TypedDict):
22
  messages: List[BaseMessage]
23
  chat_log: List[Dict[str, Any]]
 
 
24
 
25
  class OutputState(TypedDict):
26
  pm_output: str
 
30
  dev_output: str
31
  qa_output: str
32
  chat_log: List[Dict[str, Any]]
 
 
33
 
34
  # ——————————————
35
  # 2) Wrap agents so they see full history
 
37
def wrap_agent(agent_run, output_key: str):
    """Adapt an agent's ``run`` callable into a LangGraph node.

    The returned node forwards the full message history and chat log to the
    agent, then merges the agent's reply back into the shared state, storing
    the agent's primary result under ``output_key``.
    """
    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        history = state["messages"]
        result = agent_run({"messages": history, "chat_log": state["chat_log"]})
        # Append the agent's new messages to the running history; the agent
        # already produced the extended chat log itself.
        updated: Dict[str, Any] = {"messages": history + result["messages"]}
        updated["chat_log"] = result["chat_log"]
        updated[output_key] = result[output_key]
        return updated
    return node
48
 
 
51
  # ——————————————
52
def bridge_to_pm(state: Dict[str, Any]) -> Dict[str, Any]:
    """Turn the stakeholder's HumanMessage into a spec-writing prompt.

    Validates that the conversation ends with a HumanMessage, then emits an
    AIMessage instructing the Product Manager agent to produce a structured
    specification. Raises ValueError if the history tail is not a
    HumanMessage.
    """
    msgs = state["messages"]
    if not msgs or not isinstance(msgs[-1], HumanMessage):
        raise ValueError("bridge_to_pm expected a HumanMessage at history end")

    stakeholder_prompt = msgs[-1].content
    # Assemble the PM briefing; pieces join to the exact same text as before.
    spec_prompt = "".join([
        "# Stakeholder Prompt\n\n",
        f'"{stakeholder_prompt}"\n\n',
        "Generate a structured product specification including:\n",
        "- Goals\n",
        "- Key features\n",
        "- User stories\n",
        "- Success metrics\n",
    ])

    log_entry = {"role": "System", "content": spec_prompt}
    return {
        "messages": [AIMessage(content=spec_prompt)],
        "chat_log": state["chat_log"] + [log_entry],
    }
71
 
72
  # ——————————————
73
+ # 4) Build & compile the LangGraph
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
  # ——————————————
75
# Build the LangGraph: one node per agent role, wrapped so each sees the full
# history, plus the BridgePM node that seeds the Product Manager's prompt.
graph = StateGraph(input=InputState, output=OutputState)

graph.add_node("BridgePM", bridge_to_pm)
graph.add_node("ProductManager", wrap_agent(product_manager_agent.run, "pm_output"))
graph.add_node("ProjectManager", wrap_agent(project_manager_agent.run, "proj_output"))
graph.add_node("SoftwareArchitect",wrap_agent(software_architect_agent.run, "arch_output"))
graph.add_node("UIDesigner", wrap_agent(ui_designer_agent.run, "ui_design_output"))
graph.add_node("SoftwareEngineer", wrap_agent(software_engineer_agent.run, "dev_output"))
graph.add_node("QualityAssurance", wrap_agent(quality_assurance_agent.run, "qa_output"))

# Strictly linear pipeline: BridgePM -> PM -> PjM -> Architect -> UI Designer
# -> Engineer -> QA -> END (the earlier feedback loop was removed).
graph.set_entry_point("BridgePM")
graph.add_edge("BridgePM", "ProductManager")
graph.add_edge("ProductManager", "ProjectManager")
graph.add_edge("ProjectManager", "SoftwareArchitect")
graph.add_edge("SoftwareArchitect","UIDesigner")
graph.add_edge("UIDesigner", "SoftwareEngineer")
graph.add_edge("SoftwareEngineer", "QualityAssurance")
graph.add_edge("QualityAssurance", END)

# Compiled once at import time; run_pipeline_and_save invokes this per request.
compiled_graph = graph.compile()
95
 
96
  # ——————————————
97
+ # 5) Parse spec into sections
98
  # ——————————————
99
  def parse_spec(spec: str) -> Dict[str, List[str]]:
100
  sections: Dict[str, List[str]] = {}
 
105
  return sections
106
 
107
  # ——————————————
108
+ # 6) Run pipeline, generate site, zip, return (chat_log, zip_path)
109
  # ——————————————
110
def run_pipeline_and_save(prompt: str) -> Tuple[List[Dict[str, Any]], str]:
    """Run the agent pipeline, render a static site from the QA spec, zip it.

    Returns ``(chat_log, zip_path)``: the full agent conversation log and the
    generated ZIP archive path as a string.
    """
    # a) invoke agents
    initial_state = {"messages": [HumanMessage(content=prompt)], "chat_log": []}
    final_state = compiled_graph.invoke(initial_state)

    chat_log = final_state["chat_log"]
    qa_output = final_state["qa_output"]

    # b) parse spec
    # NOTE(review): section names must match what the QA agent actually emits
    # ("Key features", "User stories") — empty lists otherwise; confirm.
    spec = parse_spec(qa_output)
    features = spec.get("Key features", [])
    testimonials = spec.get("User stories", [])

    # c) build HTML
    title = prompt.title()
    domain = prompt.replace(" ", "").lower() + ".com"
    # NOTE(review): feature/testimonial text is interpolated into HTML without
    # escaping — acceptable only for trusted prompts; confirm.
    cards_html = "\n".join(f"<div class='card'><h3>{f}</h3></div>" for f in features)
    test_html = "\n".join(f"<blockquote>{t}</blockquote>" for t in testimonials)

    html_code = f"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width,initial-scale=1">
<title>{title}</title>
<link rel="stylesheet" href="styles.css">
</head>
<body>
<header><h1>{title}</h1></header>
<section id="features">
<h2>Features</h2>
<div class="cards">
{cards_html}
</div>
</section>
<section id="testimonials">
<h2>Testimonials</h2>
{test_html or '<p>No testimonials provided.</p>'}
</section>
<section id="contact">
<h2>Contact Us</h2>
<p>Email: info@{domain}</p>
</section>
</body>
</html>"""

    # d) basic CSS
    css_code = """
body { font-family: Arial, sans-serif; margin: 1em; line-height: 1.5; }
header { text-align: center; margin-bottom: 2em; }
.cards { display: grid; grid-template-columns: repeat(auto-fit,minmax(150px,1fr)); gap: 1em; }
.card { background: #f9f9f9; padding: 1em; border-radius: 8px; box-shadow: 0 2px 5px rgba(0,0,0,0.1); text-align: center; }
blockquote { font-style: italic; margin: 1em; padding: 0.5em; background: #eef; border-left: 4px solid #99f; }
"""

    # e) write & zip
    # mkdir(parents=True) also creates the shared "output" parent on first run.
    site_id = uuid.uuid4().hex
    out_dir = Path("output")
    site_dir = out_dir / f"site_{site_id}"
    site_dir.mkdir(parents=True, exist_ok=True)

    (site_dir / "index.html").write_text(html_code, encoding="utf-8")
    (site_dir / "styles.css").write_text(css_code, encoding="utf-8")

    # Flat archive: only the top-level files of site_dir, stored by basename.
    zip_path = out_dir / f"site_{site_id}.zip"
    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zf:
        for f in site_dir.iterdir():
            zf.write(f, arcname=f.name)

    return chat_log, str(zip_path)