# NOTE: removed non-Python extraction residue (file-size line, git-blame hashes, line-number dump)
import os
import gradio as gr
from huggingface_hub import InferenceClient

# ✅ Hugging Face Inference API client pointed at DeepSeek-R1.
# Requires the HF_TOKEN environment variable (raises KeyError at import
# time if it is missing, which fails fast on a misconfigured deployment).
client = InferenceClient(
    model="deepseek-ai/DeepSeek-R1-0528",
    token=os.environ["HF_TOKEN"]
)
# 🧠 Function to generate software architecture
# 🧠 Function to generate software architecture
def generate_software_spec(name, description, architecture, components, deployment, platform, extra):
    """Ask the hosted LLM to generate diagrams and code snippets for an app design.

    Parameters mirror the Gradio inputs: app name, short description,
    architecture style, components/tech-stack text, deployment style,
    target platform, and optional extra requirements (may be empty/None).

    Returns the model's markdown response as a string, or an
    "❌ Error: ..." string when the request fails or comes back empty.
    """
    prompt = f"""
You are a software architect assistant. Based on the following input, generate:
1. A **Sequence Diagram** in Mermaid syntax
2. A **Business Process Flow** in Mermaid syntax
3. Code Snippets for each component based on the selected tech stack.
App Name: {name}
Description: {description}
Architecture: {architecture}
Deployment: {deployment}
Platform: {platform}
Components & Tech Stack:
{components}
Extra requirements: {extra or "None"}
Return the result in this format:
### Sequence Diagram (Mermaid)
```mermaid
<sequence_diagram_here>
```
### Business Process Flow (Mermaid)
```mermaid
<flowchart_here>
```
### Code Snippets
#### Component: <Component 1>
```<language>
<code_here>
```
#### Component: <Component 2>
```<language>
<code_here>
```
"""
    print(prompt)  # debug: log the outgoing prompt
    try:
        response = client.chat_completion(
            messages=[
                {"role": "system", "content": "You are a helpful software architecture assistant."},
                {"role": "user", "content": prompt}
            ]
        )
    except Exception as e:
        # The remote Inference API can fail (network issues, rate limits,
        # model loading). Surface a readable message instead of crashing
        # the Gradio handler, matching the existing "❌ Error" convention.
        return f"❌ Error: {e}"
    print(response)  # debug: log the raw response object
    return response.choices[0].message.content if response and response.choices else "❌ Error: No response received."
# 🎨 Gradio UI
# 🎨 Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("## 🧠 AI Software Architecture Assistant")
    with gr.Row():
        with gr.Column():
            name = gr.Textbox(label="App Name", placeholder="MyApp")
            description = gr.Textbox(label="Short Description", lines=2, placeholder="A system for ...")
            architecture = gr.Radio(["Monolithic", "Backend-Frontend"], label="Architecture Style", value="Backend-Frontend")
            deployment = gr.Radio(["Serverless", "VM"], label="Deployment Style", value="Serverless")
            platform = gr.Radio(["Web App", "Mobile App"], label="Target Platform", value="Web App")
            components = gr.Textbox(label="Components & Tech Stack", lines=4, placeholder="Backend: Python + Flask, Frontend: React")
            extra = gr.Textbox(label="Extra Requirements (optional)", lines=2)
            submit = gr.Button("Generate Design")
            # Hidden by default; shown while generation is in flight.
            status = gr.Markdown(visible=False)
        with gr.Column():
            output = gr.HTML(label="Output")

    def wrapper(name, description, architecture, components, deployment, platform, extra):
        """Run generation and wrap the result in HTML with Mermaid rendering.

        Returns (final_html, status_update) — the second value hides the
        "Generating..." spinner once the result is ready.
        """
        import html

        result = generate_software_spec(name, description, architecture, components, deployment, platform, extra)

        def render_mermaid_blocks(text):
            # Extract ```mermaid fenced blocks and wrap each in a
            # <div class='mermaid'> so mermaid.js can render them.
            import re
            blocks = re.findall(r"```mermaid\n(.*?)```", text, re.DOTALL)
            html_blocks = "".join(f"<div class='mermaid'>{block}</div>" for block in blocks)
            return html_blocks or text

        mermaid_html = render_mermaid_blocks(result)
        # NOTE: mermaid@10 ships no mermaid.min.css bundle on jsdelivr, so the
        # former stylesheet <link> was a dead 404 and has been removed.
        # The raw result is escaped before being placed in <pre>: the model's
        # markdown may contain '<' / '&' which would otherwise break the HTML
        # (or inject markup into the page).
        final_html = f"""
<script src="https://cdn.jsdelivr.net/npm/mermaid@10/dist/mermaid.min.js"></script>
<script>mermaid.initialize({{startOnLoad:true}});</script>
{mermaid_html}
<pre style='white-space: pre-wrap;'>{html.escape(result)}</pre>
"""
        return final_html, gr.update(visible=False)

    def show_spinner():
        """Reveal the status banner while generation runs."""
        return gr.update(visible=True, value="<center>⏳ Generating... Please wait.</center>")

    # Two click handlers: the first (unqueued) shows the spinner immediately,
    # the second does the actual generation and hides it when done.
    submit.click(fn=show_spinner, inputs=[], outputs=[status], queue=False)
    submit.click(
        fn=wrapper,
        inputs=[name, description, architecture, components, deployment, platform, extra],
        outputs=[output, status],
        preprocess=False,
        postprocess=False,
        show_progress=True
    )

demo.launch()