# softAi / app.py
# NOTE: the following lines are Hugging Face Space page chrome that was pasted
# into the source; kept as comments so the file remains valid Python:
#   Bofandra's picture — Update app.py — e917a44 verified
import os
import gradio as gr
from huggingface_hub import InferenceClient
# ✅ Set up Hugging Face Inference API client with DeepSeek-R1
# (the original comment said "Phi-3", but the configured model is DeepSeek).
# Fail fast with a clear message when HF_TOKEN is unset, instead of the bare
# KeyError that os.environ["HF_TOKEN"] raises at import time.
_hf_token = os.environ.get("HF_TOKEN")
if not _hf_token:
    raise RuntimeError(
        "HF_TOKEN environment variable is not set; it is required to call "
        "the Hugging Face Inference API."
    )
client = InferenceClient(
    model="deepseek-ai/DeepSeek-R1-0528",
    token=_hf_token,
)
# 🧠 Function to generate software architecture
def generate_software_spec(name, description, architecture, components, deployment, platform, extra):
    """Ask the LLM to produce an architecture spec for the described app.

    All parameters mirror the Gradio form fields and are plain strings;
    ``extra`` may be empty or None. Returns the model's markdown reply
    (sequence diagram, process flow, and code snippets), or a human-readable
    "❌ Error: ..." string when the API call fails or returns no choices —
    callers render the return value directly, so errors must not raise.
    """
    prompt = f"""
You are a software architect assistant. Based on the following input, generate:
1. A **Sequence Diagram** in Mermaid syntax
2. A **Business Process Flow** in Mermaid syntax
3. Code Snippets for each component based on the selected tech stack.
App Name: {name}
Description: {description}
Architecture: {architecture}
Deployment: {deployment}
Platform: {platform}
Components & Tech Stack:
{components}
Extra requirements: {extra or "None"}
Return the result in this format:
### Sequence Diagram (Mermaid)
```mermaid
<sequence_diagram_here>
```
### Business Process Flow (Mermaid)
```mermaid
<flowchart_here>
```
### Code Snippets
#### Component: <Component 1>
```<language>
<code_here>
```
#### Component: <Component 2>
```<language>
<code_here>
```
"""
    print(prompt)  # debug: log the outgoing prompt to the Space console
    try:
        response = client.chat_completion(
            messages=[
                {"role": "system", "content": "You are a helpful software architecture assistant."},
                {"role": "user", "content": prompt},
            ]
        )
    except Exception as e:
        # Network, auth, or rate-limit failures from the Inference API used to
        # propagate as an unhandled traceback into the UI; surface them in the
        # same "❌ Error: ..." style the no-response path already uses.
        return f"❌ Error: {e}"
    print(response)  # debug: log the raw API response
    return response.choices[0].message.content if response and response.choices else "❌ Error: No response received."
# 🎨 Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("## 🧠 AI Software Architecture Assistant")
    with gr.Row():
        with gr.Column():
            name = gr.Textbox(label="App Name", placeholder="MyApp")
            description = gr.Textbox(label="Short Description", lines=2, placeholder="A system for ...")
            architecture = gr.Radio(["Monolithic", "Backend-Frontend"], label="Architecture Style", value="Backend-Frontend")
            deployment = gr.Radio(["Serverless", "VM"], label="Deployment Style", value="Serverless")
            platform = gr.Radio(["Web App", "Mobile App"], label="Target Platform", value="Web App")
            components = gr.Textbox(label="Components & Tech Stack", lines=4, placeholder="Backend: Python + Flask, Frontend: React")
            extra = gr.Textbox(label="Extra Requirements (optional)", lines=2)
            submit = gr.Button("Generate Design")
            status = gr.Markdown(visible=False)
        with gr.Column():
            output = gr.HTML(label="Output")

    def wrapper(name, description, architecture, components, deployment, platform, extra):
        """Run the generator and wrap its markdown reply as renderable HTML.

        Returns (html_string, status_update) — the second value hides the
        "Generating..." banner that show_spinner revealed.
        """
        import html
        import re

        result = generate_software_spec(name, description, architecture, components, deployment, platform, extra)

        def render_mermaid_blocks(text):
            # Pull every ```mermaid ...``` fenced block out of the markdown so
            # mermaid.js can render it; fall back to the (escaped) full text
            # when the reply contains no mermaid fences.
            blocks = re.findall(r"```mermaid\n(.*?)```", text, re.DOTALL)
            html_blocks = "".join(f"<div class='mermaid'>{block}</div>" for block in blocks)
            return html_blocks or html.escape(text)

        mermaid_html = render_mermaid_blocks(result)
        # Bug fix: the raw model reply may contain <tags>; escape it before
        # embedding in <pre>, otherwise the browser interprets it as HTML.
        escaped_result = html.escape(result)
        # NOTE(review): gr.HTML may sandbox/strip <script> tags depending on
        # the Gradio version, so mermaid rendering should be verified live.
        # The former mermaid.min.css <link> was removed — mermaid@10 ships no
        # such file on the CDN, so it was a dead 404 request.
        final_html = f"""
<script src="https://cdn.jsdelivr.net/npm/mermaid@10/dist/mermaid.min.js"></script>
<script>mermaid.initialize({{startOnLoad:true}});</script>
{mermaid_html}
<pre style='white-space: pre-wrap;'>{escaped_result}</pre>
"""
        return final_html, gr.update(visible=False)

    def show_spinner():
        # Reveal the status banner immediately, before the model call starts.
        return gr.update(visible=True, value="<center>⏳ Generating... Please wait.</center>")

    # Two listeners on the same button: the first (queue=False) shows the
    # spinner instantly; the second does the slow generation and hides it.
    submit.click(fn=show_spinner, inputs=[], outputs=[status], queue=False)
    submit.click(
        fn=wrapper,
        inputs=[name, description, architecture, components, deployment, platform, extra],
        outputs=[output, status],
        preprocess=False,
        postprocess=False,
        show_progress=True,
    )

demo.launch()