Bofandra committed on
Commit
91d19f3
·
verified ·
1 Parent(s): 6d9191c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +65 -70
app.py CHANGED
@@ -2,68 +2,55 @@ import os
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
4
 
5
- # ✅ Set up Hugging Face client with token from environment
6
  client = InferenceClient(
7
- provider="auto",
8
- api_key=os.environ["HF_TOKEN"],
9
- )
10
 
11
  # 🧠 Function to generate software architecture
12
  def generate_software_spec(name, description, architecture, components, deployment, platform, extra):
13
- print("generate_software_spec")
14
  prompt = f"""
15
- You are a software architect assistant. Based on the following input, generate:
16
- 1. A **Sequence Diagram** in Mermaid syntax
17
- 2. A **Business Process Flow** in Mermaid syntax
18
- 3. Code Snippets for each component based on the selected tech stack.
19
-
20
- ---
21
-
22
- App Name: {name}
23
- Description: {description}
24
- Architecture: {architecture}
25
- Deployment: {deployment}
26
- Platform: {platform}
27
- Components & Tech Stack:
28
- {components}
29
- Extra requirements: {extra or "None"}
30
-
31
- Return the result in this format:
32
-
33
- ### Sequence Diagram (Mermaid)
34
- ```mermaid
35
- <sequence_diagram_here>
36
- ```
37
-
38
- ### Business Process Flow (Mermaid)
39
- ```mermaid
40
- <flowchart_here>
41
- ```
42
-
43
- ### Code Snippets
44
- #### Component: <Component 1>
45
- ```<language>
46
- <code_here>
47
- ```
48
-
49
- #### Component: <Component 2>
50
- ```<language>
51
- <code_here>
52
- ```
53
- """
54
 
55
- print(prompt)
56
- # Generate response from Hugging Face Inference Client
57
- response = client.chat_completion(
58
- messages=[
59
- {"role": "system", "content": "You are a helpful software architecture assistant."},
60
- {"role": "user", "content": prompt}
61
- ],
62
- model="deepseek-ai/DeepSeek-R1-0528",
63
- max_tokens=2048,
64
- )
65
- print(response)
66
- return response.choices[0].message.content if response and response.choices else "Error: No response received."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
 
68
  # 🎨 Gradio UI
69
  with gr.Blocks() as demo:
@@ -72,11 +59,11 @@ with gr.Blocks() as demo:
72
  with gr.Row():
73
  with gr.Column():
74
  name = gr.Textbox(label="App Name", placeholder="MyApp")
75
- description = gr.Textbox(label="Short Description", lines=2, placeholder="A system that manages online tutoring...")
76
- architecture = gr.Radio(["Monolithic", "Backend-Frontend"], label="Architecture Style")
77
- deployment = gr.Radio(["Serverless", "VM"], label="Deployment Style")
78
- platform = gr.Radio(["Web App", "Mobile App"], label="Target Platform")
79
- components = gr.Textbox(label="Components & Tech Stack (e.g. Backend: Python + Flask, Frontend: React)", lines=4)
80
  extra = gr.Textbox(label="Extra Requirements (optional)", lines=2)
81
  submit = gr.Button("Generate Design")
82
  status = gr.Markdown(visible=False)
@@ -86,15 +73,23 @@ with gr.Blocks() as demo:
86
 
87
  def wrapper(name, description, architecture, components, deployment, platform, extra):
88
  result = generate_software_spec(name, description, architecture, components, deployment, platform, extra)
89
- html_content = """
90
- <div>{}</div>
91
- <script type='module'>
92
- import mermaid from 'https://cdn.jsdelivr.net/npm/mermaid@10/dist/mermaid.esm.min.mjs';
93
- mermaid.initialize({{startOnLoad: true}});
94
- </script>
95
- """.format(result)
96
-
97
- return html_content, gr.update(visible=False)
 
 
 
 
 
 
 
 
98
 
99
  def show_spinner():
100
  return gr.update(visible=True, value="<center>⏳ Generating... Please wait.</center>")
 
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
4
 
5
# ✅ Set up Hugging Face client
# HF_TOKEN must be set in the environment; a missing token fails fast at
# import time with a KeyError (same behavior as before).
_hf_token = os.environ["HF_TOKEN"]
client = InferenceClient(
    model="deepseek-ai/DeepSeek-R1-0528",
    token=_hf_token,
)
10
 
11
# 🧠 Function to generate software architecture
def generate_software_spec(name, description, architecture, components, deployment, platform, extra):
    """Ask the model for a sequence diagram, a business-process flow
    (both in Mermaid syntax) and per-component code snippets.

    Parameters mirror the Gradio form fields; ``extra`` may be an empty
    string or None (rendered as "None" in the prompt).

    Returns the raw model output text, or an error string when the
    backend yields no response.
    """
    prompt = f"""
    You are a software architect assistant. Based on the following input, generate:
    1. A **Sequence Diagram** in Mermaid syntax
    2. A **Business Process Flow** in Mermaid syntax
    3. Code Snippets for each component based on the selected tech stack.

    App Name: {name}
    Description: {description}
    Architecture: {architecture}
    Deployment: {deployment}
    Platform: {platform}
    Components & Tech Stack:
    {components}
    Extra requirements: {extra or "None"}

    Return the result in this format:

    ### Sequence Diagram (Mermaid)
    ```mermaid
    <sequence_diagram_here>
    ```

    ### Business Process Flow (Mermaid)
    ```mermaid
    <flowchart_here>
    ```

    ### Code Snippets
    #### Component: <Component 1>
    ```<language>
    <code_here>
    ```

    #### Component: <Component 2>
    ```<language>
    <code_here>
    ```
    """
    # 🔄 Get text response
    # FIX: InferenceClient.text_generation takes `max_new_tokens`, not
    # `max_tokens` (that keyword belongs to chat_completion); passing
    # `max_tokens` raises TypeError before any request is made.
    response = client.text_generation(prompt=prompt, max_new_tokens=2048)
    return response if response else "❌ Error: No response received."
54
 
55
  # 🎨 Gradio UI
56
  with gr.Blocks() as demo:
 
59
  with gr.Row():
60
  with gr.Column():
61
  name = gr.Textbox(label="App Name", placeholder="MyApp")
62
+ description = gr.Textbox(label="Short Description", lines=2, placeholder="A system for ...")
63
+ architecture = gr.Radio(["Monolithic", "Backend-Frontend"], label="Architecture Style", value="Backend-Frontend")
64
+ deployment = gr.Radio(["Serverless", "VM"], label="Deployment Style", value="Serverless")
65
+ platform = gr.Radio(["Web App", "Mobile App"], label="Target Platform", value="Web App")
66
+ components = gr.Textbox(label="Components & Tech Stack", lines=4, placeholder="Backend: Python + Flask, Frontend: React")
67
  extra = gr.Textbox(label="Extra Requirements (optional)", lines=2)
68
  submit = gr.Button("Generate Design")
69
  status = gr.Markdown(visible=False)
 
73
 
74
  def wrapper(name, description, architecture, components, deployment, platform, extra):
75
  result = generate_software_spec(name, description, architecture, components, deployment, platform, extra)
76
+
77
+ # Extract Mermaid blocks and wrap them properly
78
+ def render_mermaid_blocks(text):
79
+ import re
80
+ blocks = re.findall(r"```mermaid\n(.*?)```", text, re.DOTALL)
81
+ html_blocks = "".join([f"<div class='mermaid'>{block}</div>" for block in blocks])
82
+ return html_blocks or text
83
+
84
+ mermaid_html = render_mermaid_blocks(result)
85
+ final_html = f"""
86
+ <link rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/npm/mermaid@10/dist/mermaid.min.css\">
87
+ <script src=\"https://cdn.jsdelivr.net/npm/mermaid@10/dist/mermaid.min.js\"></script>
88
+ <script>mermaid.initialize({{startOnLoad:true}});</script>
89
+ {mermaid_html}
90
+ <pre style='white-space: pre-wrap;'>{result}</pre>
91
+ """
92
+ return final_html, gr.update(visible=False)
93
 
94
  def show_spinner():
95
  return gr.update(visible=True, value="<center>⏳ Generating... Please wait.</center>")