Nihal2000 committed on
Commit
85edf4d
·
verified ·
1 Parent(s): 04e85a2

Update ui/gradio_interface.py

Browse files
Files changed (1) hide show
  1. ui/gradio_interface.py +267 -166
ui/gradio_interface.py CHANGED
@@ -33,27 +33,21 @@ class DebugGenieUI:
33
  ):
34
  """Main analysis handler with progressive updates."""
35
  try:
36
- # Initialize outputs
37
- chat_history = []
38
- solutions_html = ""
39
- viz_html = ""
40
- voice_audio = None
41
- analysis_json = {}
42
-
43
  # Validate inputs
44
  if not error_text and screenshot is None:
45
  return (
46
- [{"role": "user", "content": "Analyze error"},
47
- {"role": "assistant", "content": "❌ Error: Please provide either an error message or a screenshot."}],
48
- "<div>No analysis performed.</div>",
49
- "<div>No visualization available.</div>",
50
  None,
51
  {},
52
- "Status: ❌ Missing input"
 
 
 
53
  )
54
 
55
- progress(0.1, desc="Starting analysis...")
56
- chat_history.append({"role": "user", "content": f"Analyze this error:\n```\n{error_text[:200]}...\n```"})
57
 
58
  # Build context
59
  context = {
@@ -62,71 +56,53 @@ class DebugGenieUI:
62
  'code_context': ""
63
  }
64
 
65
- # Add screenshot context if provided
66
  if screenshot is not None:
67
- context['type'] = 'ide' # Could be auto-detected
68
 
69
- progress(0.2, desc="Running multi-agent analysis...")
70
 
71
  # Run backend analysis
72
  result = await self.backend.analyze(context)
73
 
74
- progress(0.7, desc="Generating visualizations...")
75
-
76
- # Build chat response
77
- response_text = f"""
78
- ## 🎯 Root Cause
79
- {result.root_cause}
80
-
81
- ## βœ… Recommended Solutions
82
- """
83
- for idx, sol in enumerate(result.solutions[:3], 1):
84
- response_text += f"\n### {idx}. {sol.get('title', 'Solution')}\n"
85
- response_text += f"{sol.get('description', '')}\n"
86
- response_text += f"**Confidence:** {sol.get('probability', 0):.0%}\n"
87
 
88
- chat_history.append({"role": "assistant", "content": response_text})
 
89
 
90
- # Generate solutions accordion HTML
91
- solutions_html = self._generate_solutions_html(result.solutions)
92
 
93
- # Generate 3D visualization if we have stack trace
94
- # For demo, create a mock trace
95
  mock_trace = self.visualizer.generate_mock_trace()
96
  viz_html = self.visualizer.generate_flow(mock_trace)
97
 
98
- # Build analysis JSON
99
  analysis_json = {
100
  "execution_time": f"{result.execution_time:.2f}s",
101
- "confidence": result.confidence_score,
102
  "agents_used": list(result.agent_metrics.keys()),
103
- "metrics": result.agent_metrics
104
  }
105
 
106
- # Generate voice explanation for top solution
107
- progress(0.9, desc="Generating voice explanation...")
 
108
  if self.voice_explainer and result.solutions:
109
  try:
110
- # Convert first solution to RankedSolution format
111
  top_solution = result.solutions[0]
112
  ranked_sol = RankedSolution(
113
  rank=1,
114
  title=top_solution.get('title', 'Solution'),
115
  description=top_solution.get('description', ''),
116
- steps=[], # Would parse from fix_instructions
117
  confidence=top_solution.get('probability', 0.5),
118
  sources=[],
119
- why_ranked_here=f"Top ranked solution with {top_solution.get('probability', 0)*100:.0f}% confidence",
120
  trade_offs=[]
121
  )
122
 
123
- audio_bytes = self.voice_explainer.generate_explanation(
124
- ranked_sol,
125
- mode="walkthrough"
126
- )
127
-
128
  if audio_bytes:
129
- # Save to temp file for Gradio
130
  voice_path = self.voice_explainer.save_audio(
131
  audio_bytes,
132
  f"explanation_{hash(error_text[:100])}.mp3"
@@ -134,9 +110,10 @@ class DebugGenieUI:
134
  voice_audio = voice_path
135
  except Exception as e:
136
  logger.warning(f"Voice generation failed: {e}")
137
- voice_audio = None
138
 
139
- progress(1.0, desc="Complete!")
 
 
140
 
141
  return (
142
  chat_history,
@@ -144,161 +121,254 @@ class DebugGenieUI:
144
  viz_html,
145
  voice_audio,
146
  analysis_json,
147
- f"Status: βœ… Analysis complete in {result.execution_time:.2f}s"
 
 
 
148
  )
149
 
150
  except Exception as e:
151
  logger.error(f"Analysis failed: {e}")
152
  return (
153
- [{"role": "user", "content": "Analyze error"},
154
- {"role": "assistant", "content": f"❌ Error: Analysis failed: {str(e)}"}],
155
- f"<div class='error'>Error: {str(e)}</div>",
156
- "<div>Visualization unavailable</div>",
157
  None,
158
  {"error": str(e)},
159
- f"Status: ❌ Failed - {str(e)}"
 
 
 
160
  )
161
 
162
- def _generate_solutions_html(self, solutions: List[Dict]) -> str:
163
- """Generate HTML for solutions accordion."""
164
  if not solutions:
165
- return "<div>No solutions found.</div>"
166
 
167
- html = "<div style='font-family: sans-serif;'>"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
168
 
169
- for idx, sol in enumerate(solutions, 1):
170
  title = sol.get('title', f'Solution {idx}')
171
- desc = sol.get('description', 'No description')
172
  prob = sol.get('probability', 0.5)
173
 
174
- # Color code by probability
175
- color = "green" if prob > 0.7 else "orange" if prob > 0.4 else "red"
 
 
 
 
 
 
 
 
176
 
177
  html += f"""
178
- <details style='border: 2px solid {color}; border-radius: 8px; padding: 16px; margin: 12px 0;'>
179
- <summary style='font-size: 18px; font-weight: bold; cursor: pointer;'>
180
- {idx}. {title}
181
- <span style='color: {color}; float: right;'>
182
- {prob:.0%} confidence
183
- </span>
184
- </summary>
185
- <div style='margin-top: 12px; padding: 12px; background: #f5f5f5; border-radius: 4px;'>
186
- <p>{desc}</p>
187
  </div>
188
- </details>
 
 
189
  """
190
 
191
  html += "</div>"
192
  return html
193
 
194
  def create_interface(backend: DebugBackend):
195
- """Create the main Gradio interface."""
196
  ui = DebugGenieUI(backend)
197
 
198
- # Create Blocks without theme, css parameters (moved to launch)
199
  with gr.Blocks() as demo:
200
 
201
- gr.Markdown(
202
- """
203
- # 🧞 DebugGenie - AI Debugging Assistant
204
- ### Multi-Agent AI System for Intelligent Error Analysis
205
-
206
- Powered by Claude, Gemini, and GPT-4 working together to solve your bugs.
207
- """
208
- )
209
-
210
  with gr.Row():
211
- with gr.Column(scale=1):
212
- gr.Markdown("## πŸ“ Input")
213
-
214
- error_input = gr.Code(
215
- label="Paste Error Message / Stack Trace",
216
- language="python",
217
- lines=10
218
- )
 
 
 
 
 
 
219
 
220
- screenshot_input = gr.Image(
221
- label="Upload Screenshot (Optional)",
222
- type="pil",
223
- sources=["upload", "clipboard"]
224
- )
 
 
 
225
 
226
- codebase_files = gr.File(
227
- label="Upload Codebase Files (Optional)",
228
- file_count="multiple"
229
- )
 
 
 
 
 
 
 
 
 
230
 
231
  analyze_btn = gr.Button(
232
  "πŸ” Analyze Error",
233
  variant="primary",
234
- size="lg"
 
235
  )
236
 
237
- gr.Markdown(
238
- """
239
- ---
240
- **Tips:**
241
- - Paste complete error traces for best results
242
- - Screenshots help with IDE or browser errors
243
- - Upload related code files for deeper analysis
244
- """
245
  )
246
 
247
- with gr.Column(scale=2):
248
- gr.Markdown("## 🎯 Results")
 
249
 
250
- status_text = gr.Markdown("**Status:** Ready to analyze")
 
 
 
 
 
 
 
 
 
251
 
252
- with gr.Tabs():
253
- with gr.Tab("πŸ’¬ Chat"):
254
- # Updated to use type="messages" (default in Gradio 6)
255
- chatbot = gr.Chatbot(
256
- height=500,
257
- type="messages",
258
- avatar_images=(
259
- None,
260
- "https://em-content.zobj.net/thumbs/120/apple/354/genie_1f9de.png"
261
- )
262
- )
263
-
264
- with gr.Tab("🎯 Solutions"):
265
- solutions_accordion = gr.HTML(
266
- value="<div>No solutions yet. Analyze an error to get started.</div>"
267
- )
268
-
269
- with gr.Tab("🎨 3D Error Flow"):
270
- viz_3d = gr.HTML(
271
- value="<div style='text-align: center; padding: 40px;'>Visualization will appear here after analysis.</div>"
272
- )
273
-
274
- with gr.Tab("πŸ“Š Analysis Details"):
275
- analysis_details = gr.JSON(
276
- label="Detailed Metrics"
277
- )
278
 
279
- # Voice explanation (collapsed by default)
280
- with gr.Accordion("πŸ”Š Voice Explanation", open=False):
281
- voice_output = gr.Audio(
282
- label="AI-Generated Explanation",
283
- autoplay=False
284
- )
 
 
 
 
 
 
 
 
 
285
 
286
- # Examples - Updated to use messages format
 
287
  gr.Examples(
288
  examples=[
289
  [
290
- "Traceback (most recent call last):\n File \"app.py\", line 42, in process_data\n result = json.loads(data)\njson.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)",
 
 
 
291
  None,
292
  None
293
  ],
294
  [
295
- "TypeError: 'NoneType' object is not subscriptable\n File \"main.py\", line 15, in get_user\n return users[user_id]['name']",
 
 
 
 
 
 
 
 
 
296
  None,
297
  None
298
  ]
299
  ],
300
  inputs=[error_input, screenshot_input, codebase_files],
301
- label="πŸ“š Example Errors"
 
302
  )
303
 
304
  # Event handlers
@@ -306,11 +376,14 @@ def create_interface(backend: DebugBackend):
306
  fn=ui.handle_analyze,
307
  inputs=[error_input, screenshot_input, codebase_files],
308
  outputs=[
309
- chatbot,
310
- solutions_accordion,
311
- viz_3d,
312
- voice_output,
313
- analysis_details,
 
 
 
314
  status_text
315
  ]
316
  )
@@ -318,29 +391,57 @@ def create_interface(backend: DebugBackend):
318
  return demo
319
 
320
  if __name__ == "__main__":
321
- # Default to local backend for direct execution
322
  backend = LocalBackend()
323
  demo = create_interface(backend)
324
 
325
- # Launch with theme and css parameters (moved from Blocks constructor)
326
  demo.launch(
327
  server_name="127.0.0.1",
328
  server_port=7860,
329
  share=False,
330
  show_error=True,
331
  theme=gr.themes.Soft(
332
- primary_hue="blue",
333
- secondary_hue="purple"
 
 
 
334
  ),
335
  css="""
336
- .gradio-container {
337
- font-family: 'Inter', sans-serif;
 
 
 
 
 
338
  }
339
- .error {
340
- color: red;
341
- padding: 16px;
342
- background: #fee;
343
  border-radius: 8px;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
344
  }
345
  """
346
  )
 
33
  ):
34
  """Main analysis handler with progressive updates."""
35
  try:
 
 
 
 
 
 
 
36
  # Validate inputs
37
  if not error_text and screenshot is None:
38
  return (
39
+ [{"role": "assistant", "content": "⚠️ **Please provide either an error message or a screenshot to begin analysis.**"}],
40
+ "",
41
+ "",
 
42
  None,
43
  {},
44
+ gr.update(visible=False),
45
+ gr.update(visible=False),
46
+ gr.update(visible=False),
47
+ "⚠️ Waiting for input..."
48
  )
49
 
50
+ progress(0.1, desc="πŸ” Analyzing error...")
 
51
 
52
  # Build context
53
  context = {
 
56
  'code_context': ""
57
  }
58
 
 
59
  if screenshot is not None:
60
+ context['type'] = 'ide'
61
 
62
+ progress(0.3, desc="πŸ€– Running AI agents...")
63
 
64
  # Run backend analysis
65
  result = await self.backend.analyze(context)
66
 
67
+ progress(0.7, desc="πŸ“Š Generating insights...")
 
 
 
 
 
 
 
 
 
 
 
 
68
 
69
+ # Build main response
70
+ response_parts = [f"## 🎯 Root Cause\n\n{result.root_cause}\n\n---\n"]
71
 
72
+ # Generate solutions HTML
73
+ solutions_html = self._generate_modern_solutions(result.solutions)
74
 
75
+ # Generate visualization
 
76
  mock_trace = self.visualizer.generate_mock_trace()
77
  viz_html = self.visualizer.generate_flow(mock_trace)
78
 
79
+ # Build metrics
80
  analysis_json = {
81
  "execution_time": f"{result.execution_time:.2f}s",
82
+ "confidence": f"{result.confidence_score:.1%}",
83
  "agents_used": list(result.agent_metrics.keys()),
84
+ "solutions_found": len(result.solutions)
85
  }
86
 
87
+ # Voice explanation
88
+ progress(0.9, desc="πŸŽ™οΈ Generating explanation...")
89
+ voice_audio = None
90
  if self.voice_explainer and result.solutions:
91
  try:
 
92
  top_solution = result.solutions[0]
93
  ranked_sol = RankedSolution(
94
  rank=1,
95
  title=top_solution.get('title', 'Solution'),
96
  description=top_solution.get('description', ''),
97
+ steps=[],
98
  confidence=top_solution.get('probability', 0.5),
99
  sources=[],
100
+ why_ranked_here=f"Top solution with {top_solution.get('probability', 0)*100:.0f}% confidence",
101
  trade_offs=[]
102
  )
103
 
104
+ audio_bytes = self.voice_explainer.generate_explanation(ranked_sol, mode="walkthrough")
 
 
 
 
105
  if audio_bytes:
 
106
  voice_path = self.voice_explainer.save_audio(
107
  audio_bytes,
108
  f"explanation_{hash(error_text[:100])}.mp3"
 
110
  voice_audio = voice_path
111
  except Exception as e:
112
  logger.warning(f"Voice generation failed: {e}")
 
113
 
114
+ progress(1.0, desc="βœ… Complete!")
115
+
116
+ chat_history = [{"role": "assistant", "content": "".join(response_parts)}]
117
 
118
  return (
119
  chat_history,
 
121
  viz_html,
122
  voice_audio,
123
  analysis_json,
124
+ gr.update(visible=True),
125
+ gr.update(visible=True),
126
+ gr.update(visible=True),
127
+ f"βœ… Analysis complete β€’ {result.execution_time:.1f}s β€’ {len(result.solutions)} solutions"
128
  )
129
 
130
  except Exception as e:
131
  logger.error(f"Analysis failed: {e}")
132
  return (
133
+ [{"role": "assistant", "content": f"## ❌ Analysis Failed\n\n{str(e)}\n\nPlease check your input and try again."}],
134
+ "",
135
+ "",
 
136
  None,
137
  {"error": str(e)},
138
+ gr.update(visible=False),
139
+ gr.update(visible=False),
140
+ gr.update(visible=False),
141
+ f"❌ Error: {str(e)}"
142
  )
143
 
144
+ def _generate_modern_solutions(self, solutions: List[Dict]) -> str:
145
+ """Generate modern, card-based solutions UI."""
146
  if not solutions:
147
+ return "<div style='text-align: center; padding: 40px; color: #666;'>No solutions found.</div>"
148
 
149
+ html = """
150
+ <style>
151
+ .solutions-grid {
152
+ display: grid;
153
+ gap: 20px;
154
+ margin-top: 10px;
155
+ }
156
+ .solution-card {
157
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
158
+ border-radius: 16px;
159
+ padding: 24px;
160
+ color: white;
161
+ box-shadow: 0 10px 30px rgba(0,0,0,0.15);
162
+ transition: transform 0.2s, box-shadow 0.2s;
163
+ cursor: pointer;
164
+ }
165
+ .solution-card:hover {
166
+ transform: translateY(-4px);
167
+ box-shadow: 0 15px 40px rgba(0,0,0,0.25);
168
+ }
169
+ .solution-card.high-confidence {
170
+ background: linear-gradient(135deg, #11998e 0%, #38ef7d 100%);
171
+ }
172
+ .solution-card.medium-confidence {
173
+ background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%);
174
+ }
175
+ .solution-card.low-confidence {
176
+ background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);
177
+ }
178
+ .solution-header {
179
+ display: flex;
180
+ justify-content: space-between;
181
+ align-items: flex-start;
182
+ margin-bottom: 16px;
183
+ }
184
+ .solution-rank {
185
+ background: rgba(255,255,255,0.3);
186
+ border-radius: 50%;
187
+ width: 36px;
188
+ height: 36px;
189
+ display: flex;
190
+ align-items: center;
191
+ justify-content: center;
192
+ font-weight: bold;
193
+ font-size: 18px;
194
+ }
195
+ .solution-confidence {
196
+ background: rgba(255,255,255,0.25);
197
+ padding: 6px 14px;
198
+ border-radius: 20px;
199
+ font-size: 13px;
200
+ font-weight: 600;
201
+ backdrop-filter: blur(10px);
202
+ }
203
+ .solution-title {
204
+ font-size: 20px;
205
+ font-weight: 700;
206
+ margin-bottom: 12px;
207
+ line-height: 1.3;
208
+ }
209
+ .solution-description {
210
+ font-size: 15px;
211
+ line-height: 1.6;
212
+ opacity: 0.95;
213
+ }
214
+ </style>
215
+ <div class="solutions-grid">
216
+ """
217
 
218
+ for idx, sol in enumerate(solutions[:5], 1):
219
  title = sol.get('title', f'Solution {idx}')
220
+ desc = sol.get('description', 'No description available')
221
  prob = sol.get('probability', 0.5)
222
 
223
+ # Determine confidence class
224
+ if prob > 0.7:
225
+ conf_class = "high-confidence"
226
+ conf_label = "High Confidence"
227
+ elif prob > 0.4:
228
+ conf_class = "medium-confidence"
229
+ conf_label = "Medium Confidence"
230
+ else:
231
+ conf_class = "low-confidence"
232
+ conf_label = "Low Confidence"
233
 
234
  html += f"""
235
+ <div class="solution-card {conf_class}">
236
+ <div class="solution-header">
237
+ <div class="solution-rank">#{idx}</div>
238
+ <div class="solution-confidence">{prob:.0%} β€’ {conf_label}</div>
 
 
 
 
 
239
  </div>
240
+ <div class="solution-title">{title}</div>
241
+ <div class="solution-description">{desc}</div>
242
+ </div>
243
  """
244
 
245
  html += "</div>"
246
  return html
247
 
248
  def create_interface(backend: DebugBackend):
249
+ """Create the main Gradio interface with modern UX."""
250
  ui = DebugGenieUI(backend)
251
 
 
252
  with gr.Blocks() as demo:
253
 
254
+ # Hero Section
 
 
 
 
 
 
 
 
255
  with gr.Row():
256
+ gr.Markdown(
257
+ """
258
+ # πŸ§žβ€β™‚οΈ DebugGenie
259
+ ### Your AI-Powered Debugging Assistant
260
+ Upload an error, get instant solutions powered by multi-agent AI
261
+ """,
262
+ elem_classes="hero-section"
263
+ )
264
+
265
+ # Main Content Area
266
+ with gr.Row(equal_height=True):
267
+ # Left Panel - Input Section
268
+ with gr.Column(scale=2, min_width=400):
269
+ gr.Markdown("### πŸ“‹ Error Details")
270
 
271
+ with gr.Group():
272
+ error_input = gr.Code(
273
+ label="Error Message or Stack Trace",
274
+ language="python",
275
+ lines=12,
276
+ placeholder="Paste your error message, stack trace, or exception here...\n\nExample:\nTraceback (most recent call last):\n File 'app.py', line 42\n ValueError: invalid literal for int()",
277
+ show_label=False
278
+ )
279
 
280
+ with gr.Accordion("🎨 Additional Context (Optional)", open=False):
281
+ screenshot_input = gr.Image(
282
+ label="Screenshot",
283
+ type="pil",
284
+ sources=["upload", "clipboard"],
285
+ height=200
286
+ )
287
+
288
+ codebase_files = gr.File(
289
+ label="Related Code Files",
290
+ file_count="multiple",
291
+ file_types=[".py", ".js", ".java", ".cpp", ".ts", ".jsx", ".tsx"]
292
+ )
293
 
294
  analyze_btn = gr.Button(
295
  "πŸ” Analyze Error",
296
  variant="primary",
297
+ size="lg",
298
+ scale=1
299
  )
300
 
301
+ status_text = gr.Markdown(
302
+ "πŸ’‘ Ready to debug β€’ Paste an error to get started",
303
+ elem_classes="status-bar"
 
 
 
 
 
304
  )
305
 
306
+ # Right Panel - Results Section
307
+ with gr.Column(scale=3, min_width=600):
308
+ gr.Markdown("### 🎯 Analysis Results")
309
 
310
+ # Main Analysis Display
311
+ with gr.Group():
312
+ chatbot = gr.Chatbot(
313
+ height=400,
314
+ type="messages",
315
+ show_copy_button=False,
316
+ avatar_images=(None, "🧞"),
317
+ bubble_full_width=False,
318
+ render_markdown=True
319
+ )
320
 
321
+ # Collapsible Sections
322
+ with gr.Group() as solutions_group:
323
+ with gr.Accordion("πŸ’‘ Recommended Solutions", open=True, visible=False) as solutions_accordion:
324
+ solutions_html = gr.HTML()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
325
 
326
+ with gr.Group() as viz_group:
327
+ with gr.Accordion("πŸ“Š Error Flow Visualization", open=False, visible=False) as viz_accordion:
328
+ viz_3d = gr.HTML()
329
+
330
+ with gr.Group() as details_group:
331
+ with gr.Accordion("πŸ”¬ Technical Details", open=False, visible=False) as details_accordion:
332
+ with gr.Row():
333
+ with gr.Column(scale=1):
334
+ analysis_details = gr.JSON(label="Metrics")
335
+ with gr.Column(scale=1):
336
+ voice_output = gr.Audio(
337
+ label="πŸŽ™οΈ Voice Explanation",
338
+ autoplay=False,
339
+ show_download_button=False
340
+ )
341
 
342
+ # Quick Start Examples
343
+ gr.Markdown("### πŸ“š Quick Start Examples")
344
  gr.Examples(
345
  examples=[
346
  [
347
+ """Traceback (most recent call last):
348
+ File "app.py", line 42, in process_data
349
+ result = json.loads(data)
350
+ json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)""",
351
  None,
352
  None
353
  ],
354
  [
355
+ """TypeError: 'NoneType' object is not subscriptable
356
+ File "main.py", line 15, in get_user
357
+ return users[user_id]['name']""",
358
+ None,
359
+ None
360
+ ],
361
+ [
362
+ """AttributeError: 'list' object has no attribute 'keys'
363
+ File "data_processor.py", line 28, in parse_config
364
+ for key in config.keys():""",
365
  None,
366
  None
367
  ]
368
  ],
369
  inputs=[error_input, screenshot_input, codebase_files],
370
+ label=None,
371
+ examples_per_page=3
372
  )
373
 
374
  # Event handlers
 
376
  fn=ui.handle_analyze,
377
  inputs=[error_input, screenshot_input, codebase_files],
378
  outputs=[
379
+ chatbot,
380
+ solutions_html,
381
+ viz_3d,
382
+ voice_output,
383
+ analysis_details,
384
+ solutions_accordion,
385
+ viz_accordion,
386
+ details_accordion,
387
  status_text
388
  ]
389
  )
 
391
  return demo
392
 
393
  if __name__ == "__main__":
 
394
  backend = LocalBackend()
395
  demo = create_interface(backend)
396
 
 
397
  demo.launch(
398
  server_name="127.0.0.1",
399
  server_port=7860,
400
  share=False,
401
  show_error=True,
402
  theme=gr.themes.Soft(
403
+ primary_hue="indigo",
404
+ secondary_hue="purple",
405
+ neutral_hue="slate",
406
+ radius_size="lg",
407
+ font=["Inter", "system-ui", "sans-serif"]
408
  ),
409
  css="""
410
+ .hero-section {
411
+ text-align: center;
412
+ padding: 20px 0;
413
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
414
+ -webkit-background-clip: text;
415
+ -webkit-text-fill-color: transparent;
416
+ background-clip: text;
417
  }
418
+ .status-bar {
419
+ text-align: center;
420
+ padding: 12px;
421
+ background: #f8fafc;
422
  border-radius: 8px;
423
+ margin-top: 16px;
424
+ font-weight: 500;
425
+ }
426
+ .gradio-container {
427
+ max-width: 1400px !important;
428
+ margin: auto;
429
+ }
430
+ button {
431
+ border-radius: 12px !important;
432
+ font-weight: 600 !important;
433
+ transition: all 0.2s !important;
434
+ }
435
+ button:hover {
436
+ transform: translateY(-2px);
437
+ box-shadow: 0 8px 20px rgba(0,0,0,0.15) !important;
438
+ }
439
+ .gr-group {
440
+ border-radius: 12px !important;
441
+ border: 1px solid #e2e8f0 !important;
442
+ }
443
+ .gr-accordion {
444
+ border-radius: 12px !important;
445
  }
446
  """
447
  )