Nguyen5 commited on
Commit
c411e11
·
1 Parent(s): e51bcdb
Files changed (1) hide show
  1. app.py +589 -380
app.py CHANGED
@@ -1,81 +1,43 @@
1
- # app.py – Prüfungsrechts-Chatbot (Đơn giản như ChatGPT)
 
2
  import os
3
  import time
4
- import tempfile
5
  from typing import Optional, Dict, Any
6
  import gradio as gr
7
  from gradio_pdf import PDF
8
  import numpy as np
9
- import soundfile as sf
10
 
11
- from openai import OpenAI
 
 
 
 
 
 
12
 
13
- from speech_io import transcribe_with_openai, synthesize_speech
14
-
15
- # =====================================================
16
- # CONFIGURATION
17
- # =====================================================
18
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
19
- # Initialize OpenAI client only when key is available
20
- openai_client = OpenAI(api_key=OPENAI_API_KEY) if OPENAI_API_KEY else None
21
-
22
- # Language configuration
23
  ASR_LANGUAGE_HINT = os.getenv("ASR_LANGUAGE", "de")
 
 
 
 
24
 
25
  # =====================================================
26
- # INITIALIZATION - RAG Components / Demo Mode
27
- # =====================================================
28
- DEMO_MODE = os.getenv("DEMO_MODE", "false").lower() == "true"
29
-
30
- retriever = None
31
- llm = None
32
- pdf_meta = {"pdf_url": ""}
33
- hg_url = None
34
-
35
- if not DEMO_MODE:
36
- from load_documents import load_all_documents
37
- from split_documents import split_documents
38
- from vectorstore import build_vectorstore
39
- from retriever import get_retriever
40
- from llm import load_llm
41
- from rag_pipeline import answer
42
-
43
- print("📚 Lade Dokumente…")
44
- docs = load_all_documents()
45
-
46
- print("🔪 Splitte Dokumente…")
47
- chunks = split_documents(docs)
48
-
49
- print("🔍 Erstelle VectorStore…")
50
- vs = build_vectorstore(chunks)
51
-
52
- print("🔎 Erzeuge Retriever…")
53
- retriever = get_retriever(vs)
54
-
55
- print("🤖 Lade LLM…")
56
- llm = load_llm()
57
-
58
- pdf_meta = next(d.metadata for d in docs if d.metadata.get("type") == "pdf")
59
- hg_meta = next(d.metadata for d in docs if d.metadata.get("type") == "hg")
60
- hg_url = hg_meta.get("viewer_url")
61
-
62
- def generate_demo_answer(message: str) -> str:
63
- return (
64
- "Chế độ demo: trả lời mẫu cho câu hỏi của bạn. "
65
- "Phiên bản đầy đủ sẽ tham chiếu đến nguồn và luật liên quan."
66
- )
67
-
68
- # =====================================================
69
- # STATE MANAGEMENT
70
  # =====================================================
 
71
  class ConversationState:
72
- """Quản lý trạng thái hội thoại đơn giản"""
 
 
 
 
 
 
 
 
73
 
74
- def __init__(self):
75
- self.messages = []
76
- self.current_mode = "text" # "text" hoặc "audio"
77
- self.is_audio_recording = False
78
-
79
  def add_message(self, role: str, content: str):
80
  """Thêm message vào hội thoại"""
81
  self.messages.append({
@@ -86,124 +48,132 @@ class ConversationState:
86
  # Giới hạn lịch sử
87
  if len(self.messages) > 20:
88
  self.messages = self.messages[-20:]
89
-
90
- def get_chat_history(self):
91
- """Chuyển đổi sang format cho Gradio Chatbot"""
92
- history = []
93
- for msg in self.messages:
94
- if msg["role"] == "user":
95
- history.append([msg["content"], None])
96
- elif msg["role"] == "assistant":
97
- if history and history[-1][1] is None:
98
- history[-1][1] = msg["content"]
99
- else:
100
- history.append([None, msg["content"]])
101
- return history
 
 
 
 
 
 
 
 
 
 
102
 
103
  def reset(self):
104
  """Reset trạng thái hội thoại"""
105
  self.messages = []
106
- self.is_audio_recording = False
 
 
 
107
 
108
  # Khởi tạo state
109
  state = ConversationState()
110
 
111
  # =====================================================
112
- # AUDIO PROCESSING FUNCTIONS
113
  # =====================================================
114
- def process_audio_input(audio_data: Optional[tuple], history) -> tuple:
115
- """
116
- Xử audio input từ microphone
117
- """
118
- if audio_data is None:
119
- return history, "", "Warten auf Audioaufnahme..."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
120
 
121
  try:
122
- # Lấy sample rate và audio data
123
- sample_rate, audio_array = audio_data
124
-
125
- # Tạo file tạm để lưu audio
126
- with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp:
127
- temp_path = tmp.name
128
- # Lưu audio data
129
- sf.write(temp_path, audio_array, int(sample_rate))
130
-
131
- print("DEBUG: Audio saved to temp file, transcribing...")
132
-
133
- # Transcribe audio bằng OpenAI Whisper
134
- transcribed_text = transcribe_with_openai(temp_path, language=ASR_LANGUAGE_HINT)
135
-
136
- # Xóa file tạm
137
- os.unlink(temp_path)
138
-
139
- if not transcribed_text or not transcribed_text.strip():
140
- return history, "", "Keine Sprache erkannt. Bitte versuchen Sie es erneut."
141
-
142
- print(f"DEBUG: Transcribed text: {transcribed_text}")
143
-
144
- # Thêm vào history
145
- new_history = history + [[transcribed_text, None]]
146
-
147
- # Process với RAG
148
- if retriever and llm:
149
- ans, sources = answer(transcribed_text, retriever, llm)
150
- full_response = ans + format_sources(sources)
151
- else:
152
- ans = generate_demo_answer(transcribed_text)
153
- full_response = ans
154
-
155
- # Cập nhật history với response
156
- new_history[-1][1] = full_response
157
-
158
- # Thêm vào state
159
- state.add_message("user", transcribed_text)
160
- state.add_message("assistant", ans)
161
 
162
- return new_history, transcribed_text, "Antwort generiert ✓"
 
 
 
 
163
 
 
 
 
 
 
164
  except Exception as e:
165
- print(f"DEBUG: Error processing audio: {e}")
166
- return history, "", f"Fehler: {str(e)[:50]}"
167
 
168
- def toggle_audio_mode(mode_choice: str, history):
169
- """Chuyển đổi giữa text và audio mode"""
170
- if mode_choice == "Audio (Sprachmodus)":
171
- state.current_mode = "audio"
172
- state.is_audio_recording = True
173
- mode_text = "🎤 Sprachmodus aktiv - Klicken und Sprechen"
 
 
 
 
 
174
  else:
175
- state.current_mode = "text"
176
- state.is_audio_recording = False
177
- mode_text = "⌨️ Textmodus aktiv"
178
-
179
- return (
180
- gr.update(visible=(mode_choice == "Audio (Sprachmodus)")),
181
- gr.update(visible=(mode_choice == "Text (Schreibmodus)")),
182
- gr.update(visible=(mode_choice == "Text (Schreibmodus)")),
183
- mode_text
184
- )
185
 
186
- def process_text_input(message: str, history):
187
- if not message or not message.strip():
188
- return history, ""
189
- new_history = history + [[message, None]]
190
- try:
191
- if retriever and llm:
192
- ans, sources = answer(message, retriever, llm)
193
- full_response = ans + format_sources(sources)
194
- else:
195
- ans = generate_demo_answer(message)
196
- full_response = ans
197
- new_history[-1][1] = full_response
198
- state.add_message("user", message)
199
- state.add_message("assistant", ans)
200
- except Exception as e:
201
- error_msg = f"Entschuldigung, es gab einen Fehler: {str(e)[:100]}"
202
- new_history[-1][1] = error_msg
203
- return new_history, ""
 
 
 
 
204
 
 
 
 
205
  def format_sources(src):
206
- """Format sources cho display"""
207
  if not src:
208
  return ""
209
 
@@ -217,47 +187,226 @@ def format_sources(src):
217
 
218
  return "\n".join(out)
219
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
220
  def clear_conversation():
221
  """Xóa hội thoại"""
222
  state.reset()
223
- return [], "Konversation gelöscht"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
224
 
225
- def speak_last_response(history):
 
 
 
226
  """Đọc câu trả lời cuối cùng"""
227
  if not history:
228
- return None, "Keine Antwort zum Vorlesen"
229
-
230
- # Tìm câu trả lời cuối cùng
231
- for i in range(len(history)-1, -1, -1):
232
- if history[i][1]: # assistant response exists
233
- response_text = history[i][1]
234
- # Loại bỏ phần sources
235
- if "## 📚 Quellen" in response_text:
236
- response_text = response_text.split("## 📚 Quellen")[0].strip()
 
237
 
238
- # Tạo speech
239
- audio_result = synthesize_speech(response_text[:500]) # Giới hạn độ dài
240
  if audio_result:
241
- sr, audio_data = audio_result
242
- return (sr, audio_data), "Audio wird abgespielt..."
243
 
244
- return None, "Keine passende Antwort gefunden"
 
245
 
246
  # =====================================================
247
- # UI – GRADIO INTERFACE (Đơn giản như ChatGPT)
248
  # =====================================================
249
- with gr.Blocks(
250
- title="🧑‍⚖️ Prüfungsrechts-Chatbot",
251
- ) as demo:
252
-
253
- # CSS Styling đơn giản
254
  gr.HTML("""
255
  <style>
256
  .gradio-container {
257
- max-width: 900px;
258
  margin: 0 auto;
259
- font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
260
  padding: 20px;
 
261
  }
262
 
263
  .header {
@@ -265,252 +414,310 @@ with gr.Blocks(
265
  margin-bottom: 30px;
266
  padding: 20px;
267
  background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
268
- border-radius: 12px;
269
  color: white;
270
  }
271
 
272
- .mode-selector {
273
  background: #f8f9fa;
274
- padding: 15px;
275
- border-radius: 10px;
276
  margin-bottom: 20px;
277
- display: flex;
278
- align-items: center;
279
- gap: 15px;
280
  border: 1px solid #e2e8f0;
281
  }
282
 
283
- .mode-indicator {
284
- padding: 8px 16px;
285
- border-radius: 20px;
286
- font-weight: 600;
287
- background: #e0e7ff;
288
- color: #4f46e5;
289
- }
290
-
291
- .input-area {
292
  background: white;
293
- border-radius: 12px;
294
- padding: 15px;
295
- border: 2px solid #e2e8f0;
296
- margin-top: 20px;
297
  }
298
 
299
  .input-row {
 
 
 
 
 
300
  display: flex;
301
- gap: 10px;
302
  align-items: center;
 
303
  }
304
 
305
- .audio-visualizer {
306
- padding: 10px;
307
- text-align: center;
308
- color: #666;
309
- font-style: italic;
310
  }
311
 
312
- .tts-btn {
313
- margin-top: 10px;
314
- padding: 8px 16px;
315
- background: #10b981;
316
- color: white;
317
- border: none;
318
- border-radius: 8px;
319
- cursor: pointer;
 
 
 
320
  }
321
 
322
- .tts-btn:hover {
323
- background: #059669;
 
324
  }
325
 
326
- .clear-btn {
327
- background: #ef4444;
328
- color: white;
329
- border: none;
330
- border-radius: 8px;
331
- padding: 8px 16px;
332
- cursor: pointer;
333
- margin-left: 10px;
334
  }
335
 
336
- .clear-btn:hover {
337
- background: #dc2626;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
338
  }
339
  </style>
340
  """)
341
 
342
- # Header đơn giản
343
  with gr.Column(elem_classes=["header"]):
344
  gr.Markdown("# 🧑‍⚖️ Prüfungsrechts-Chatbot")
345
- gr.Markdown("### Stellen Sie Fragen zu Prüfungsordnung und Hochschulgesetz NRW")
346
-
347
- # Mode Selector
348
- with gr.Column(elem_classes=["mode-selector"]):
 
 
 
 
 
 
 
 
 
 
349
  with gr.Row():
350
- mode_selector = gr.Radio(
351
- choices=["Text (Schreibmodus)", "Audio (Sprachmodus)"],
352
- value="Text (Schreibmodus)",
353
- label="",
354
- scale=3,
355
- elem_id="mode-selector"
356
- )
357
- mode_indicator = gr.Textbox(
358
- value="⌨️ Textmodus aktiv",
359
- label="Status",
360
- interactive=False,
361
- scale=2
362
- )
363
- clear_btn = gr.Button("🗑️ Löschen", elem_classes=["clear-btn"], scale=1)
364
-
365
- # Main Chat Interface
366
- chatbot = gr.Chatbot(
367
- label="Konversation",
368
- height=500,
369
- avatar_images=(
370
- "https://em-content.zobj.net/source/microsoft-teams/363/bust-in-silhouette_1f464.png",
371
- "https://em-content.zobj.net/source/microsoft-teams/363/robot_1f916.png"
372
- )
373
- )
374
-
375
- # Input Area (thay đổi theo mode)
376
- with gr.Column(elem_classes=["input-area"], visible=True) as input_area:
377
- # Text Input (visible khi text mode)
378
- with gr.Column(visible=True) as text_input_container:
379
- with gr.Row(elem_classes=["input-row"]):
380
- text_input = gr.Textbox(
381
- label="",
382
- placeholder="Stellen Sie eine juristische Frage... (Enter zum Senden)",
383
- lines=2,
384
- max_lines=4,
385
- scale=8,
386
- show_label=False,
387
- container=False,
388
- autofocus=True
389
  )
390
- text_send_btn = gr.Button(
391
- "Senden",
392
- variant="primary",
393
- scale=1,
394
- min_width=80
 
395
  )
396
- with gr.Row():
397
- sug1 = gr.Button("Tóm tắt quy định thi", variant="secondary")
398
- sug2 = gr.Button("Quy trình khiếu nại kết quả thi", variant="secondary")
399
- sug3 = gr.Button("Điều kiện được thi lại", variant="secondary")
400
-
401
- # Audio Input (visible khi audio mode)
402
- with gr.Column(visible=False) as audio_input_container:
403
- gr.Markdown("### 🎤 Klicken und Sprechen")
404
- with gr.Row():
405
- audio_input = gr.Audio(
406
- sources=["microphone"],
407
- type="numpy",
408
- streaming=False,
409
- show_label=False,
410
- interactive=True,
411
- scale=8
412
  )
413
- audio_status = gr.Textbox(
414
- label="Status",
415
- value="Warten auf Aufnahme...",
416
- interactive=False,
417
- scale=2
 
 
418
  )
419
- gr.Markdown("*Drücken Sie aufnehmen, sprechen Sie Ihre Frage, dann stoppen*", elem_classes=["audio-visualizer"])
420
-
421
- # TTS Controls
422
- with gr.Row():
423
- tts_btn = gr.Button("🔊 Letzte Antwort vorlesen", variant="secondary", size="sm")
424
- tts_audio = gr.Audio(label="", interactive=False, visible=False)
425
- tts_status = gr.Textbox(label="", interactive=False, visible=False)
426
-
427
- if not DEMO_MODE:
428
- with gr.Accordion("📚 Dokumente & Quellen anzeigen", open=False):
429
- with gr.Tabs():
430
- with gr.TabItem("📄 Prüfungsordnung"):
431
- PDF(pdf_meta["pdf_url"], height=350)
432
- with gr.TabItem("📘 Hochschulgesetz NRW"):
433
- if hg_url:
434
- gr.HTML(f'''
435
- <div style="padding: 10px;">
436
- <h4>Hochschulgesetz NRW Viewer</h4>
437
- <a href="{hg_url}" target="_blank" style="display: inline-block; padding: 8px 16px; background: #3b82f6; color: white; text-decoration: none; border-radius: 5px; margin-bottom: 10px;">
438
- Im Viewer öffnen ↗
439
- </a>
440
- <iframe src="{hg_url}" width="100%" height="400px" style="border: 1px solid #ddd; border-radius: 6px;"></iframe>
441
- </div>
442
- ''')
443
- else:
444
- gr.Markdown("Viewer-Link nicht verfügbar.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
445
 
446
  # =====================================================
447
  # EVENT HANDLERS
448
  # =====================================================
449
 
450
- # Mode toggle
451
- mode_selector.change(
452
- toggle_audio_mode,
453
- inputs=[mode_selector, chatbot],
454
- outputs=[
455
- audio_input_container,
456
- text_input_container,
457
- text_send_btn,
458
- mode_indicator
459
- ]
460
  )
461
 
462
- # Text input handling
463
- text_send_btn.click(
464
- process_text_input,
465
- inputs=[text_input, chatbot],
466
- outputs=[chatbot, text_input]
467
  )
468
 
469
- text_input.submit(
470
- process_text_input,
471
- inputs=[text_input, chatbot],
472
- outputs=[chatbot, text_input]
 
 
 
473
  )
474
 
475
- # Audio input handling
476
- def handle_audio_complete(audio_data, history):
477
- """Xử khi audio recording hoàn tất"""
478
- return process_audio_input(audio_data, history)
479
-
480
- audio_input.stop_recording(
481
- handle_audio_complete,
482
- inputs=[audio_input, chatbot],
483
- outputs=[chatbot, audio_status, audio_status]
484
- ).then(
485
- lambda: ("", "Warten auf neue Aufnahme..."),
486
- outputs=[audio_input, audio_status]
487
- ).then(
488
- speak_last_response,
489
- inputs=[chatbot],
490
- outputs=[tts_audio, tts_status]
 
491
  ).then(
492
- lambda: gr.Audio(visible=True),
493
- outputs=[tts_audio]
 
 
 
 
 
 
 
494
  ).then(
495
- lambda: gr.Textbox(visible=True),
496
- outputs=[tts_status]
497
  )
498
-
499
- sug1.click(lambda history: process_text_input("Bitte fassen Sie die relevanten Prüfungsregeln zusammen.", history), inputs=[chatbot], outputs=[chatbot, text_input])
500
-
501
- sug2.click(lambda history: process_text_input("Wie ist der Ablauf einer Prüfungsanfechtung?", history), inputs=[chatbot], outputs=[chatbot, text_input])
502
-
503
- sug3.click(lambda history: process_text_input("Unter welchen Bedingungen kann man eine Prüfung wiederholen?", history), inputs=[chatbot], outputs=[chatbot, text_input])
504
 
505
- # Clear conversation
506
- clear_btn.click(
507
- clear_conversation,
508
- outputs=[chatbot, mode_indicator]
 
 
 
 
 
 
 
 
 
 
509
  )
510
 
511
- # TTS button
 
 
 
 
 
 
 
 
 
 
 
 
 
 
512
  tts_btn.click(
513
- speak_last_response,
514
  inputs=[chatbot],
515
  outputs=[tts_audio, tts_status]
516
  ).then(
@@ -522,4 +729,6 @@ with gr.Blocks(
522
  )
523
 
524
  if __name__ == "__main__":
525
- demo.queue().launch(show_error=True)
 
 
 
1
+ # app.py – Prüfungsrechts-Chatbot (RAG + Sprache, UI kiểu ChatGPT) với các tính năng nâng cao
2
+ #
3
  import os
4
  import time
5
+ from dataclasses import dataclass, field
6
  from typing import Optional, Dict, Any
7
  import gradio as gr
8
  from gradio_pdf import PDF
9
  import numpy as np
 
10
 
11
+ from load_documents import load_all_documents
12
+ from split_documents import split_documents
13
+ from vectorstore import build_vectorstore
14
+ from retriever import get_retriever
15
+ from llm import load_llm
16
+ from rag_pipeline import answer
17
+ from speech_io import transcribe_audio, synthesize_speech, transcribe_with_groq, detect_voice_activity
18
 
19
+ # Cấu hình môi trường
 
 
 
 
 
 
 
 
 
20
  ASR_LANGUAGE_HINT = os.getenv("ASR_LANGUAGE", "de")
21
+ USE_GROQ = os.getenv("USE_GROQ", "false").lower() == "true"
22
+ GROQ_MODEL = os.getenv("GROQ_MODEL", "whisper-large-v3-turbo")
23
+ ENABLE_VAD = os.getenv("ENABLE_VAD", "true").lower() == "true"
24
+ VAD_THRESHOLD = float(os.getenv("VAD_THRESHOLD", "0.3"))
25
 
26
  # =====================================================
27
+ # STATE MANAGEMENT - Quản trạng thái hội thoại liền mạch
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  # =====================================================
29
+ @dataclass
30
  class ConversationState:
31
+ """Quản lý trạng thái hội thoại"""
32
+ messages: list = field(default_factory=list)
33
+ last_audio_time: float = field(default_factory=time.time)
34
+ is_listening: bool = False
35
+ vad_confidence: float = 0.0
36
+ conversation_context: str = ""
37
+ whisper_model: str = field(default_factory=lambda: os.getenv("WHISPER_MODEL", "base"))
38
+ language: str = field(default_factory=lambda: ASR_LANGUAGE_HINT)
39
+ current_audio_path: Optional[str] = None
40
 
 
 
 
 
 
41
  def add_message(self, role: str, content: str):
42
  """Thêm message vào hội thoại"""
43
  self.messages.append({
 
48
  # Giới hạn lịch sử
49
  if len(self.messages) > 20:
50
  self.messages = self.messages[-20:]
51
+
52
+ # Cập nhật context
53
+ self._update_context()
54
+
55
+ def _update_context(self):
56
+ """Cập nhật context từ hội thoại"""
57
+ if not self.messages:
58
+ self.conversation_context = ""
59
+ return
60
+
61
+ context_parts = []
62
+ for msg in self.messages[-5:]: # Giữ 5 message gần nhất
63
+ prefix = "User" if msg["role"] == "user" else "Assistant"
64
+ context_parts.append(f"{prefix}: {msg['content'][:200]}") # Giới hạn độ dài
65
+ self.conversation_context = "\n".join(context_parts)
66
+
67
+ def get_recent_context(self, num_messages: int = 3) -> str:
68
+ """Lấy context gần đây"""
69
+ if not self.messages or num_messages <= 0:
70
+ return ""
71
+
72
+ recent = self.messages[-num_messages:] if len(self.messages) >= num_messages else self.messages
73
+ return "\n".join([f"{m['role']}: {m['content']}" for m in recent])
74
 
75
  def reset(self):
76
  """Reset trạng thái hội thoại"""
77
  self.messages = []
78
+ self.conversation_context = ""
79
+ self.is_listening = False
80
+ self.vad_confidence = 0.0
81
+ self.current_audio_path = None
82
 
83
  # Khởi tạo state
84
  state = ConversationState()
85
 
86
  # =====================================================
87
+ # INITIALISIERUNG (global)
88
  # =====================================================
89
+
90
+ print("📚 Lade Dokumente…")
91
+ docs = load_all_documents()
92
+
93
+ print("🔪 Splitte Dokumente…")
94
+ chunks = split_documents(docs)
95
+
96
+ print("🔍 Erstelle VectorStore…")
97
+ vs = build_vectorstore(chunks)
98
+
99
+ print("🔎 Erzeuge Retriever…")
100
+ retriever = get_retriever(vs)
101
+
102
+ print("🤖 Lade LLM…")
103
+ llm = load_llm()
104
+
105
+ # Dokument-Metadaten für UI
106
+ pdf_meta = next(d.metadata for d in docs if d.metadata.get("type") == "pdf")
107
+ hg_meta = next(d.metadata for d in docs if d.metadata.get("type") == "hg")
108
+ hg_url = hg_meta.get("viewer_url")
109
+
110
+ # =====================================================
111
+ # VOICE ACTIVITY DETECTION
112
+ # =====================================================
113
+ def handle_voice_activity(audio_data: Optional[np.ndarray], sample_rate: int) -> Dict[str, Any]:
114
+ """Xử lý phát hiện hoạt động giọng nói"""
115
+ if audio_data is None or len(audio_data) == 0:
116
+ return {"is_speech": False, "confidence": 0.0, "status": "No audio data"}
117
 
118
  try:
119
+ vad_result = detect_voice_activity(audio_data, sample_rate, threshold=VAD_THRESHOLD)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
120
 
121
+ # Cập nhật state
122
+ state.is_listening = vad_result["is_speech"]
123
+ if vad_result["is_speech"]:
124
+ state.last_audio_time = time.time()
125
+ state.vad_confidence = vad_result["confidence"]
126
 
127
+ return {
128
+ "is_speech": vad_result["is_speech"],
129
+ "confidence": vad_result["confidence"],
130
+ "status": f"Speech detected: {vad_result['is_speech']} (conf: {vad_result['confidence']:.2f})"
131
+ }
132
  except Exception as e:
133
+ print(f"VAD error: {e}")
134
+ return {"is_speech": False, "confidence": 0.0, "status": f"VAD error: {e}"}
135
 
136
+ # =====================================================
137
+ # TRANSCRIBE WITH OPTIMIZED PIPELINE
138
+ # =====================================================
139
+ def transcribe_audio_optimized(audio_path: str, language: Optional[str] = None) -> str:
140
+ """Transcribe audio với pipeline tối ưu"""
141
+ if not audio_path or not os.path.exists(audio_path):
142
+ return ""
143
+
144
+ if USE_GROQ and GROQ_MODEL:
145
+ print("Using Groq for transcription...")
146
+ return transcribe_with_groq(audio_path, language=language)
147
  else:
148
+ return transcribe_audio(audio_path, language=language)
 
 
 
 
 
 
 
 
 
149
 
150
+ # =====================================================
151
+ # CONVERSATIONAL INTELLIGENCE
152
+ # =====================================================
153
+ def enhance_conversation_context(user_input: str, history: list) -> str:
154
+ """Tăng cường context hội thoại"""
155
+ if not user_input:
156
+ return user_input
157
+
158
+ # Thêm context đơn giản từ history
159
+ if history and len(history) > 0:
160
+ # Lấy 3 tin nhắn gần nhất từ history
161
+ recent_history = history[-3:] if len(history) >= 3 else history
162
+ context_parts = ["Previous conversation:"]
163
+ for msg in recent_history:
164
+ role = "User" if msg.get("role") == "user" else "Assistant"
165
+ content = msg.get("content", "")[:100] # Giới hạn độ dài
166
+ context_parts.append(f"{role}: {content}")
167
+
168
+ context = "\n".join(context_parts)
169
+ return f"{context}\n\nCurrent question: {user_input}"
170
+
171
+ return user_input
172
 
173
+ # =====================================================
174
+ # Quellen formatieren – Markdown für Chat
175
+ # =====================================================
176
  def format_sources(src):
 
177
  if not src:
178
  return ""
179
 
 
187
 
188
  return "\n".join(out)
189
 
190
+ # =====================================================
191
+ # CORE CHAT-FUNKTION với tất cả tính năng mới
192
+ # =====================================================
193
+ def chat_fn(text_input, audio_path, history, lang_sel, use_vad):
194
+ """
195
+ Main chat function với xử lý VAD và transcription
196
+ """
197
+ print(f"DEBUG: chat_fn called - text_input: '{text_input}', audio_path: {audio_path}, history length: {len(history) if history else 0}")
198
+
199
+ # Khởi tạo history nếu None
200
+ if history is None:
201
+ history = []
202
+
203
+ # Biến để lưu text cần xử lý
204
+ text_to_process = ""
205
+
206
+ # Xử lý audio nếu có
207
+ if audio_path and os.path.exists(audio_path):
208
+ print(f"DEBUG: Processing audio file: {audio_path}")
209
+
210
+ # Lưu đường dẫn audio vào state
211
+ state.current_audio_path = audio_path
212
+
213
+ # Kiểm tra VAD nếu được bật
214
+ if use_vad and ENABLE_VAD:
215
+ try:
216
+ import soundfile as sf
217
+ audio_data, sample_rate = sf.read(audio_path)
218
+ print(f"DEBUG: Audio loaded - shape: {audio_data.shape}, sample_rate: {sample_rate}")
219
+
220
+ vad_result = handle_voice_activity(audio_data, sample_rate)
221
+ print(f"DEBUG: VAD result: {vad_result}")
222
+
223
+ # Nếu VAD phát hiện có giọng nói, hoặc nếu VAD không bật, tiến hành transcribe
224
+ if vad_result.get("is_speech", True):
225
+ # Transcribe audio
226
+ transcribed_text = transcribe_audio_optimized(audio_path, language=lang_sel)
227
+ if transcribed_text and transcribed_text.strip():
228
+ text_to_process = transcribed_text.strip()
229
+ print(f"DEBUG: Transcribed text: {text_to_process}")
230
+ else:
231
+ print("DEBUG: VAD detected no speech, skipping transcription")
232
+ except Exception as e:
233
+ print(f"DEBUG: Error in VAD/transcription: {e}")
234
+ # Fallback: transcribe ngay cả khi có lỗi
235
+ transcribed_text = transcribe_audio_optimized(audio_path, language=lang_sel)
236
+ if transcribed_text and transcribed_text.strip():
237
+ text_to_process = transcribed_text.strip()
238
+ else:
239
+ # Nếu VAD không bật, transcribe trực tiếp
240
+ transcribed_text = transcribe_audio_optimized(audio_path, language=lang_sel)
241
+ if transcribed_text and transcribed_text.strip():
242
+ text_to_process = transcribed_text.strip()
243
+ print(f"DEBUG: Transcribed text (no VAD): {text_to_process}")
244
+
245
+ # Nếu có text input từ textbox, ưu tiên sử dụng nó
246
+ if text_input and text_input.strip():
247
+ text_to_process = text_input.strip()
248
+ print(f"DEBUG: Using text input: {text_to_process}")
249
+
250
+ # Nếu không có gì để xử lý
251
+ if not text_to_process:
252
+ print("DEBUG: No text to process")
253
+ # Trả về history hiện tại và status
254
+ status_text = f"Bereit | VAD: {'On' if use_vad and ENABLE_VAD else 'Off'} | Model: {state.whisper_model}"
255
+ if history is None:
256
+ history = []
257
+ return history, "", None, status_text
258
+
259
+ print(f"DEBUG: Processing text: {text_to_process}")
260
+
261
+ # Tăng cường context cho câu hỏi
262
+ enhanced_question = enhance_conversation_context(text_to_process, history)
263
+
264
+ try:
265
+ # RAG-Antwort berechnen
266
+ ans, sources = answer(enhanced_question, retriever, llm)
267
+ bot_msg = ans + format_sources(sources)
268
+
269
+ # Thêm vào state
270
+ state.add_message("user", text_to_process)
271
+ state.add_message("assistant", ans)
272
+
273
+ # History aktualisieren (ChatGPT-Style)
274
+ history.append({"role": "user", "content": text_to_process})
275
+ history.append({"role": "assistant", "content": bot_msg})
276
+
277
+ print(f"DEBUG: Answer generated, history length: {len(history)}")
278
+
279
+ except Exception as e:
280
+ print(f"DEBUG: Error in RAG pipeline: {e}")
281
+ # Fallback response
282
+ error_msg = "Entschuldigung, es gab einen Fehler bei der Verarbeitung Ihrer Anfrage. Bitte versuchen Sie es erneut."
283
+ history.append({"role": "user", "content": text_to_process})
284
+ history.append({"role": "assistant", "content": error_msg})
285
+
286
+ status_text = f"Bereit | VAD: {'On' if use_vad and ENABLE_VAD else 'Off'} | Model: {state.whisper_model}"
287
+ return history, "", None, status_text
288
+
289
+ # =====================================================
290
+ # FUNCTIONS FOR UI CONTROLS
291
+ # =====================================================
292
+ def toggle_vad(use_vad):
293
+ """Toggle Voice Activity Detection"""
294
+ global ENABLE_VAD
295
+ ENABLE_VAD = use_vad
296
+ status = "EIN" if use_vad else "AUS"
297
+ return f"Voice Activity Detection: {status} | Model: {state.whisper_model}"
298
+
299
+ def change_whisper_model(model_size):
300
+ """Đổi Whisper model"""
301
+ state.whisper_model = model_size
302
+ os.environ["WHISPER_MODEL"] = model_size
303
+ return f"Whisper Model: {model_size} | VAD: {'On' if ENABLE_VAD else 'Off'}"
304
+
305
  def clear_conversation():
306
  """Xóa hội thoại"""
307
  state.reset()
308
+ return [], "Konversation gelöscht | Bereit"
309
+
310
+ def update_vad_indicator():
311
+ """Cập nhật VAD indicator"""
312
+ if state.is_listening:
313
+ indicator_html = """
314
+ <div style="display: flex; align-items: center; gap: 8px;">
315
+ <div style="width: 12px; height: 12px; border-radius: 50%; background-color: #10b981; box-shadow: 0 0 10px #10b981; animation: pulse 1.5s infinite;"></div>
316
+ <span style="color: #10b981; font-weight: bold;">Sprache erkannt</span>
317
+ </div>
318
+ <style>
319
+ @keyframes pulse {
320
+ 0% { opacity: 0.7; }
321
+ 50% { opacity: 1; }
322
+ 100% { opacity: 0.7; }
323
+ }
324
+ </style>
325
+ """
326
+ else:
327
+ indicator_html = """
328
+ <div style="display: flex; align-items: center; gap: 8px;">
329
+ <div style="width: 12px; height: 12px; border-radius: 50%; background-color: #6b7280;"></div>
330
+ <span>Bereit</span>
331
+ </div>
332
+ """
333
+
334
+ return indicator_html
335
+
336
+ # =====================================================
337
+ # AUDIO STREAMING HANDLER
338
+ # =====================================================
339
+ def handle_audio_stream(audio_path, use_vad):
340
+ """Xử lý audio streaming real-time"""
341
+ if not audio_path or not os.path.exists(audio_path):
342
+ return "", update_vad_indicator(), "Keine Audiodatei"
343
+
344
+ try:
345
+ import soundfile as sf
346
+ audio_data, sample_rate = sf.read(audio_path)
347
+
348
+ # Cập nhật VAD indicator
349
+ vad_html = update_vad_indicator()
350
+
351
+ if use_vad and ENABLE_VAD:
352
+ vad_result = handle_voice_activity(audio_data, sample_rate)
353
+
354
+ if vad_result.get("is_speech", False):
355
+ # Nếu phát hiện giọng nói, transcribe
356
+ text = transcribe_audio_optimized(audio_path, language=state.language)
357
+ status = f"Sprache erkannt ({vad_result.get('confidence', 0):.2f})"
358
+ return text, vad_html, status
359
+ else:
360
+ status = "Keine Sprache erkannt"
361
+ return "", vad_html, status
362
+ else:
363
+ # Nếu VAD không bật, vẫn transcribe nhưng hiển thị trạng thái khác
364
+ text = transcribe_audio_optimized(audio_path, language=state.language)
365
+ status = "Transkription (VAD aus)"
366
+ return text, vad_html, status
367
+
368
+ except Exception as e:
369
+ print(f"Error in audio stream handler: {e}")
370
+ return "", update_vad_indicator(), f"Fehler: {str(e)[:50]}"
371
 
372
+ # =====================================================
373
+ # TTS FUNCTION
374
+ # =====================================================
375
+ def read_last_answer(history):
376
  """Đọc câu trả lời cuối cùng"""
377
  if not history:
378
+ print("DEBUG: No history for TTS")
379
+ return None
380
+
381
+ # Tìm câu trả lời cuối cùng của assistant
382
+ for msg in reversed(history):
383
+ if isinstance(msg, dict) and msg.get("role") == "assistant":
384
+ content = msg.get("content", "")
385
+ # Loại bỏ phần sources từ câu trả lời
386
+ if "## 📚 Quellen" in content:
387
+ content = content.split("## 📚 Quellen")[0].strip()
388
 
389
+ print(f"DEBUG: Synthesizing speech for: {content[:100]}...")
390
+ audio_result = synthesize_speech(content)
391
  if audio_result:
392
+ print("DEBUG: TTS successful")
393
+ return audio_result
394
 
395
+ print("DEBUG: No assistant message found for TTS")
396
+ return None
397
 
398
  # =====================================================
399
+ # UI – GRADIO với tất cả tính năng mới
400
  # =====================================================
401
+ with gr.Blocks(title="Prüfungsrechts-Chatbot (RAG + Sprache) - Enhanced") as demo:
402
+ # CSS Styling nâng cao
 
 
 
403
  gr.HTML("""
404
  <style>
405
  .gradio-container {
406
+ max-width: 1200px;
407
  margin: 0 auto;
 
408
  padding: 20px;
409
+ font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
410
  }
411
 
412
  .header {
 
414
  margin-bottom: 30px;
415
  padding: 20px;
416
  background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
417
+ border-radius: 15px;
418
  color: white;
419
  }
420
 
421
+ .control-panel {
422
  background: #f8f9fa;
423
+ padding: 20px;
424
+ border-radius: 15px;
425
  margin-bottom: 20px;
 
 
 
426
  border: 1px solid #e2e8f0;
427
  }
428
 
429
+ .chat-container {
 
 
 
 
 
 
 
 
430
  background: white;
431
+ border-radius: 15px;
432
+ padding: 20px;
433
+ box-shadow: 0 4px 20px rgba(0,0,0,0.1);
434
+ margin-bottom: 20px;
435
  }
436
 
437
  .input-row {
438
+ background: #f8fafc;
439
+ border-radius: 25px;
440
+ padding: 10px 20px;
441
+ border: 2px solid #e2e8f0;
442
+ transition: all 0.3s ease;
443
  display: flex;
 
444
  align-items: center;
445
+ gap: 10px;
446
  }
447
 
448
+ .input-row:focus-within {
449
+ border-color: #667eea;
450
+ box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1);
 
 
451
  }
452
 
453
+ .send-btn {
454
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
455
+ color: white !important;
456
+ border: none !important;
457
+ border-radius: 50% !important;
458
+ width: 44px !important;
459
+ height: 44px !important;
460
+ display: flex !important;
461
+ align-items: center !important;
462
+ justify-content: center !important;
463
+ cursor: pointer !important;
464
  }
465
 
466
+ .send-btn:hover {
467
+ transform: scale(1.05);
468
+ box-shadow: 0 4px 15px rgba(102, 126, 234, 0.4) !important;
469
  }
470
 
471
+ .vad-indicator-container {
472
+ padding: 10px;
473
+ background: #f1f5f9;
474
+ border-radius: 10px;
475
+ margin: 10px 0;
476
+ display: flex;
477
+ align-items: center;
478
+ gap: 10px;
479
  }
480
 
481
+ .feature-badge {
482
+ display: inline-block;
483
+ padding: 4px 12px;
484
+ background: #e0e7ff;
485
+ color: #4f46e5;
486
+ border-radius: 20px;
487
+ font-size: 12px;
488
+ font-weight: 500;
489
+ margin: 2px;
490
+ }
491
+
492
+ .chatbot {
493
+ min-height: 400px;
494
+ max-height: 500px;
495
+ overflow-y: auto;
496
+ }
497
+
498
+ /* Responsive design */
499
+ @media (max-width: 768px) {
500
+ .gradio-container {
501
+ padding: 10px;
502
+ }
503
+
504
+ .input-row {
505
+ flex-direction: column;
506
+ gap: 10px;
507
+ }
508
+
509
+ .send-btn {
510
+ width: 100% !important;
511
+ height: 44px !important;
512
+ border-radius: 10px !important;
513
+ }
514
  }
515
  </style>
516
  """)
517
 
518
+ # Header
519
  with gr.Column(elem_classes=["header"]):
520
  gr.Markdown("# 🧑‍⚖️ Prüfungsrechts-Chatbot")
521
+ gr.Markdown("### Intelligent Voice Interface with Advanced Features")
522
+
523
+ # Feature badges
524
+ gr.HTML("""
525
+ <div style="text-align: center; margin: 10px 0;">
526
+ <span class="feature-badge">🎤 Voice Activity Detection</span>
527
+ <span class="feature-badge">⚡ Fast Transcription</span>
528
+ <span class="feature-badge">🧠 Conversational AI</span>
529
+ <span class="feature-badge">📚 Document RAG</span>
530
+ </div>
531
+ """)
532
+
533
+ # Control Panel
534
+ with gr.Column(elem_classes=["control-panel"]):
535
  with gr.Row():
536
+ with gr.Column(scale=2):
537
+ # Model Selection
538
+ model_selector = gr.Dropdown(
539
+ choices=["tiny", "base", "small", "medium"],
540
+ value=state.whisper_model,
541
+ label="Whisper Model",
542
+ info="Wählen Sie das Modell für Spracherkennung"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
543
  )
544
+
545
+ # VAD Control
546
+ vad_toggle = gr.Checkbox(
547
+ value=ENABLE_VAD,
548
+ label="Voice Activity Detection aktivieren",
549
+ info="Automatische Spracherkennung"
550
  )
551
+
552
+ # Language Selection
553
+ lang_selector = gr.Dropdown(
554
+ choices=["de", "en", "auto"],
555
+ value=ASR_LANGUAGE_HINT,
556
+ label="Spracherkennung Sprache"
 
 
 
 
 
 
 
 
 
 
557
  )
558
+
559
+ with gr.Column(scale=1):
560
+ # Status Display
561
+ status_display = gr.Textbox(
562
+ label="System Status",
563
+ value="Bereit",
564
+ interactive=False
565
  )
566
+
567
+ # Clear Conversation Button
568
+ clear_btn = gr.Button("🗑️ Konversation löschen", variant="secondary", size="sm")
569
+
570
+ # VAD Indicator
571
+ vad_indicator = gr.HTML(value=update_vad_indicator(), label="VAD Status")
572
+
573
+ # Main Chat Interface
574
+ with gr.Column(elem_classes=["chat-container"]):
575
+ # Chatbot Display
576
+ chatbot = gr.Chatbot(
577
+ label="Konversation",
578
+ height=400,
579
+ avatar_images=(None, "🤖")
580
+ )
581
+
582
+ # Input Row với VAD Indicator
583
+ with gr.Row(elem_classes=["input-row"]):
584
+ # Text Input
585
+ chat_text = gr.Textbox(
586
+ label=None,
587
+ placeholder="Stellen Sie eine Frage oder sprechen Sie ins Mikrofon...",
588
+ lines=1,
589
+ max_lines=4,
590
+ scale=8,
591
+ container=False,
592
+ show_label=False
593
+ )
594
+
595
+ # Audio Input
596
+ chat_audio = gr.Audio(
597
+ sources=["microphone"],
598
+ type="filepath",
599
+ format="wav",
600
+ streaming=True,
601
+ interactive=True,
602
+ show_label=False,
603
+ scale=1,
604
+ elem_id="audio-input"
605
+ )
606
+
607
+ # Send Button
608
+ send_btn = gr.Button("➤", variant="primary", elem_classes=["send-btn"], scale=1)
609
+
610
+ # TTS Controls
611
+ with gr.Row():
612
+ tts_btn = gr.Button("🔊 Antwort vorlesen", variant="secondary", size="sm")
613
+ tts_audio = gr.Audio(label="Audio Ausgabe", interactive=False, visible=False)
614
+ tts_status = gr.Textbox(label="TTS Status", interactive=False, visible=False)
615
+
616
+ # Documents Section
617
+ with gr.Accordion("📚 Quellen & Dokumente", open=False):
618
+ with gr.Tabs():
619
+ with gr.TabItem("📄 Prüfungsordnung (PDF)"):
620
+ PDF(pdf_meta["pdf_url"], height=300)
621
+
622
+ with gr.TabItem("📘 Hochschulgesetz NRW"):
623
+ if isinstance(hg_url, str) and hg_url.startswith("http"):
624
+ gr.Markdown(f"### [Im Viewer öffnen]({hg_url})")
625
+ gr.HTML(f'<iframe src="{hg_url}" width="100%" height="500px" style="border: 1px solid #ddd; border-radius: 8px;"></iframe>')
626
+ else:
627
+ gr.Markdown("Viewer-Link nicht verfügbar.")
628
 
629
  # =====================================================
630
  # EVENT HANDLERS
631
  # =====================================================
632
 
633
+ # Model Selection
634
+ model_selector.change(
635
+ change_whisper_model,
636
+ inputs=[model_selector],
637
+ outputs=[status_display]
 
 
 
 
 
638
  )
639
 
640
+ # VAD Toggle
641
+ vad_toggle.change(
642
+ toggle_vad,
643
+ inputs=[vad_toggle],
644
+ outputs=[status_display]
645
  )
646
 
647
+ # Clear Conversation
648
+ clear_btn.click(
649
+ clear_conversation,
650
+ outputs=[chatbot, status_display]
651
+ ).then(
652
+ lambda: update_vad_indicator(),
653
+ outputs=[vad_indicator]
654
  )
655
 
656
+ # Main Chat Function
657
def process_chat(text_input, audio_path, history, lang_sel, use_vad):
    """Fault-tolerant wrapper around chat_fn for the Gradio event handlers.

    Forwards all arguments unchanged; on any exception the chat history
    is preserved (or replaced by an empty list when it was None) and the
    error text is surfaced in the status slot instead of crashing the UI.
    """
    try:
        return chat_fn(text_input, audio_path, history, lang_sel, use_vad)
    except Exception as exc:
        print(f"Error in process_chat: {exc}")
        safe_history = [] if history is None else history
        return safe_history, "", None, f"Fehler: {str(exc)}"
667
+
668
+ # Send Button Click
669
+ send_btn.click(
670
+ process_chat,
671
+ inputs=[chat_text, chat_audio, chatbot, lang_selector, vad_toggle],
672
+ outputs=[chatbot, chat_text, chat_audio, status_display]
673
  ).then(
674
+ lambda: update_vad_indicator(),
675
+ outputs=[vad_indicator]
676
+ )
677
+
678
+ # Text Submit (Enter key)
679
+ chat_text.submit(
680
+ process_chat,
681
+ inputs=[chat_text, chat_audio, chatbot, lang_selector, vad_toggle],
682
+ outputs=[chatbot, chat_text, chat_audio, status_display]
683
  ).then(
684
+ lambda: update_vad_indicator(),
685
+ outputs=[vad_indicator]
686
  )
 
 
 
 
 
 
687
 
688
+ # Audio Change Handler
689
def on_audio_change(audio_path, use_vad):
    """React to a new or updated recording from the microphone widget.

    Delegates to handle_audio_stream when a file path is present;
    otherwise clears the transcript and reports an idle status.
    Returns (transcript, vad_indicator_html, status_text).
    """
    if not audio_path:
        return "", update_vad_indicator(), "Bereit"
    print(f"DEBUG: Audio changed: {audio_path}")
    # Stream the recording through VAD/transcription.
    transcript, vad_html, status_text = handle_audio_stream(audio_path, use_vad)
    return transcript, vad_html, status_text
697
+
698
+ chat_audio.change(
699
+ on_audio_change,
700
+ inputs=[chat_audio, vad_toggle],
701
+ outputs=[chat_text, vad_indicator, status_display]
702
  )
703
 
704
+ # Audio Streaming
705
+ chat_audio.stream(
706
+ on_audio_change,
707
+ inputs=[chat_audio, vad_toggle],
708
+ outputs=[chat_text, vad_indicator, status_display]
709
+ )
710
+
711
+ # TTS Button
712
def handle_tts(history):
    """Produce (audio, status_text) for the TTS button from the chat history."""
    audio = read_last_answer(history)
    if audio:
        return audio, "Audio wird abgespielt..."
    # No assistant reply could be synthesized.
    return None, "Keine Antwort zum Vorlesen gefunden"
718
+
719
  tts_btn.click(
720
+ handle_tts,
721
  inputs=[chatbot],
722
  outputs=[tts_audio, tts_status]
723
  ).then(
 
729
  )
730
 
731
# Script entry point: enable Gradio's request queue (required for the
# streaming audio events wired above) and start the web server.
if __name__ == "__main__":
    demo.queue().launch(ssr_mode=False, show_error=True)
733
+
734
+