DreamStream-1 committed on
Commit
3e83fd3
·
verified ·
1 Parent(s): 619a0b7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -8
app.py CHANGED
@@ -262,10 +262,11 @@ def process_question(question, history):
262
  return "Please upload a document first.", history
263
  try:
264
  response = rag.ask_question(question)
265
- history.append((question, response))
 
266
  return "", history
267
  except Exception as e:
268
- return "", history + [(question, f"Error: {str(e)}")]
269
 
270
  def process_audio(audio_file, history):
271
  if audio_file is None:
@@ -275,10 +276,11 @@ def process_audio(audio_file, history):
275
  if not rag.thread_id:
276
  return "Please upload a document first.", history
277
  response = rag.ask_question(transcript)
278
- history.append((transcript, response))
 
279
  return "", history
280
  except Exception as e:
281
- return "", history + [("Audio input", f"Error: {str(e)}")]
282
 
283
  def process_audio_base64(audio_base64, history):
284
  if not audio_base64:
@@ -323,7 +325,7 @@ with gr.Blocks(title="Document Q&A System") as demo:
323
 
324
  with gr.Row():
325
  with gr.Column():
326
- chatbot = gr.Chatbot(height=400)
327
  question = gr.Textbox(label="Ask a question")
328
  question.submit(process_question, [question, chatbot], [question, chatbot])
329
 
@@ -333,8 +335,12 @@ with gr.Blocks(title="Document Q&A System") as demo:
333
  audio_input = gr.Audio(label="Voice Input", type="filepath", visible=False)
334
  audio_input.change(process_audio, [audio_input, chatbot], [audio_input, chatbot])
335
 
336
- # Handle audio data from custom recorder
337
- demo.load(None, None, None, _js="""
 
 
 
 
338
  function() {
339
  window.addEventListener('message', function(event) {
340
  if (event.data.type === 'audio_data') {
@@ -359,7 +365,8 @@ with gr.Blocks(title="Document Q&A System") as demo:
359
  }
360
  });
361
  }
362
- """)
 
363
 
364
  if __name__ == "__main__":
365
  demo.launch(
 
262
  return "Please upload a document first.", history
263
  try:
264
  response = rag.ask_question(question)
265
+ history.append({"role": "user", "content": question})
266
+ history.append({"role": "assistant", "content": response})
267
  return "", history
268
  except Exception as e:
269
+ return "", history + [{"role": "assistant", "content": f"Error: {str(e)}"}]
270
 
271
  def process_audio(audio_file, history):
272
  if audio_file is None:
 
276
  if not rag.thread_id:
277
  return "Please upload a document first.", history
278
  response = rag.ask_question(transcript)
279
+ history.append({"role": "user", "content": f"🎤 {transcript}"})
280
+ history.append({"role": "assistant", "content": response})
281
  return "", history
282
  except Exception as e:
283
+ return "", history + [{"role": "assistant", "content": f"Error: {str(e)}"}]
284
 
285
  def process_audio_base64(audio_base64, history):
286
  if not audio_base64:
 
325
 
326
  with gr.Row():
327
  with gr.Column():
328
+ chatbot = gr.Chatbot(height=400, type="messages")
329
  question = gr.Textbox(label="Ask a question")
330
  question.submit(process_question, [question, chatbot], [question, chatbot])
331
 
 
335
  audio_input = gr.Audio(label="Voice Input", type="filepath", visible=False)
336
  audio_input.change(process_audio, [audio_input, chatbot], [audio_input, chatbot])
337
 
338
+ # Handle audio data from custom recorder using JavaScript
339
+ demo.load(
340
+ fn=None,
341
+ inputs=None,
342
+ outputs=None,
343
+ _js="""
344
  function() {
345
  window.addEventListener('message', function(event) {
346
  if (event.data.type === 'audio_data') {
 
365
  }
366
  });
367
  }
368
+ """
369
+ )
370
 
371
  if __name__ == "__main__":
372
  demo.launch(