ArchCoder committed on
Commit
82c886b
·
verified ·
1 Parent(s): 427e302

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -11
app.py CHANGED
@@ -139,7 +139,7 @@ def generate_answer(text_input):
139
  return f"Error: {str(e)}"
140
 
141
  def process_audio_stream(audio_path, question_text=None):
142
- """Streaming pipeline for Gradio UI"""
143
  start_time = time.time()
144
 
145
  # Step 1: Transcribe audio if provided
@@ -170,6 +170,7 @@ def process_audio_stream(audio_path, question_text=None):
170
  current_time = time.time() - start_time
171
  time_emoji = "🟢" if current_time < 3.0 else "🟡" if current_time < 3.5 else "🔴"
172
  timing_info = f"\n\n{time_emoji} **Timing:** Trans={transcription_time:.2f}s | Search={search_time:.2f}s | LLM={(time.time()-llm_start):.2f}s | **Total={current_time:.2f}s**"
 
173
  yield partial_answer + timing_info, current_time
174
 
175
  # Create Gradio interface
@@ -195,9 +196,10 @@ with gr.Blocks(title="Fast Q&A - Streaming Enabled", theme=gr.themes.Soft()) as
195
  audio_output = gr.Textbox(label="Answer (Streaming)", lines=8, show_copy_button=True)
196
  audio_time = gr.Number(label="Response Time (seconds)", precision=2)
197
 
 
198
  audio_submit.click(
199
- fn=lambda x: process_audio_stream(x, None),
200
- inputs=[audio_input],
201
  outputs=[audio_output, audio_time],
202
  api_name="audio_query_stream"
203
  )
@@ -216,8 +218,9 @@ with gr.Blocks(title="Fast Q&A - Streaming Enabled", theme=gr.themes.Soft()) as
216
  text_output = gr.Textbox(label="Answer (Streaming)", lines=8, show_copy_button=True)
217
  text_time = gr.Number(label="Response Time (seconds)", precision=2)
218
 
 
219
  text_submit.click(
220
- fn=lambda x: process_audio_stream(None, x),
221
  inputs=[text_input],
222
  outputs=[text_output, text_time],
223
  api_name="text_query_stream"
@@ -232,7 +235,7 @@ with gr.Blocks(title="Fast Q&A - Streaming Enabled", theme=gr.themes.Soft()) as
232
  inputs=text_input
233
  )
234
 
235
- # API endpoints for Pluely - Using Gradio's built-in interface functions
236
  with gr.Tab("🔌 Pluely Integration"):
237
  gr.Markdown("""
238
  ## Dedicated Endpoints for Pluely
@@ -278,8 +281,8 @@ with gr.Blocks(title="Fast Q&A - Streaming Enabled", theme=gr.themes.Soft()) as
278
  with gr.Row(visible=False):
279
  stt_input = gr.Textbox()
280
  stt_output = gr.JSON()
281
- ai_input = gr.Textbox()
282
- ai_output = gr.Textbox()
283
 
284
  # These create the /call/transcribe_stt and /call/answer_ai_stream endpoints
285
  stt_button = gr.Button("STT", visible=False)
@@ -290,11 +293,11 @@ with gr.Blocks(title="Fast Q&A - Streaming Enabled", theme=gr.themes.Soft()) as
290
  api_name="transcribe_stt"
291
  )
292
 
293
- ai_button = gr.Button("AI", visible=False)
294
- ai_button.click(
295
  fn=generate_answer_stream,
296
- inputs=[ai_input],
297
- outputs=[ai_output],
298
  api_name="answer_ai_stream"
299
  )
300
 
 
139
  return f"Error: {str(e)}"
140
 
141
  def process_audio_stream(audio_path, question_text=None):
142
+ """Streaming pipeline for Gradio UI - Returns tuple generator"""
143
  start_time = time.time()
144
 
145
  # Step 1: Transcribe audio if provided
 
170
  current_time = time.time() - start_time
171
  time_emoji = "🟢" if current_time < 3.0 else "🟡" if current_time < 3.5 else "🔴"
172
  timing_info = f"\n\n{time_emoji} **Timing:** Trans={transcription_time:.2f}s | Search={search_time:.2f}s | LLM={(time.time()-llm_start):.2f}s | **Total={current_time:.2f}s**"
173
+ # IMPORTANT: Must yield tuple (text, number) to match output components
174
  yield partial_answer + timing_info, current_time
175
 
176
  # Create Gradio interface
 
196
  audio_output = gr.Textbox(label="Answer (Streaming)", lines=8, show_copy_button=True)
197
  audio_time = gr.Number(label="Response Time (seconds)", precision=2)
198
 
199
+ # Fixed: Lambda wrapper ensures proper tuple unpacking
200
  audio_submit.click(
201
+ fn=process_audio_stream,
202
+ inputs=[audio_input, gr.Textbox(value=None, visible=False)],
203
  outputs=[audio_output, audio_time],
204
  api_name="audio_query_stream"
205
  )
 
218
  text_output = gr.Textbox(label="Answer (Streaming)", lines=8, show_copy_button=True)
219
  text_time = gr.Number(label="Response Time (seconds)", precision=2)
220
 
221
+ # Fixed: Proper function call with audio=None
222
  text_submit.click(
223
+ fn=lambda text: process_audio_stream(None, text),
224
  inputs=[text_input],
225
  outputs=[text_output, text_time],
226
  api_name="text_query_stream"
 
235
  inputs=text_input
236
  )
237
 
238
+ # API endpoints for Pluely
239
  with gr.Tab("🔌 Pluely Integration"):
240
  gr.Markdown("""
241
  ## Dedicated Endpoints for Pluely
 
281
  with gr.Row(visible=False):
282
  stt_input = gr.Textbox()
283
  stt_output = gr.JSON()
284
+ ai_stream_input = gr.Textbox()
285
+ ai_stream_output = gr.Textbox()
286
 
287
  # These create the /call/transcribe_stt and /call/answer_ai_stream endpoints
288
  stt_button = gr.Button("STT", visible=False)
 
293
  api_name="transcribe_stt"
294
  )
295
 
296
+ ai_stream_button = gr.Button("AI Stream", visible=False)
297
+ ai_stream_button.click(
298
  fn=generate_answer_stream,
299
+ inputs=[ai_stream_input],
300
+ outputs=[ai_stream_output],
301
  api_name="answer_ai_stream"
302
  )
303