jfforero committed on
Commit
825f28e
·
verified ·
1 Parent(s): 82b74a5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -10
app.py CHANGED
@@ -319,6 +319,8 @@ def get_predictions(audio_input):
319
 
320
  return results
321
 
 
 
322
  # Create the Gradio interface with proper output handling
323
  with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as interface:
324
  gr.Markdown("# Affective Virtual Environments")
@@ -328,8 +330,18 @@ with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as i
328
  audio_input = gr.Audio(label="Input Audio", type="filepath", sources=["microphone", "upload"])
329
  process_btn = gr.Button("Process Audio", variant="primary")
330
 
 
 
 
 
 
 
 
 
 
331
  # Create output components for each chunk type
332
  output_containers = []
 
333
 
334
  # We'll create up to 10 chunk slots (adjust as needed)
335
  for i in range(10):
@@ -344,8 +356,8 @@ with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as i
344
  audio_output = gr.Audio(label="Generated Music")
345
  gr.HTML("<hr style='margin: 20px 0; border: 1px solid #ccc;'>")
346
 
 
347
  output_containers.append({
348
- 'group': chunk_group,
349
  'emotion': emotion_output,
350
  'transcription': transcription_output,
351
  'sentiment': sentiment_output,
@@ -354,16 +366,26 @@ with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as i
354
  })
355
 
356
  def process_and_display(audio_input):
 
 
 
 
 
 
 
 
 
357
  results = get_predictions(audio_input)
358
 
359
  # Initialize outputs list
360
  outputs = []
 
361
 
362
  # Process each result
363
  for i, result in enumerate(results):
364
  if i < len(output_containers):
 
365
  outputs.extend([
366
- gr.Group(visible=True), # Show the group
367
  result['emotion'],
368
  result['transcription'],
369
  result['sentiment'],
@@ -372,23 +394,22 @@ with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as i
372
  ])
373
  else:
374
  # If we have more results than containers, just extend with None
375
- outputs.extend([None] * 6)
 
376
 
377
  # Hide remaining containers
378
  for i in range(len(results), len(output_containers)):
379
- outputs.extend([
380
- gr.Group(visible=False),
381
- None, None, None, None, None
382
- ])
383
 
384
- return outputs
 
385
 
386
  # Set up the button click
387
  process_btn.click(
388
  fn=process_and_display,
389
  inputs=audio_input,
390
- outputs=[comp for container in output_containers for comp in [
391
- container['group'],
392
  container['emotion'],
393
  container['transcription'],
394
  container['sentiment'],
 
319
 
320
  return results
321
 
322
+ # ... (your existing imports remain the same)
323
+
324
  # Create the Gradio interface with proper output handling
325
  with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as interface:
326
  gr.Markdown("# Affective Virtual Environments")
 
330
  audio_input = gr.Audio(label="Input Audio", type="filepath", sources=["microphone", "upload"])
331
  process_btn = gr.Button("Process Audio", variant="primary")
332
 
333
+ # Add a loading indicator
334
+ loading_indicator = gr.HTML("""
335
+ <div id="loading" style="display: none; text-align: center; margin: 20px;">
336
+ <p style="font-size: 18px; color: #4a4a4a;">Processing audio chunks...</p>
337
+ <div style="border: 4px solid #f3f3f3; border-top: 4px solid #3498db; border-radius: 50%; width: 30px; height: 30px; animation: spin 2s linear infinite; margin: 0 auto;"></div>
338
+ <style>@keyframes spin {0% { transform: rotate(0deg); } 100% { transform: rotate(360deg); }}</style>
339
+ </div>
340
+ """)
341
+
342
  # Create output components for each chunk type
343
  output_containers = []
344
+ group_components = [] # Store group components separately
345
 
346
  # We'll create up to 10 chunk slots (adjust as needed)
347
  for i in range(10):
 
356
  audio_output = gr.Audio(label="Generated Music")
357
  gr.HTML("<hr style='margin: 20px 0; border: 1px solid #ccc;'>")
358
 
359
+ group_components.append(chunk_group) # Store the group component
360
  output_containers.append({
 
361
  'emotion': emotion_output,
362
  'transcription': transcription_output,
363
  'sentiment': sentiment_output,
 
366
  })
367
 
368
  def process_and_display(audio_input):
369
+ # Show loading indicator
370
+ yield [gr.HTML("""
371
+ <div style="text-align: center; margin: 20px;">
372
+ <p style="font-size: 18px; color: #4a4a4a;">Processing audio chunks...</p>
373
+ <div style="border: 4px solid #f3f3f3; border-top: 4px solid #3498db; border-radius: 50%; width: 30px; height: 30px; animation: spin 2s linear infinite; margin: 0 auto;"></div>
374
+ <style>@keyframes spin {0% { transform: rotate(0deg); } 100% { transform: rotate(360deg); }}</style>
375
+ </div>
376
+ """)] + [gr.Group(visible=False)] * len(group_components) + [None] * (len(output_containers) * 5)
377
+
378
  results = get_predictions(audio_input)
379
 
380
  # Initialize outputs list
381
  outputs = []
382
+ group_visibility = []
383
 
384
  # Process each result
385
  for i, result in enumerate(results):
386
  if i < len(output_containers):
387
+ group_visibility.append(gr.Group(visible=True))
388
  outputs.extend([
 
389
  result['emotion'],
390
  result['transcription'],
391
  result['sentiment'],
 
394
  ])
395
  else:
396
  # If we have more results than containers, just extend with None
397
+ group_visibility.append(gr.Group(visible=False))
398
+ outputs.extend([None] * 5)
399
 
400
  # Hide remaining containers
401
  for i in range(len(results), len(output_containers)):
402
+ group_visibility.append(gr.Group(visible=False))
403
+ outputs.extend([None] * 5)
 
 
404
 
405
+ # Hide loading indicator and show results
406
+ yield [gr.HTML("")] + group_visibility + outputs
407
 
408
  # Set up the button click
409
  process_btn.click(
410
  fn=process_and_display,
411
  inputs=audio_input,
412
+ outputs=[loading_indicator] + group_components + [comp for container in output_containers for comp in [
 
413
  container['emotion'],
414
  container['transcription'],
415
  container['sentiment'],