jfforero committed
Commit b8bc520 · verified · 1 Parent(s): d135ebf

Update app.py

Files changed (1):
  1. app.py +19 -66

app.py CHANGED
@@ -480,60 +480,6 @@ def clear_all():
     return outputs
 
 # In the Gradio interface, replace the fade_animation_output with these two components
-with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as interface:
-    # ... [rest of your interface code] ...
-
-    # Replace the single fade animation output with these two components
-    with gr.Row():
-        fade_preview_output = gr.Image(
-            label="Fade Animation Preview",
-            interactive=False
-        )
-        fade_animation_output = gr.File(
-            label="Download Fade Animation",
-            type="binary",
-            interactive=False
-        )
-
-    # Update the process_btn.click call to include both outputs
-    process_btn.click(
-        fn=process_and_display,
-        inputs=[audio_input, generate_audio_checkbox, chunk_duration_input],
-        outputs=[loading_indicator] + group_components + [comp for container in output_containers for comp in [
-            container['emotion'],
-            container['transcription'],
-            container['sentiment'],
-            container['image'],
-            container['music']
-        ]] + [fade_preview_output, fade_animation_output]
-    )
-
-    # Update the clear_btn.click call to include both outputs
-    clear_btn.click(
-        fn=clear_all,
-        inputs=[],
-        outputs=[audio_input] + group_components + [comp for container in output_containers for comp in [
-            container['emotion'],
-            container['transcription'],
-            container['sentiment'],
-            container['image'],
-            container['music']
-        ]] + [loading_indicator] + [chunk_duration_input] + [example_selector] + [fade_preview_output, fade_animation_output]
-    )
-
-    # Function to load example audio
-    def load_example_audio(example_name):
-        # This function would load the example audio based on the selected example
-        # For now, we'll return a placeholder path - you should replace these with actual paths to your example audio files
-        example_paths = {
-            "Happy Speech": "examples/happy_speech.wav",
-            "Sad Story": "examples/sad_story.wav",
-            "Neutral News": "examples/neutral_news.wav"
-        }
-
-        # Return the path to the selected example
-        return example_paths.get(example_name, "examples/happy_speech.wav")
-
 # Create the Gradio interface with proper output handling
 with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as interface:
     gr.Markdown("# Affective Virtual Environments")
@@ -609,12 +555,17 @@ with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as i
             'music': audio_output
         })
 
-    # Add a component to display the fade animation at the end
-    fade_animation_output = gr.Image(
-        label="Image Sequence with Fade Transitions",
-        format="gif",
-        interactive=False
-    )
+    # Add components to display the fade animation
+    with gr.Row():
+        fade_preview_output = gr.Image(
+            label="Fade Animation Preview",
+            interactive=False
+        )
+        fade_animation_output = gr.File(
+            label="Download Fade Animation",
+            type="binary",
+            interactive=False
+        )
 
     # Function to process and display results
     def process_and_display(audio_input, generate_audio, chunk_duration):
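Note: the single animated gr.Image is split into a gr.Image preview plus a gr.File download. With type="binary" the file component deals in raw bytes rather than a temp-file path, which is presumably why create_fade_transition further down now returns two values: a still frame for the preview and the encoded GIF for download.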
@@ -629,7 +580,7 @@ with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as i
             <div style="border: 4px solid #f3f3f3; border-top: 4px solid #3498db; border-radius: 50%; width: 30px; height: 30px; animation: spin 2s linear infinite; margin: 0 auto;"></div>
             <style>@keyframes spin {{ 0% {{ transform: rotate(0deg); }} 100% {{ transform: rotate(360deg); }} }}</style>
         </div>
-        """)] + [gr.Group(visible=False)] * len(group_components) + [None] * (len(output_containers) * 5) + [None]
+        """)] + [gr.Group(visible=False)] * len(group_components) + [None] * (len(output_containers) * 5) + [None, None]
 
         results = get_predictions(audio_input, generate_audio, chunk_duration)
 
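Note: process_and_display is a generator, and Gradio matches each yielded list to the outputs of process_btn.click strictly by position, one value per component. The loading-state yield therefore grows from [None] to [None, None] to cover both new fade components.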
@@ -662,13 +613,14 @@ with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as i
                 outputs.extend([None] * 5)
 
         # Create fade animation if we have multiple images
+        fade_preview = None
         fade_animation = None
         if len(all_images) > 1:
             # Create a fade animation (GIF)
-            fade_animation = create_fade_transition(all_images, fade_duration=1.5, fps=15)
+            fade_preview, fade_animation = create_fade_transition(all_images, fade_duration=1.5, fps=15)
 
         # Hide loading indicator and show results
-        yield [gr.HTML("")] + group_visibility + outputs + [fade_animation]
+        yield [gr.HTML("")] + group_visibility + outputs + [fade_preview, fade_animation]
 
     # Function to handle example selection
     def load_example(example_name):
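For reference, a minimal sketch of what a two-value create_fade_transition could look like. The real helper is defined elsewhere in app.py and is not part of this diff, so the PIL-based implementation below (images arriving as PIL.Image objects, GIF returned as bytes) is an assumption:

from io import BytesIO
from PIL import Image

def create_fade_transition(images, fade_duration=1.5, fps=15):
    # Hypothetical sketch: cross-fade a list of PIL images and return
    # (preview_frame, gif_bytes); app.py's actual helper may differ.
    base = images[0].convert("RGB")
    frames = [base]
    steps = max(1, int(fade_duration * fps))
    for nxt in images[1:]:
        nxt = nxt.convert("RGB").resize(base.size)
        prev = frames[-1]
        # Blend prev into nxt over `steps` intermediate frames
        for i in range(1, steps + 1):
            frames.append(Image.blend(prev, nxt, i / steps))
    # Encode the frame list as an animated GIF in memory
    buf = BytesIO()
    frames[0].save(
        buf,
        format="GIF",
        save_all=True,
        append_images=frames[1:],
        duration=int(1000 / fps),  # milliseconds per frame
        loop=0,
    )
    # The first frame doubles as the still preview for gr.Image
    return frames[0], buf.getvalue()

Returning the first frame keeps the gr.Image preview cheap to render, while the full animation only has to be decoded when the user downloads it.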
@@ -702,7 +654,8 @@ with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as i
         # For example selector (reset to None)
         outputs.append(None)
 
-        # For fade animation (set to None)
+        # For fade preview and animation (set to None)
+        outputs.append(None)
         outputs.append(None)
 
         return outputs
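As with the click handlers, clear_all's return list is matched to clear_btn's outputs purely by position, so the two trailing Nones must stay in the same order as [fade_preview_output, fade_animation_output] at the end of that outputs list.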
@@ -717,7 +670,7 @@ with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as i
             container['sentiment'],
             container['image'],
             container['music']
-        ]] + [fade_animation_output]
+        ]] + [fade_preview_output, fade_animation_output]
     )
 
     # Set up the clear button
@@ -730,7 +683,7 @@ with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as i
             container['sentiment'],
             container['image'],
             container['music']
-        ]] + [loading_indicator] + [chunk_duration_input] + [example_selector] + [fade_animation_output]
+        ]] + [loading_indicator] + [chunk_duration_input] + [example_selector] + [fade_preview_output, fade_animation_output]
     )
 
     # Set up the example loading button
 