Update app.py

app.py CHANGED
@@ -29,12 +29,6 @@ from matplotlib.animation import FuncAnimation
 import base64
 from io import BytesIO
 
-
-
-
-
-
-
 # Load the emotion prediction model
 def load_emotion_model(model_path):
     try:
@@ -341,35 +335,7 @@ def get_predictions(audio_input, generate_audio=True, chunk_duration=10):
 
     return results
 
-
-# Update the clear_all function to clear the fade animation
-def clear_all():
-    # Create a list with None for all outputs
-    outputs = [None]  # For audio input
-
-    # For group components (set to invisible)
-    outputs.extend([gr.Group(visible=False)] * len(group_components))
-
-    # For all output containers (set to None)
-    outputs.extend([None] * (len(output_containers) * 5))
-
-    # For loading indicator (empty HTML)
-    outputs.append(gr.HTML(""))
-
-    # For chunk duration (reset to 10)
-    outputs.append(10)
-
-    # For example selector (reset to None)
-    outputs.append(None)
-
-    # For fade animation (set to None)
-    outputs.append(None)
-
-    return outputs
-
-#
-
-# Add this function to create a fade transition between images
+# Function to create a fade transition between images
 def create_fade_transition(images, fade_duration=1.0, fps=24):
     """
     Create a smooth fade transition between a sequence of images.
@@ -380,7 +346,7 @@ def create_fade_transition(images, fade_duration=1.0, fps=24):
         fps: Frames per second for the animation
 
     Returns:
-        BytesIO object containing the animation as a
+        BytesIO object containing the animation as a GIF
     """
    if not images or len(images) < 2:
         return None
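Note: the body of create_fade_transition sits outside this hunk, so only its docstring fix is visible here. As a reference point, below is a minimal sketch of one way such a helper can cross-fade equally sized PIL images into an in-memory GIF; the name fade_gif_sketch and its internals are assumptions for illustration, not this Space's actual implementation.

# Illustrative sketch only (assumed helper, not the Space's code):
# cross-fade a list of equally sized PIL images into an animated GIF in memory.
import numpy as np
from io import BytesIO
from PIL import Image

def fade_gif_sketch(images, fade_duration=1.0, fps=24):
    if not images or len(images) < 2:
        return None
    fade_frames = int(fade_duration * fps)
    frames = [images[0].convert("RGB")]
    for first, second in zip(images, images[1:]):
        a = np.asarray(first.convert("RGB"), dtype=np.float32)
        b = np.asarray(second.convert("RGB"), dtype=np.float32)
        for j in range(1, fade_frames + 1):
            alpha = j / fade_frames
            blended = ((1 - alpha) * a + alpha * b).astype(np.uint8)
            frames.append(Image.fromarray(blended))
    buffer = BytesIO()
    frames[0].save(buffer, format="GIF", save_all=True, append_images=frames[1:],
                   duration=int(1000 / fps), loop=0)
    buffer.seek(0)
    return buffer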
@@ -428,169 +394,6 @@ def create_fade_transition(images, fade_duration=1.0, fps=24):
     buffer.seek(0)
     return buffer
 
-# Add this function to create a video with fade transitions
-def create_fade_video(images, fade_duration=1.0, fps=24):
-    """
-    Create a video with smooth fade transitions between images.
-
-    Args:
-        images: List of PIL Images
-        fade_duration: Duration of fade in seconds
-        fps: Frames per second for the video
-
-    Returns:
-        BytesIO object containing the video
-    """
-    if not images or len(images) < 2:
-        return None
-
-    # Ensure all images are the same size
-    width, height = images[0].size
-    for img in images:
-        img.thumbnail((width, height), Image.Resampling.LANCZOS)
-
-    # Calculate number of frames for fade
-    fade_frames = int(fade_duration * fps)
-
-    # Create a temporary file for the video
-    with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as temp_file:
-        temp_path = temp_file.name
-
-    # Create writer
-    writer = imageio.get_writer(temp_path, fps=fps)
-
-    try:
-        # Add first image with full display time
-        for _ in range(fade_frames):
-            writer.append_data(np.array(images[0]))
-
-        # Create fade transitions between images
-        for i in range(len(images) - 1):
-            img1 = np.array(images[i])
-            img2 = np.array(images[i+1])
-
-            # Create fade frames
-            for j in range(fade_frames):
-                alpha = j / fade_frames
-                blended = (img1 * (1 - alpha) + img2 * alpha).astype(np.uint8)
-                writer.append_data(blended)
-
-            # Display the new image for a while
-            for _ in range(fade_frames):
-                writer.append_data(np.array(images[i+1]))
-
-        writer.close()
-
-        # Read the video back into memory
-        with open(temp_path, 'rb') as f:
-            video_data = BytesIO(f.read())
-
-        # Clean up
-        os.unlink(temp_path)
-
-        return video_data
-    except Exception as e:
-        print("Error creating fade video:", e)
-        # Clean up
-        if os.path.exists(temp_path):
-            os.unlink(temp_path)
-        return None
-
-#
-
-
-def process_and_display(audio_input, generate_audio, chunk_duration):
-    # Validate chunk duration
-    if chunk_duration is None or chunk_duration <= 0:
-        chunk_duration = 10
-
-    # Show loading indicator
-    yield [gr.HTML(f"""
-        <div style="text-align: center; margin: 20px;">
-            <p style="font-size: 18px; color: #4a4a4a;">Processing audio in {chunk_duration}-second chunks...</p>
-            <div style="border: 4px solid #f3f3f3; border-top: 4px solid #3498db; border-radius: 50%; width: 30px; height: 30px; animation: spin 2s linear infinite; margin: 0 auto;"></div>
-            <style>@keyframes spin {{ 0% {{ transform: rotate(0deg); }} 100% {{ transform: rotate(360deg); }} }}</style>
-        </div>
-    """)] + [gr.Group(visible=False)] * len(group_components) + [None] * (len(output_containers) * 5) + [None]
-
-    results = get_predictions(audio_input, generate_audio, chunk_duration)
-
-    # Initialize outputs list
-    outputs = []
-    group_visibility = []
-    all_images = []  # Collect all generated images for the fade animation
-
-    # Process each result
-    for i, result in enumerate(results):
-        if i < len(output_containers):
-            group_visibility.append(gr.Group(visible=True))
-            outputs.extend([
-                result['emotion'],
-                result['transcription'],
-                result['sentiment'],
-                result['image'],
-                result['music']
-            ])
-            # Collect the image for the fade animation
-            all_images.append(result['image'])
-        else:
-            # If we have more results than containers, just extend with None
-            group_visibility.append(gr.Group(visible=False))
-            outputs.extend([None] * 5)
-
-    # Hide remaining containers
-    for i in range(len(results), len(output_containers)):
-        group_visibility.append(gr.Group(visible=False))
-        outputs.extend([None] * 5)
-
-    # Create fade animation if we have multiple images
-    fade_animation = None
-    if len(all_images) > 1:
-        # Create a fade animation (GIF or video)
-        fade_animation = create_fade_transition(all_images, fade_duration=1.5, fps=15)
-
-    # Hide loading indicator and show results
-    yield [gr.HTML("")] + group_visibility + outputs + [fade_animation]
-
-# In your Gradio interface setup, add a new output component for the fade animation
-with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as interface:
-    # ... [your existing interface code] ...
-
-    # Add a component to display the fade animation at the end
-    fade_animation_output = gr.Image(
-        label="Image Sequence with Fade Transitions",
-        format="gif",
-        interactive=False
-    )
-
-    # Update the process_btn.click call to include the new output
-    process_btn.click(
-        fn=process_and_display,
-        inputs=[audio_input, generate_audio_checkbox, chunk_duration_input],
-        outputs=[loading_indicator] + group_components + [comp for container in output_containers for comp in [
-            container['emotion'],
-            container['transcription'],
-            container['sentiment'],
-            container['image'],
-            container['music']
-        ]] + [fade_animation_output]
-    )
-
-
-
-    # Update the clear_btn.click call to include the new output
-    clear_btn.click(
-        fn=clear_all,
-        inputs=[],
-        outputs=[audio_input] + group_components + [comp for container in output_containers for comp in [
-            container['emotion'],
-            container['transcription'],
-            container['sentiment'],
-            container['image'],
-            container['music']
-        ]] + [loading_indicator] + [chunk_duration_input] + [example_selector] + [fade_animation_output]
-    )
-
 # Function to load example audio
 def load_example_audio(example_name):
     # This function would load the example audio based on the selected example
@@ -644,12 +447,12 @@ with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as i
     process_btn = gr.Button("Process Audio", variant="primary")
     clear_btn = gr.Button("Clear All", variant="secondary")
 
-    # Add a loading indicator
+    # Add a loading indicator
     loading_indicator = gr.HTML("""
        <div id="loading" style="display: none; text-align: center; margin: 20px;">
            <p style="font-size: 18px; color: #4a4a4a;">Processing audio chunks...</p>
            <div style="border: 4px solid #f3f3f3; border-top: 4px solid #3498db; border-radius: 50%; width: 30px; height: 30px; animation: spin 2s linear infinite; margin: 0 auto;"></div>
-           <style>@keyframes spin { 0%
+           <style>@keyframes spin { 0% { transform: rotate(0deg); } 100% { transform: rotate(360deg); } }</style>
        </div>
    """)
 
@@ -679,7 +482,66 @@ with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as i
         'music': audio_output
     })
 
-
+    # Add a component to display the fade animation at the end
+    fade_animation_output = gr.Image(
+        label="Image Sequence with Fade Transitions",
+        format="gif",
+        interactive=False
+    )
+
+    # Function to process and display results
+    def process_and_display(audio_input, generate_audio, chunk_duration):
+        # Validate chunk duration
+        if chunk_duration is None or chunk_duration <= 0:
+            chunk_duration = 10
+
+        # Show loading indicator
+        yield [gr.HTML(f"""
+            <div style="text-align: center; margin: 20px;">
+                <p style="font-size: 18px; color: #4a4a4a;">Processing audio in {chunk_duration}-second chunks...</p>
+                <div style="border: 4px solid #f3f3f3; border-top: 4px solid #3498db; border-radius: 50%; width: 30px; height: 30px; animation: spin 2s linear infinite; margin: 0 auto;"></div>
+                <style>@keyframes spin {{ 0% {{ transform: rotate(0deg); }} 100% {{ transform: rotate(360deg); }} }}</style>
+            </div>
+        """)] + [gr.Group(visible=False)] * len(group_components) + [None] * (len(output_containers) * 5) + [None]
+
+        results = get_predictions(audio_input, generate_audio, chunk_duration)
+
+        # Initialize outputs list
+        outputs = []
+        group_visibility = []
+        all_images = []  # Collect all generated images for the fade animation
+
+        # Process each result
+        for i, result in enumerate(results):
+            if i < len(output_containers):
+                group_visibility.append(gr.Group(visible=True))
+                outputs.extend([
+                    result['emotion'],
+                    result['transcription'],
+                    result['sentiment'],
+                    result['image'],
+                    result['music']
+                ])
+                # Collect the image for the fade animation
+                all_images.append(result['image'])
+            else:
+                # If we have more results than containers, just extend with None
+                group_visibility.append(gr.Group(visible=False))
+                outputs.extend([None] * 5)
+
+        # Hide remaining containers
+        for i in range(len(results), len(output_containers)):
+            group_visibility.append(gr.Group(visible=False))
+            outputs.extend([None] * 5)
+
+        # Create fade animation if we have multiple images
+        fade_animation = None
+        if len(all_images) > 1:
+            # Create a fade animation (GIF)
+            fade_animation = create_fade_transition(all_images, fade_duration=1.5, fps=15)
+
+        # Hide loading indicator and show results
+        yield [gr.HTML("")] + group_visibility + outputs + [fade_animation]
 
     # Function to handle example selection
     def load_example(example_name):
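The process_and_display handler added above is written as a generator: each yield pushes one complete set of output values to the UI, so the first yield shows the loading indicator and the last replaces it with the results. Below is a minimal, self-contained sketch of that pattern with toy component names and a time.sleep standing in for the real chunked audio processing; it is an assumption-labeled illustration, not the Space's code.

# Minimal sketch of the yield-per-update pattern used by process_and_display.
# Component names are illustrative; only the generator/click wiring is the point.
import time
import gradio as gr

with gr.Blocks() as demo:
    status = gr.HTML()
    result = gr.Textbox(label="Result")
    run_btn = gr.Button("Run")

    def run_job():
        # First yield: show a spinner/message while work is in progress.
        yield gr.HTML("<p>Processing...</p>"), None
        time.sleep(2)  # stand-in for get_predictions() over audio chunks
        # Final yield: clear the indicator and deliver the result.
        yield gr.HTML(""), "done"

    run_btn.click(fn=run_job, inputs=[], outputs=[status, result])

if __name__ == "__main__":
    demo.launch()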
@@ -693,6 +555,31 @@ with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as i
         # The waveform will be automatically displayed by Gradio's Audio component
         return example_path, example_name
 
+    # Function to clear audio input and all outputs
+    def clear_all():
+        # Create a list with None for all outputs
+        outputs = [None]  # For audio input
+
+        # For group components (set to invisible)
+        outputs.extend([gr.Group(visible=False)] * len(group_components))
+
+        # For all output containers (set to None)
+        outputs.extend([None] * (len(output_containers) * 5))
+
+        # For loading indicator (empty HTML)
+        outputs.append(gr.HTML(""))
+
+        # For chunk duration (reset to 10)
+        outputs.append(10)
+
+        # For example selector (reset to None)
+        outputs.append(None)
+
+        # For fade animation (set to None)
+        outputs.append(None)
+
+        return outputs
+
     # Set up the button click
     process_btn.click(
         fn=process_and_display,
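The clear_all added above returns one flat list whose length and order must match the outputs= list passed to clear_btn.click in the next hunks. A small runnable sketch of that positional contract, with toy components rather than the Space's full layout, is shown here as an assumed illustration.

# Minimal sketch of the clear-everything pattern: the returned list must line up
# positionally with the outputs= list. Toy components; not the Space's layout.
import gradio as gr

with gr.Blocks() as demo:
    audio_in = gr.Audio(label="Input audio")
    with gr.Group(visible=True) as result_group:
        transcription = gr.Textbox(label="Transcription")
        image_out = gr.Image(label="Generated image")
    clear_btn = gr.Button("Clear All")

    def clear_all_sketch():
        # One entry per output, in the same order as outputs= below:
        # reset the audio, hide the result group, clear the text and image.
        return [None, gr.Group(visible=False), None, None]

    clear_btn.click(fn=clear_all_sketch, inputs=[],
                    outputs=[audio_in, result_group, transcription, image_out])

if __name__ == "__main__":
    demo.launch()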
@@ -703,10 +590,10 @@ with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as i
             container['sentiment'],
             container['image'],
             container['music']
-        ]]
+        ]] + [fade_animation_output]
     )
 
-    # Set up the clear button
+    # Set up the clear button
     clear_btn.click(
         fn=clear_all,
         inputs=[],
@@ -716,7 +603,7 @@ with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as i
             container['sentiment'],
             container['image'],
             container['music']
-        ]] + [loading_indicator] + [chunk_duration_input] + [example_selector]
+        ]] + [loading_indicator] + [chunk_duration_input] + [example_selector] + [fade_animation_output]
     )
 
     # Set up the example loading button