Update app.py
app.py CHANGED
@@ -348,14 +348,40 @@ def clear_all():
 
     return outputs
 
+# Function to load example audio
+def load_example_audio(example_name):
+    # This function would load the example audio based on the selected example
+    # For now, we'll return a placeholder path - you should replace these with actual paths to your example audio files
+    example_paths = {
+        "Happy Speech": "examples/happy_speech.wav",
+        "Sad Story": "examples/sad_story.wav",
+        "Neutral News": "examples/neutral_news.wav"
+    }
+
+    # Return the path to the selected example
+    return example_paths.get(example_name, "examples/happy_speech.wav")
+
 # Create the Gradio interface with proper output handling
 with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as interface:
     gr.Markdown("# Affective Virtual Environments")
     gr.Markdown("Create an AVE using your voice. Audio is split into chunks, with separate predictions and generations for each segment.")
 
     with gr.Row():
-
-
+        with gr.Column(scale=2):
+            audio_input = gr.Audio(label="Input Audio", type="filepath", sources=["microphone", "upload"])
+
+            # Add example audio selection
+            example_selector = gr.Dropdown(
+                label="Select Example Audio",
+                choices=["Happy Speech", "Sad Story", "Neutral News"],
+                value=None,
+                info="Choose from pre-recorded example speeches"
+            )
+
+            # Add button to load selected example
+            load_example_btn = gr.Button("Load Example", variant="secondary")
+
+        with gr.Column(scale=1):
             # Add chunk duration input
             chunk_duration_input = gr.Number(
                 label="Chunk Duration (seconds)",

@@ -454,6 +480,12 @@ with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as i
         # Hide loading indicator and show results
         yield [gr.HTML("")] + group_visibility + outputs
 
+    # Function to handle example selection
+    def load_example(example_name):
+        if not example_name:
+            return None
+        return load_example_audio(example_name)
+
     # Set up the button click
     process_btn.click(
         fn=process_and_display,

@@ -479,5 +511,12 @@ with gr.Blocks(title="Affective Virtual Environments - Chunked Processing") as i
             container['music']
         ]] + [loading_indicator] + [chunk_duration_input]
     )
+
+    # Set up the example loading button
+    load_example_btn.click(
+        fn=load_example,
+        inputs=[example_selector],
+        outputs=[audio_input]
+    )
 
 interface.launch()
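
For reference, the example-loading pattern this commit introduces reduces to a minimal standalone sketch. The examples/*.wav paths are the same hypothetical placeholders used in the diff, and the os.path.exists guard is an addition not present in the commit, shown only as one way to handle a missing file:

# Minimal sketch of the dropdown -> button -> audio wiring from this commit.
# Assumes gradio is installed; the example files are hypothetical placeholders.
import os

import gradio as gr

# Hypothetical example files, carried over from the diff
EXAMPLE_PATHS = {
    "Happy Speech": "examples/happy_speech.wav",
    "Sad Story": "examples/sad_story.wav",
    "Neutral News": "examples/neutral_news.wav",
}

def load_example(example_name):
    # Nothing selected: return None so the audio component is cleared
    if not example_name:
        return None
    path = EXAMPLE_PATHS.get(example_name)
    # Guard (not in the commit): only return a path that actually exists on disk
    return path if path and os.path.exists(path) else None

with gr.Blocks() as demo:
    audio_input = gr.Audio(label="Input Audio", type="filepath")
    example_selector = gr.Dropdown(
        label="Select Example Audio",
        choices=list(EXAMPLE_PATHS),
        value=None,
    )
    load_example_btn = gr.Button("Load Example", variant="secondary")

    # Same wiring as the diff: dropdown value in, audio filepath out
    load_example_btn.click(
        fn=load_example,
        inputs=[example_selector],
        outputs=[audio_input],
    )

if __name__ == "__main__":
    demo.launch()

One deliberate difference from the committed code: the sketch returns None for an unknown or missing selection, which clears the gr.Audio component, whereas the diff falls back to examples/happy_speech.wav via example_paths.get(example_name, "examples/happy_speech.wav").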