Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -910,22 +910,39 @@ custom_css = """
|
|
| 910 |
# Create the Gradio interface with proper output handling
|
| 911 |
with gr.Blocks(title="Affective Virtual Environments - Chunked Processing", css=custom_css) as interface:
|
| 912 |
gr.Markdown("# The Emotional Machine")
|
|
|
|
|
|
|
|
|
|
| 913 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 914 |
|
| 915 |
with gr.Row():
|
| 916 |
with gr.Column(scale=2):
|
| 917 |
audio_input = gr.Audio(label="Input Audio", type="filepath", sources=["microphone", "upload"])
|
| 918 |
|
| 919 |
# Add example audio selection
|
| 920 |
-
|
| 921 |
-
|
| 922 |
-
|
| 923 |
-
|
| 924 |
-
|
| 925 |
-
|
| 926 |
|
| 927 |
# Add button to load selected example
|
| 928 |
-
load_example_btn = gr.Button("Load Example", variant="secondary")
|
| 929 |
|
| 930 |
with gr.Column(scale=1):
|
| 931 |
# Add chunk duration input
|
|
@@ -1045,14 +1062,14 @@ with gr.Blocks(title="Affective Virtual Environments - Chunked Processing", css=
|
|
| 1045 |
container['image'],
|
| 1046 |
container['image_360'],
|
| 1047 |
container['music']
|
| 1048 |
-
]] + [loading_indicator, chunk_duration_input,
|
| 1049 |
)
|
| 1050 |
|
| 1051 |
-
load_example_btn.click(
|
| 1052 |
-
|
| 1053 |
-
|
| 1054 |
-
|
| 1055 |
-
)
|
| 1056 |
|
| 1057 |
# Check if we're running on Hugging Face Spaces
|
| 1058 |
is_spaces = os.getenv('SPACE_ID') is not None
|
|
|
|
| 910 |
# Create the Gradio interface with proper output handling
|
| 911 |
with gr.Blocks(title="Affective Virtual Environments - Chunked Processing", css=custom_css) as interface:
|
| 912 |
gr.Markdown("# The Emotional Machine")
|
| 913 |
+
gr.Markdown(
|
| 914 |
+
"""
|
| 915 |
+
An Affective Virtual Environment, or AVE, is a digital space composed of two components: an emotion recognition system and a virtual environment generator.
|
| 916 |
|
| 917 |
+
In this project, we use bimodal speech emotion recognition to predict categorical emotions from semantic and acoustic modes.
|
| 918 |
+
|
| 919 |
+
Virtual environments are generated using DeepAI and MusicGen.
|
| 920 |
+
|
| 921 |
+
To interact, record your voice or upload an audio file. Define the chunk length for your sample, and choose whether or not to generate audio for each chunk.
|
| 922 |
+
|
| 923 |
+
Generate your Affective Virtual Environment and wait for the results.
|
| 924 |
+
|
| 925 |
+
Download the HTML file with your creation.
|
| 926 |
+
"""
|
| 927 |
+
)
|
| 928 |
+
|
| 929 |
+
|
| 930 |
+
|
| 931 |
|
| 932 |
with gr.Row():
|
| 933 |
with gr.Column(scale=2):
|
| 934 |
audio_input = gr.Audio(label="Input Audio", type="filepath", sources=["microphone", "upload"])
|
| 935 |
|
| 936 |
# Add example audio selection
|
| 937 |
+
# example_selector = gr.Dropdown(
|
| 938 |
+
# label="Select Example Audio",
|
| 939 |
+
# choices=["Happy Speech", "Sad Story", "Neutral News"],
|
| 940 |
+
# value=None,
|
| 941 |
+
# info="Choose from pre-recorded example speeches"
|
| 942 |
+
# )
|
| 943 |
|
| 944 |
# Add button to load selected example
|
| 945 |
+
#load_example_btn = gr.Button("Load Example", variant="secondary")
|
| 946 |
|
| 947 |
with gr.Column(scale=1):
|
| 948 |
# Add chunk duration input
|
|
|
|
| 1062 |
container['image'],
|
| 1063 |
container['image_360'],
|
| 1064 |
container['music']
|
| 1065 |
+
]] + [loading_indicator, chunk_duration_input, viewer_html_output, js_output]
|
| 1066 |
)
|
| 1067 |
|
| 1068 |
+
#load_example_btn.click(
|
| 1069 |
+
# fn=load_example,
|
| 1070 |
+
# inputs=[example_selector],
|
| 1071 |
+
# outputs=[audio_input, example_selector]
|
| 1072 |
+
#)
|
| 1073 |
|
| 1074 |
# Check if we're running on Hugging Face Spaces
|
| 1075 |
is_spaces = os.getenv('SPACE_ID') is not None
|