Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -37,29 +37,23 @@ def sentiment_vader(sentence):
|
|
| 37 |
overall_sentiment = "Neutral"
|
| 38 |
return overall_sentiment
|
| 39 |
|
| 40 |
-
def transcribe(audio, delay=3):
    """Placeholder transcription stub — no real speech-to-text is performed.

    Sleeps to simulate processing latency, then returns a fixed
    placeholder string.

    Parameters:
        audio: input audio (ignored by this stub).
        delay: seconds to sleep to simulate processing. Defaults to 3,
            matching the previously hard-coded value, so existing
            callers (`transcribe(audio_input)`) behave identically.

    Returns:
        str: the constant placeholder message.
    """
    time.sleep(delay)  # simulate processing delay
    # In this case, just return a placeholder value
    return "Transcription not available"
|
| 45 |
-
|
| 46 |
-
# Function to get predictions for emotion and sentiment
def get_predictions(audio_input):
    """Run the emotion model on *audio_input* and return its output as a 1-tuple."""
    prediction = predict_emotion_from_audio(audio_input)
    # NOTE(review): single-element tuple return — presumably intentional for a
    # multi-output Gradio callback; confirm against the interface wiring.
    return (prediction,)
|
| 51 |
|
| 52 |
# Create the Gradio interface
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
|
|
|
| 63 |
|
| 64 |
-
# Launch the interface
# Starts serving the `interface` app defined above; this call blocks
# until the server is stopped. (Presumably a Gradio app — confirm.)
interface.launch()
|
|
|
|
| 37 |
overall_sentiment = "Neutral"
|
| 38 |
return overall_sentiment
|
| 39 |
|
| 40 |
+
# Create a combined function that calls both models
def get_predictions(audio_input):
    """Return [emotion prediction, transcription] for the given audio input.

    Runs the acoustic emotion model first, then the (placeholder)
    transcription, and packs both results into a two-element list in
    that order for the Gradio outputs.
    """
    results = [
        predict_emotion_from_audio(audio_input),
        transcribe(audio_input),
    ]
    return results
|
| 45 |
|
| 46 |
# Create the Gradio interface
# Single-tab layout: an audio input plus a submit button, with two Label
# outputs fed by get_predictions (emotion prediction, transcription).
with gr.Blocks() as interface:
    gr.Markdown("Emotional Machines test: Load or Record an audio file to speech emotion analysis")
    with gr.Tabs():
        with gr.Tab("Acoustic and Semantic Predictions"):
            with gr.Row():
                # type="filepath": the callback receives a path to the
                # uploaded/recorded audio file.
                input_audio = gr.Audio(label="Input Audio", type="filepath")
                submit_button = gr.Button("Submit")
                # Two outputs: top-8 classes for emotion, top-4 for the
                # second result. NOTE(review): the second value returned by
                # get_predictions is a plain string (the placeholder
                # transcription), yet it is rendered in a Label configured
                # with num_top_classes=4 — confirm this is intended.
                output_labels = [gr.Label(num_top_classes=8), gr.Label(num_top_classes=4)]

            # Set the function to be called when the button is clicked
            submit_button.click(get_predictions, inputs=input_audio, outputs=output_labels)
|
| 58 |
|
|
|
|
| 59 |
# Launch the Gradio app; blocks while the interface is being served.
interface.launch()
|