Update app.py
app.py CHANGED
@@ -65,7 +65,6 @@ class SpeechEmotionRecognizer:
         # Make prediction
         prediction = self.model.predict(mel_spec)
         emotion_index = np.argmax(prediction)
-        confidence = float(prediction[0][emotion_index])
 
         # Create results dictionary with confidence scores
         results = {emotion: float(pred) for emotion, pred in zip(self.emotion_labels, prediction[0])}
@@ -91,16 +90,14 @@ demo = gr.Interface(
     fn=process_audio,
     inputs=[
         gr.Audio(
-
+            label="Record audio (4 seconds)",
             type="filepath",
-
+            sources=["microphone"] # Updated from 'source' to 'sources'
         )
     ],
     outputs=gr.Label(num_top_classes=6),
     title="Speech Emotion Recognition",
-    description="Record a 4-second audio clip to detect the emotion in your voice."
-    examples=None, # You can add example audio files here
-    theme=gr.themes.Base()
+    description="Record a 4-second audio clip to detect the emotion in your voice."
 )
 
 # Launch the app
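For reference, below is a minimal sketch of the interface this commit ends up building, assuming Gradio 4.x (where gr.Audio takes sources=[...] instead of the older source= keyword). The EMOTION_LABELS list and the body of process_audio are placeholders standing in for the app's real SpeechEmotionRecognizer pipeline, not code from this repository.

import gradio as gr

# Placeholder label set; the real app reads labels from SpeechEmotionRecognizer.emotion_labels.
EMOTION_LABELS = ["angry", "disgust", "fear", "happy", "neutral", "sad"]

def process_audio(filepath):
    # Stand-in for the real pipeline: load the clip at `filepath`, compute a mel
    # spectrogram, run model.predict(), and map each label to its probability.
    # Here we just return a flat distribution so the sketch runs on its own.
    return {label: 1.0 / len(EMOTION_LABELS) for label in EMOTION_LABELS}

demo = gr.Interface(
    fn=process_audio,
    inputs=[
        gr.Audio(
            label="Record audio (4 seconds)",
            type="filepath",
            sources=["microphone"],
        )
    ],
    outputs=gr.Label(num_top_classes=6),
    title="Speech Emotion Recognition",
    description="Record a 4-second audio clip to detect the emotion in your voice.",
)

if __name__ == "__main__":
    demo.launch()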