Update app.py
app.py CHANGED
@@ -2,13 +2,9 @@ import gradio as gr
 import numpy as np
 import librosa
 import time
-from transformers import pipeline
 from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
 from tensorflow.keras.models import load_model

-# Load the ASR pipeline
-p = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-large-960h-lv60-self")
-
 # Load the emotion prediction model
 model = load_model('mymodel_SER_LSTM_RAVDESS.h5')

@@ -44,15 +40,14 @@ def sentiment_vader(sentence):
 # Function to transcribe audio and perform sentiment analysis
 def transcribe(audio):
     time.sleep(3)  # Simulate processing delay
-
-
-    return text, text_sentiment
+    # In this case, just return a placeholder value
+    return "Transcription not available"

 # Function to get predictions for emotion and sentiment
 def get_predictions(audio_input):
     emotion_prediction = predict_emotion_from_audio(audio_input)
-
-    return emotion_prediction,
+    sentiment_prediction = sentiment_vader(transcribe(audio_input))
+    return emotion_prediction, sentiment_prediction

 # Create the Gradio interface
 interface = gr.Interface(
@@ -60,7 +55,6 @@ interface = gr.Interface(
     inputs=gr.Audio(label="Input Audio", type="file"),
     outputs=[
         gr.Label(label="Emotion Prediction"),
-        gr.Textbox(label="Transcript"),
         gr.Label(label="Sentiment Prediction")
     ],
     title="Emotional Machines Test",
@@ -68,4 +62,4 @@ interface = gr.Interface(
 )

 # Launch the interface
-interface.launch()
+interface.launch()
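The helpers called above, sentiment_vader and predict_emotion_from_audio, are defined outside the changed hunks, so their bodies do not appear in this diff. The sketch below shows one plausible shape for them, inferred only from the imports and the mymodel_SER_LSTM_RAVDESS.h5 model that app.py loads; the VADER compound-score cut-offs, the 40-MFCC feature layout, and the emotion label order are illustrative assumptions, not code from this commit.

# Sketch only -- these helpers are not part of this commit; feature layout and labels are assumptions.
import numpy as np
import librosa
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from tensorflow.keras.models import load_model

analyzer = SentimentIntensityAnalyzer()
model = load_model('mymodel_SER_LSTM_RAVDESS.h5')

# Assumed RAVDESS label order (illustrative).
EMOTIONS = ['neutral', 'calm', 'happy', 'sad', 'angry', 'fearful', 'disgust', 'surprised']

def sentiment_vader(sentence):
    # VADER's polarity_scores returns neg/neu/pos/compound; apply the usual compound thresholds.
    compound = analyzer.polarity_scores(sentence)['compound']
    if compound >= 0.05:
        return 'positive'
    if compound <= -0.05:
        return 'negative'
    return 'neutral'

def predict_emotion_from_audio(audio_path):
    # Assumed feature pipeline: 40 MFCCs averaged over time, reshaped for the LSTM input.
    y, sr = librosa.load(audio_path, sr=None)
    mfcc = np.mean(librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40), axis=1)
    probs = model.predict(mfcc.reshape(1, 40, 1), verbose=0)
    return EMOTIONS[int(np.argmax(probs))]

Note that after this change transcribe always returns the same placeholder string, so the sentiment label shown in the interface is computed from that fixed text rather than from a real transcript.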