Update app.py
app.py
CHANGED
@@ -1,60 +1,23 @@
-import librosa
-import numpy as np
-
-# … (remaining setup from original lines 3-23 — further imports, the loading
-# of scaler2 and loaded_model, and the definition of the zcr() helper — was
-# not preserved; only the helper's return line below survives)
-    return np.squeeze(zcr)
-def rmse(data,frame_length=2048,hop_length=512):
-    rmse=librosa.feature.rms(y=data,frame_length=frame_length,hop_length=hop_length)
-    return np.squeeze(rmse)
-def mfcc(data,sr,frame_length=2048,hop_length=512,flatten:bool=True):
-    mfcc=librosa.feature.mfcc(y=data,sr=sr)
-    return np.squeeze(mfcc.T) if not flatten else np.ravel(mfcc.T)
-
-def extract_features(data,sr=22050,frame_length=2048,hop_length=512):
-    result=np.array([])
-
-    result=np.hstack((result,
-                      zcr(data,frame_length,hop_length),
-                      rmse(data,frame_length,hop_length),
-                      mfcc(data,sr,frame_length,hop_length)
-                      ))
-    return result
-
-def get_predict_feat(path):
-    d, s_rate= librosa.load(path, duration=2.5, offset=0.6)
-    res=extract_features(d)
-    result=np.array(res)
-    result=np.reshape(result,newshape=(1,2376))
-    i_result = scaler2.transform(result)
-    final_result=np.expand_dims(i_result, axis=2)
-
-    return final_result
-
-emotions1={1:'Neutral', 2:'Calm', 3:'Happy', 4:'Sad', 5:'Angry', 6:'Fear', 7:'Disgust',8:'Surprise'}
-def prediction(path1):
-    res = get_predict_feat(path1)
-    predictions = loaded_model.predict(res)
-    predicted_class = predictions.argmax(axis=1)[0] + 1  # Convert from 0-based indexing to emotion labels
-    predicted_emotion = emotions1[predicted_class]  # Get the corresponding emotion label
-    return predicted_emotion[0]
-
-gr.Interface(fn=prediction, inputs="audio", outputs="text").launch()
+import streamlit as st
+from speechbrain.inference.interfaces import foreign_class
+
+# Initialize the classifier
+classifier = foreign_class(source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP", pymodule_file="custom_interface.py", classname="CustomEncoderWav2vec2Classifier")
+
+def emotion(file):
+    if file is not None:
+        # Classify the file
+        out_prob, score, index, text_lab = classifier.classify_file(file.name)
+        # Display the output
+        st.write(text_lab)
+    else:
+        st.write("Please upload a file.")
+
+def main():
+    st.title("Emotion Recognition")
+    uploaded_file = st.file_uploader("Upload audio file", type=["wav"])
+    if uploaded_file is not None:
+        emotion(uploaded_file)
+
+if __name__ == "__main__":
+    main()
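Two notes on the removed Gradio version. The stray "return np.squeeze(zcr)" near the top of the removed hunk belonged to a zero-crossing-rate helper whose definition did not survive above; judging from the parallel rmse() helper, it presumably looked like the sketch below (a reconstruction, not the verbatim original):

import librosa
import numpy as np

def zcr(data, frame_length=2048, hop_length=512):
    # Frame-wise zero-crossing rate, squeezed to a 1-D feature vector
    zcr = librosa.feature.zero_crossing_rate(data, frame_length=frame_length, hop_length=hop_length)
    return np.squeeze(zcr)

Note also that "return predicted_emotion[0]" returned only the first character of the label string ('N' for 'Neutral', 'H' for 'Happy'), so the Gradio app displayed a single letter rather than the emotion name.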
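A caveat on the new Streamlit version: st.file_uploader yields an in-memory UploadedFile, and its .name attribute is only the client-side filename, which generally does not exist as a path on the server, while classify_file expects a readable audio-file path. A minimal sketch of a workaround is to persist the upload to a temporary file first; the temp-file handling below is an assumption, not part of the committed code:

import tempfile

import streamlit as st
from speechbrain.inference.interfaces import foreign_class

classifier = foreign_class(
    source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP",
    pymodule_file="custom_interface.py",
    classname="CustomEncoderWav2vec2Classifier",
)

def emotion(file):
    if file is not None:
        # Write the in-memory upload to disk so classify_file can open it by path
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
            tmp.write(file.getbuffer())
            tmp_path = tmp.name
        out_prob, score, index, text_lab = classifier.classify_file(tmp_path)
        st.write(text_lab)
    else:
        st.write("Please upload a file.")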