import gradio as gr
import torch
import librosa
import numpy as np
import os
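
# EmotionClassifier: a small fully connected network over flattened MFCC
# features, (13 coefficients x 128 frames) -> 128 -> 64 -> num_classes.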
class EmotionClassifier(torch.nn.Module):
    def __init__(self, input_shape, num_classes):
        super().__init__()
        self.flatten = torch.nn.Flatten()
        self.layers = torch.nn.Sequential(
            torch.nn.Linear(input_shape, 128),
            torch.nn.ReLU(),
            torch.nn.Dropout(0.3),
            torch.nn.Linear(128, 64),
            torch.nn.ReLU(),
            torch.nn.Dropout(0.3),
            torch.nn.Linear(64, num_classes)
        )

    def forward(self, x):
        x = self.flatten(x)
        return self.layers(x)
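
# 13 MFCC coefficients x 128 time frames, flattened to a 1664-dim vector.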
input_shape = 13 * 128
num_classes = 7
model = EmotionClassifier(input_shape, num_classes)
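
# Load the trained weights; emotion_model.pt is expected next to this script.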
model_path = os.path.join(os.path.dirname(__file__), 'emotion_model.pt')
model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
model.eval()
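
# Class labels; the order must match the one used at training time.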
emotions = ["neutral", "happy", "sad", "angry", "fearful", "disgust", "surprised"]


def extract_features(audio_path, sample_rate=16000, n_mfcc=13, max_length=128):
    """Extract a fixed-size MFCC feature matrix from an audio file."""
    try:
        audio, sr = librosa.load(audio_path, sr=sample_rate)
        mfccs = librosa.feature.mfcc(y=audio, sr=sr, n_mfcc=n_mfcc)

        # Pad or truncate along the time axis to exactly max_length frames.
        if mfccs.shape[1] < max_length:
            pad_width = max_length - mfccs.shape[1]
            mfccs = np.pad(mfccs, pad_width=((0, 0), (0, pad_width)), mode='constant')
        else:
            mfccs = mfccs[:, :max_length]

        return mfccs
    except Exception as e:
        print(f"Error in feature extraction: {e}")
        return None


def predict_emotion(audio):
    """Predict emotion probabilities from a Gradio audio input."""
    try:
        # With type="filepath" the interface passes a path string; the
        # branches below also cope with type="numpy" style inputs.
        if isinstance(audio, str):
            features = extract_features(audio)
        else:
            if isinstance(audio, tuple):
                # Gradio numpy audio arrives as (sample_rate, data).
                sample_rate, audio_array = audio
            else:
                audio_array = audio
                sample_rate = 16000

            audio_array = np.asarray(audio_array)

            # librosa expects float audio; Gradio delivers int16 PCM by default.
            if np.issubdtype(audio_array.dtype, np.integer):
                audio_array = audio_array.astype(np.float32) / np.iinfo(audio_array.dtype).max

            # Downmix multi-channel audio to mono.
            if audio_array.ndim > 1:
                audio_array = np.mean(audio_array, axis=1)

            mfccs = librosa.feature.mfcc(y=audio_array, sr=sample_rate, n_mfcc=13)

            # Pad or truncate to the fixed length the model expects.
            max_length = 128
            if mfccs.shape[1] < max_length:
                pad_width = max_length - mfccs.shape[1]
                mfccs = np.pad(mfccs, pad_width=((0, 0), (0, pad_width)), mode='constant')
            else:
                mfccs = mfccs[:, :max_length]

            features = mfccs

        if features is None:
            return {emotion: 0.0 for emotion in emotions}

        # Flatten to shape (1, 13 * 128) and run a single forward pass.
        features_flat = features.reshape(1, -1)
        features_tensor = torch.tensor(features_flat, dtype=torch.float32)

        with torch.no_grad():
            outputs = model(features_tensor)
            probabilities = torch.nn.functional.softmax(outputs, dim=1)

        return {emotion: probabilities[0][i].item() for i, emotion in enumerate(emotions)}

    except Exception as e:
        print(f"Error in prediction: {e}")
        import traceback
        traceback.print_exc()
        # Fall back to a uniform distribution so the Label output still renders.
        return {emotion: 1 / len(emotions) for emotion in emotions}
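
# type="filepath" means predict_emotion receives a path to the recorded
# or uploaded audio clip, so it goes through extract_features above.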
demo = gr.Interface(
    fn=predict_emotion,
    inputs=gr.Audio(sources=["microphone", "upload"], type="filepath"),
    outputs=gr.Label(num_top_classes=7),
    title="Speech Emotion Recognition",
    description=(
        "Upload an audio file or record your voice to identify the emotion. "
        "This model can detect neutral, happy, sad, angry, fearful, disgust, "
        "and surprised emotions."
    )
)

demo.launch()
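
# Tip: demo.launch(share=True) serves a temporary public link for testing.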