import gradio as gr
from transformers import AutoFeatureExtractor, AutoModelForAudioClassification
import torch
import librosa

# Load the fine-tuned checkpoint and its matching feature extractor.
model_id = "your-username/speech-emotion-recognition-model"
feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
model = AutoModelForAudioClassification.from_pretrained(model_id)
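
# Optional: if a GPU is available, inference can be moved there. This is a
# minimal sketch; the tensors built in predict_emotion would then also need
# a matching .to(device) call before the forward pass.
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model = model.to(device)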

# Emotion labels in the order used at training time; the order must match
# the model's output indices.
emotions = ["neutral", "happy", "sad", "angry", "fearful", "disgust", "surprised"]
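
# Alternatively, the labels can be read from the checkpoint itself instead of
# being hardcoded, which rules out any ordering mismatch (id2label and
# num_labels are standard fields on transformers configs):
# emotions = [model.config.id2label[i] for i in range(model.config.num_labels)]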


def predict_emotion(audio_path):
    # Gradio passes None when the user submits without recording or uploading.
    if audio_path is None:
        return {}

    # Resample the recording to the 16 kHz rate the model expects.
    audio, sampling_rate = librosa.load(audio_path, sr=16000)

    # Turn the raw waveform into padded model inputs.
    inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt", padding=True)

    # Run inference and convert the logits into per-class probabilities.
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.nn.functional.softmax(outputs.logits, dim=1)

    # Map every probability to its label; gr.Label renders this dict as a
    # ranked list of confidences.
    return {emotion: probs[0][i].item() for i, emotion in enumerate(emotions)}
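
# Quick sanity check outside the UI (the .wav path is a placeholder):
# print(predict_emotion("sample.wav"))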


demo = gr.Interface(
    fn=predict_emotion,
    # Gradio 4.x takes `sources` as a list; older releases used source="microphone".
    inputs=gr.Audio(sources=["upload", "microphone"], type="filepath"),
    outputs=gr.Label(num_top_classes=7),
    title="Speech Emotion Recognition",
    description="Upload audio or record your voice to identify the emotion. The model detects seven emotions: neutral, happy, sad, angry, fearful, disgust, and surprised."
)

demo.launch()
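
# Passing share=True to launch() generates a temporary public URL, which is
# handy for sharing the demo without deploying it:
# demo.launch(share=True)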