File size: 1,604 Bytes
5d7352c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
import gradio as gr
from transformers import pipeline, AutoFeatureExtractor, AutoModelForAudioClassification
import torch
import librosa
import numpy as np

# Load model and feature extractor from the Hugging Face Hub.
# NOTE(review): "your-username/..." is a placeholder — replace with a real
# repo id before deploying, or from_pretrained will fail at startup.
model_id = "your-username/speech-emotion-recognition-model" 
feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
model = AutoModelForAudioClassification.from_pretrained(model_id)

# Emotion labels, indexed by model output class id.
# NOTE(review): assumes this ordering matches the checkpoint's
# model.config.id2label — confirm, otherwise labels will be scrambled.
emotions = ["neutral", "happy", "sad", "angry", "fearful", "disgust", "surprised"]

def predict_emotion(audio_path):
    """Classify the emotion expressed in an audio recording.

    Args:
        audio_path: Filesystem path to an audio file, as supplied by the
            Gradio ``Audio`` component with ``type="filepath"``. May be
            ``None`` if the user submits without recording/uploading.

    Returns:
        dict mapping each label in ``emotions`` to its softmax
        probability (floats), suitable for a ``gr.Label`` output.
        An all-zero mapping is returned when no audio was provided.
    """
    # Gradio passes None when no audio was recorded or uploaded; avoid
    # crashing inside librosa.load in that case.
    if audio_path is None:
        return {emotion: 0.0 for emotion in emotions}

    # Resample to 16 kHz, the rate the feature extractor expects.
    audio, sampling_rate = librosa.load(audio_path, sr=16000)

    # Convert the raw waveform into model-ready tensors.
    inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt", padding=True)

    # Inference only — disable gradient tracking to save memory and time.
    with torch.no_grad():
        outputs = model(**inputs)
        probs = torch.nn.functional.softmax(outputs.logits, dim=-1)

    # NOTE(review): assumes the model's class ordering matches `emotions`;
    # verify against model.config.id2label.
    return {emotion: float(probs[0][i].item()) for i, emotion in enumerate(emotions)}

# Create Gradio interface wiring the classifier to a mic/audio input and
# a ranked-label output, then start the local web server.
# NOTE(review): `source="microphone"` is the Gradio 3.x API; Gradio 4.x
# renamed it to `sources=["microphone", "upload"]` — confirm the installed
# version. Also note the description promises upload support, but
# `source="microphone"` restricts input to recording only.
demo = gr.Interface(
    fn=predict_emotion,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs=gr.Label(num_top_classes=7),  # show all 7 emotion scores
    title="Speech Emotion Recognition",
    description="Upload audio or record your voice to identify the emotion. This model can detect neutral, happy, sad, angry, fearful, disgust, and surprised emotions."
)

demo.launch()