import gradio as gr
from transformers import AutoFeatureExtractor, AutoModelForAudioClassification
import torch
import librosa

# Load model and feature extractor
model_id = "your-username/speech-emotion-recognition-model"
feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
model = AutoModelForAudioClassification.from_pretrained(model_id)

# Emotion labels; the order must match the model's output class indices
emotions = ["neutral", "happy", "sad", "angry", "fearful", "disgust", "surprised"]

def predict_emotion(audio_path):
    # Load audio and resample to the 16 kHz rate the model expects
    audio, sampling_rate = librosa.load(audio_path, sr=16000)

    # Convert the raw waveform into model inputs
    inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt", padding=True)

    # Run inference without tracking gradients
    with torch.no_grad():
        outputs = model(**inputs)

    # Convert logits to per-class probabilities
    probs = torch.nn.functional.softmax(outputs.logits, dim=-1)

    # Map each emotion to its probability; gr.Label renders this dict directly
    return {emotion: probs[0][i].item() for i, emotion in enumerate(emotions)}

# Create Gradio interface
demo = gr.Interface(
    fn=predict_emotion,
    inputs=gr.Audio(sources=["microphone", "upload"], type="filepath"),
    outputs=gr.Label(num_top_classes=7),
    title="Speech Emotion Recognition",
    description=(
        "Upload audio or record your voice to identify the emotion. This model can detect "
        "neutral, happy, sad, angry, fearful, disgust, and surprised emotions."
    ),
)

demo.launch()
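
# Optional hardening (a sketch, not part of the original script): the hardcoded
# `emotions` list above assumes that exact label order in the checkpoint. If the
# model's config ships label metadata (Hub audio-classification checkpoints
# usually do), the labels can instead be read from `model.config.id2label`,
# which removes any risk of a mismatch between list order and output indices:
#
# emotions = [model.config.id2label[i] for i in range(model.config.num_labels)]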