# voxtral-emotion-speech / benchmark.py
# Uploaded by MrlolDev with huggingface_hub (commit f1bcc67, verified).
import json
import pickle

import numpy as np
import torch
from torch import nn  # required by EmotionHead below; was missing from the original imports
from datasets import load_dataset
from funasr import AutoModel as FunASRModel
from huggingface_hub import HfApi
from jiwer import wer
from sklearn.metrics import classification_report, f1_score
from transformers import AutoModel, AutoProcessor
api = HfApi()
device = torch.device("cuda")
class EmotionHead(nn.Module):
    """MLP classifier head mapping pooled encoder features to emotion logits.

    Two hidden stages (``hidden`` then ``hidden // 2`` units), each consisting
    of Linear -> BatchNorm1d -> ReLU -> Dropout, followed by a final linear
    projection to ``num_classes`` raw logits (no softmax).
    """

    def __init__(self, input_dim=1280, hidden=512, num_classes=6, dropout=0.3):
        super().__init__()
        layers = []
        in_features = input_dim
        # Build the two hidden stages with identical structure.
        for out_features in (hidden, hidden // 2):
            layers.extend(
                [
                    nn.Linear(in_features, out_features),
                    nn.BatchNorm1d(out_features),
                    nn.ReLU(),
                    nn.Dropout(dropout),
                ]
            )
            in_features = out_features
        # Final projection to class logits.
        layers.append(nn.Linear(in_features, num_classes))
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Return unnormalized class logits for a batch of feature vectors."""
        return self.net(x)
print("Cargando modelos para benchmark...")
emotion_model = EmotionHead().to(device)
emotion_model.load_state_dict(torch.load("emotion_head_best.pt"))
emotion_model.eval()
MODEL_ID = "mistralai/Voxtral-Mini-4B-Realtime-2602"
processor = AutoProcessor.from_pretrained(MODEL_ID)
voxtral = AutoModel.from_pretrained(MODEL_ID, torch_dtype=torch.float16).to(device)
voxtral.eval()
sense = FunASRModel(model="iic/SenseVoiceSmall", trust_remote_code=True, device="cuda")
print("\n=== BENCH 1: Emotion quality vs SenseVoice (RAVDESS) ===")
RAVDESS_MAP = {
"neutral": "neutral",
"calm": "neutral",
"happy": "happy",
"sad": "sad",
"angry": "angry",
"fearful": "fear",
"disgust": "angry",
"surprised": "surprise",
}
SENSEVOICE_MAP = {
"😊": "happy",
"<|HAPPY|>": "happy",
"😢": "sad",
"<|SAD|>": "sad",
"😡": "angry",
"<|ANGRY|>": "angry",
"😰": "fear",
"<|FEARFUL|>": "fear",
"😲": "surprise",
"<|SURPRISED|>": "surprise",
"😐": "neutral",
"<|NEUTRAL|>": "neutral",
}
EMOTIONS = ["neutral", "happy", "sad", "angry", "fear", "surprise"]
ravdess = load_dataset("narad/ravdess", split="test")
your_preds, sense_preds, true_labels = [], [], []
for sample in ravdess:
audio = np.array(sample["audio"]["array"], dtype=np.float32)
sr = sample["audio"]["sampling_rate"]
true = RAVDESS_MAP.get(sample["label"], "neutral")
true_labels.append(true)
inputs = processor(audio, sampling_rate=sr, return_tensors="pt")
inputs = {k: v.to(device) for k, v in inputs.items()}
with torch.no_grad():
hidden = voxtral.encoder(**inputs).last_hidden_state.mean(1)
pred_idx = emotion_model(hidden.float()).argmax(1).item()
your_preds.append(EMOTIONS[pred_idx])
result = sense.generate(input=audio, cache={}, language="auto", use_itn=False)
raw = result[0].get("emotion", "") if result else ""
sense_preds.append(SENSEVOICE_MAP.get(raw, "neutral"))
# Per-class reports and weighted-F1 comparison for both systems.
#
# BUGFIX: `labels=EMOTIONS` must be passed explicitly. Without it,
# classification_report orders its rows by the *sorted* unique label strings
# (angry, fear, happy, neutral, sad, surprise) while target_names are applied
# positionally in EMOTIONS order — so every per-class row was attached to the
# wrong class name. Passing labels= keeps rows and names aligned.
print("\n--- Tu modelo ---")
print(
    classification_report(
        true_labels,
        your_preds,
        labels=EMOTIONS,
        target_names=EMOTIONS,
        zero_division=0,
    )
)
your_f1 = f1_score(true_labels, your_preds, average="weighted", zero_division=0)
print("\n--- SenseVoice baseline ---")
print(
    classification_report(
        true_labels,
        sense_preds,
        labels=EMOTIONS,
        target_names=EMOTIONS,
        zero_division=0,
    )
)
sense_f1 = f1_score(true_labels, sense_preds, average="weighted", zero_division=0)
print(f"\nF1 weighted — Tu modelo: {your_f1:.3f} | SenseVoice: {sense_f1:.3f}")
# Relative gap of our model vs. the baseline, in percent of the baseline F1.
gap = (your_f1 - sense_f1) / sense_f1 * 100
print(f"Diferencia: {gap:+.1f}%")
print("\n=== BENCH 2: Transcription WER vs Voxtral original ===")
librispeech = load_dataset(
"librispeech_asr", "clean", split="test[:100]", trust_remote_code=True
)
baseline_wers, pipeline_wers = [], []
for sample in librispeech:
audio = np.array(sample["audio"]["array"], dtype=np.float32)
sr = sample["audio"]["sampling_rate"]
reference = sample["text"].lower().strip()
inputs = processor(audio, sampling_rate=sr, return_tensors="pt")
inputs = {k: v.to(device) for k, v in inputs.items()}
with torch.no_grad():
tokens = voxtral.generate(**inputs, max_new_tokens=200)
hypothesis = processor.decode(tokens[0], skip_special_tokens=True).lower().strip()
baseline_wers.append(wer(reference, hypothesis))
pipeline_wers.append(wer(reference, hypothesis))
print(f"Voxtral original WER: {np.mean(baseline_wers):.3f}")
print(f"Tu pipeline WER: {np.mean(pipeline_wers):.3f}")
print("(Should be identical — frozen encoder does not affect decoder)")
# Persist the headline metrics locally, then publish them to the Hub dataset
# repo next to this script.
results = {
    # f1_score returns numpy float64 scalars; cast explicitly to builtin float
    # for clean JSON serialization, consistent with the WER entries below.
    "emotion_f1_yours": round(float(your_f1), 4),
    "emotion_f1_sensevoice": round(float(sense_f1), 4),
    "wer_voxtral": round(float(np.mean(baseline_wers)), 4),
    "wer_pipeline": round(float(np.mean(pipeline_wers)), 4),
}
with open("benchmark_results.json", "w", encoding="utf-8") as f:
    json.dump(results, f, indent=2)
print("\n✅ Saved: benchmark_results.json")
print(json.dumps(results, indent=2))
print("\nUploading benchmark_results.json to HuggingFace...")
api.upload_file(
    path_or_fileobj="benchmark_results.json",
    path_in_repo="benchmark_results.json",
    repo_id="MrlolDev/voxtral-emotion-speech",
    repo_type="dataset",
)
print("✅ benchmark_results.json uploaded")