MrlolDev committed on
Commit
f1bcc67
·
verified ·
1 Parent(s): dc5fb52

Upload benchmark.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. benchmark.py +165 -0
benchmark.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Benchmark script: emotion-head quality vs SenseVoice on RAVDESS, and
transcription WER of the (frozen) Voxtral model on LibriSpeech clean test."""

import torch
import torch.nn as nn  # FIX: `nn` is referenced below (EmotionHead) but was never imported
import numpy as np
import pickle  # NOTE(review): unused in this script — kept in case other tooling relies on it
from datasets import load_dataset
from transformers import AutoProcessor, AutoModel
from sklearn.metrics import f1_score, classification_report
from jiwer import wer
from funasr import AutoModel as FunASRModel
from huggingface_hub import HfApi
import json

api = HfApi()

# Hard CUDA requirement: torch.device("cuda") plus the .to(device)/device="cuda"
# calls below will fail on a CPU-only host.
device = torch.device("cuda")
15
+
16
+
17
class EmotionHead(torch.nn.Module):
    """Two-hidden-layer MLP classifier over pooled Voxtral encoder embeddings.

    Architecture: (Linear -> BatchNorm1d -> ReLU -> Dropout) x 2, then a final
    Linear projection to ``num_classes`` raw logits (no softmax — callers
    argmax the output or feed it to a loss directly).

    Args:
        input_dim: size of the pooled encoder embedding (1280 for Voxtral).
        hidden: width of the first hidden layer; the second uses ``hidden // 2``.
        num_classes: number of emotion classes (6 in this benchmark).
        dropout: dropout probability after each hidden layer (inactive in eval mode).
    """

    def __init__(self, input_dim=1280, hidden=512, num_classes=6, dropout=0.3):
        super().__init__()
        # FIX: the original referenced the bare name ``nn`` but the file only
        # does ``import torch`` — NameError at class-definition time.  A local
        # alias to ``torch.nn`` fixes this without touching file-level imports.
        nn = torch.nn
        # Layer order/indices are unchanged so state_dict keys ("net.0.weight",
        # ...) still match the saved checkpoint "emotion_head_best.pt".
        self.net = nn.Sequential(
            nn.Linear(input_dim, hidden),
            nn.BatchNorm1d(hidden),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden, hidden // 2),
            nn.BatchNorm1d(hidden // 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden // 2, num_classes),
        )

    def forward(self, x):
        """Return (batch, num_classes) logits for pooled embeddings ``x``."""
        return self.net(x)
34
+
35
+
36
# --- Model loading (side effects: disk read, hub downloads, CUDA transfers) ---
print("Cargando modelos para benchmark...")
# Trained classification head; checkpoint produced elsewhere (not in this file).
# NOTE(review): torch.load without weights_only=True unpickles arbitrary
# objects — acceptable only because the checkpoint is locally trusted.
emotion_model = EmotionHead().to(device)
emotion_model.load_state_dict(torch.load("emotion_head_best.pt"))
emotion_model.eval()

# Frozen Voxtral backbone: used both as the emotion feature encoder (bench 1)
# and as the ASR model (bench 2). fp16 on GPU.
MODEL_ID = "mistralai/Voxtral-Mini-4B-Realtime-2602"
processor = AutoProcessor.from_pretrained(MODEL_ID)
voxtral = AutoModel.from_pretrained(MODEL_ID, torch_dtype=torch.float16).to(device)
voxtral.eval()

# SenseVoice baseline for the emotion benchmark (FunASR runtime, on CUDA).
sense = FunASRModel(model="iic/SenseVoiceSmall", trust_remote_code=True, device="cuda")
47
+
48
print("\n=== BENCH 1: Emotion quality vs SenseVoice (RAVDESS) ===")

# RAVDESS ships 8 emotion labels; collapse them onto the 6-class scheme the
# emotion head predicts.  NOTE(review): "calm" -> "neutral" and
# "disgust" -> "angry" deliberately merge classes — confirm this matches the
# mapping used when the head was trained.
RAVDESS_MAP = {
    "neutral": "neutral",
    "calm": "neutral",
    "happy": "happy",
    "sad": "sad",
    "angry": "angry",
    "fearful": "fear",
    "disgust": "angry",
    "surprised": "surprise",
}
# SenseVoice reports the detected emotion either as an emoji or as a <|TAG|>
# token depending on version/output mode; accept both spellings per class.
SENSEVOICE_MAP = {
    "😊": "happy",
    "<|HAPPY|>": "happy",
    "😢": "sad",
    "<|SAD|>": "sad",
    "😡": "angry",
    "<|ANGRY|>": "angry",
    "😰": "fear",
    "<|FEARFUL|>": "fear",
    "😲": "surprise",
    "<|SURPRISED|>": "surprise",
    "😐": "neutral",
    "<|NEUTRAL|>": "neutral",
}

# Class index -> name for EmotionHead logits (used as EMOTIONS[argmax]).
# Presumably matches the label order used at training time — verify against
# the training script before trusting the reported F1.  Do not reorder.
EMOTIONS = ["neutral", "happy", "sad", "angry", "fear", "surprise"]
76
+
77
# RAVDESS test split; each sample carries {"audio": {"array", "sampling_rate"},
# "label"} — assumed schema of narad/ravdess, TODO confirm.
ravdess = load_dataset("narad/ravdess", split="test")

your_preds, sense_preds, true_labels = [], [], []

for sample in ravdess:
    audio = np.array(sample["audio"]["array"], dtype=np.float32)
    sr = sample["audio"]["sampling_rate"]
    # Unknown/unmapped labels fall back to "neutral".
    true = RAVDESS_MAP.get(sample["label"], "neutral")
    true_labels.append(true)

    # Ours: Voxtral encoder hidden states, mean-pooled over dim 1 (presumably
    # time), fed to the trained EmotionHead; argmax index -> EMOTIONS name.
    inputs = processor(audio, sampling_rate=sr, return_tensors="pt")
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        hidden = voxtral.encoder(**inputs).last_hidden_state.mean(1)
        # .float(): head was built/trained in fp32 while Voxtral runs fp16.
        pred_idx = emotion_model(hidden.float()).argmax(1).item()
    your_preds.append(EMOTIONS[pred_idx])

    # Baseline: SenseVoice's emotion tag, normalized via SENSEVOICE_MAP
    # (defaults to "neutral" on empty/unknown output).
    result = sense.generate(input=audio, cache={}, language="auto", use_itn=False)
    raw = result[0].get("emotion", "") if result else ""
    sense_preds.append(SENSEVOICE_MAP.get(raw, "neutral"))
97
+
98
# --- BENCH 1 reporting ------------------------------------------------------
# FIX: both reports are pinned to the fixed 6-label set via ``labels=EMOTIONS``.
# Without it sklearn infers the label list from the data, and any class absent
# from truth+preds makes len(labels) != len(target_names), raising ValueError.
print("\n--- Tu modelo ---")
print(
    classification_report(
        true_labels,
        your_preds,
        labels=EMOTIONS,
        target_names=EMOTIONS,
        zero_division=0,
    )
)
your_f1 = f1_score(true_labels, your_preds, average="weighted", zero_division=0)

print("\n--- SenseVoice baseline ---")
print(
    classification_report(
        true_labels,
        sense_preds,
        labels=EMOTIONS,
        target_names=EMOTIONS,
        zero_division=0,
    )
)
sense_f1 = f1_score(true_labels, sense_preds, average="weighted", zero_division=0)

print(f"\nF1 weighted — Tu modelo: {your_f1:.3f} | SenseVoice: {sense_f1:.3f}")
# Relative gap vs the baseline; guard the division — with zero_division=0 a
# degenerate baseline can legitimately score 0.0 and would crash the script.
if sense_f1 > 0:
    gap = (your_f1 - sense_f1) / sense_f1 * 100
    print(f"Diferencia: {gap:+.1f}%")
else:
    print("Diferencia: n/a (baseline F1 is 0)")
117
+
118
print("\n=== BENCH 2: Transcription WER vs Voxtral original ===")

# First 100 clean-test utterances: a smoke-level WER check, not a full eval.
librispeech = load_dataset(
    "librispeech_asr", "clean", split="test[:100]", trust_remote_code=True
)

baseline_wers, pipeline_wers = [], []

for sample in librispeech:
    audio = np.array(sample["audio"]["array"], dtype=np.float32)
    sr = sample["audio"]["sampling_rate"]
    # LibriSpeech references are uppercase; lowercase both sides before WER.
    reference = sample["text"].lower().strip()

    inputs = processor(audio, sampling_rate=sr, return_tensors="pt")
    inputs = {k: v.to(device) for k, v in inputs.items()}

    with torch.no_grad():
        tokens = voxtral.generate(**inputs, max_new_tokens=200)

    hypothesis = processor.decode(tokens[0], skip_special_tokens=True).lower().strip()

    # NOTE(review): the SAME hypothesis is appended to both lists, so the two
    # WER numbers below are identical by construction.  This only demonstrates
    # that the emotion head never touches the decoder — it does not run a
    # second, independent pipeline.
    # NOTE(review): this averages per-utterance WER; corpus-level WER (jiwer on
    # the full reference/hypothesis lists) would weight utterances differently.
    baseline_wers.append(wer(reference, hypothesis))
    pipeline_wers.append(wer(reference, hypothesis))

print(f"Voxtral original WER: {np.mean(baseline_wers):.3f}")
print(f"Tu pipeline WER: {np.mean(pipeline_wers):.3f}")
print("(Should be identical — frozen encoder does not affect decoder)")
145
+
146
# --- Persist + publish the headline numbers ---------------------------------
# float() on the np.mean results yields plain Python floats for JSON; the
# sklearn F1 scores are np.float64 (a float subclass) so round() on them
# also serializes cleanly.
results = {
    "emotion_f1_yours": round(your_f1, 4),
    "emotion_f1_sensevoice": round(sense_f1, 4),
    "wer_voxtral": round(float(np.mean(baseline_wers)), 4),
    "wer_pipeline": round(float(np.mean(pipeline_wers)), 4),
}
with open("benchmark_results.json", "w") as f:
    json.dump(results, f, indent=2)

print("\n✅ Saved: benchmark_results.json")
print(json.dumps(results, indent=2))

# Upload to the Hub; relies on ambient HF credentials (HF_TOKEN / cached
# login) — api.upload_file raises if the caller lacks write access to the repo.
print("\nUploading benchmark_results.json to HuggingFace...")
api.upload_file(
    path_or_fileobj="benchmark_results.json",
    path_in_repo="benchmark_results.json",
    repo_id="MrlolDev/voxtral-emotion-speech",
    repo_type="dataset",
)
print("✅ benchmark_results.json uploaded")