lamooon committed
Commit 0822c28 · verified · 1 Parent(s): ff74bc4

Upload scripts/evaluate_comparison.py with huggingface_hub

Files changed (1)
  1. scripts/evaluate_comparison.py +683 -0

scripts/evaluate_comparison.py ADDED
@@ -0,0 +1,683 @@
+ import os
+ import math
+ import logging
+ import argparse
+ import numpy as np
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from torch.utils.data import Dataset, DataLoader
+ import h5py
+ import librosa
+ import pretty_midi
+ import soundfile as sf
+ import torchaudio
+ from tqdm import tqdm
+ from sklearn.metrics import f1_score, precision_score, recall_score
+ from transformers import WavLMModel, Wav2Vec2Model
+
+ # Set up logging
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s | %(levelname)s | %(message)s')
+
+ # Force the soundfile backend (newer torchaudio versions dropped this API, hence the guard)
+ try:
+     torchaudio.set_audio_backend("soundfile")
+ except Exception:
+     pass
+
+ # ============================================================
+ # UTILS & PREPROCESSING
+ # ============================================================
+
+ def compute_onset_labels(frame_labels, threshold=0.5):
+     """
+     Compute onset labels from frame labels (from drum_train_sota.py).
+     Onset = frame is active AND previous frame was inactive.
+     """
+     active = (frame_labels > threshold).float()
+     # Shift the activity map right by one frame along time (dim 1 of [B, T, C])
+     prev_active = F.pad(active[:, :-1], (0, 0, 1, 0), value=0)
+     onsets = active * (1 - prev_active)
+     return onsets
+
+
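+ # Minimal sketch of the onset convention above (shapes assumed [B, T, C]):
+ #   frames = torch.tensor([[[0.], [1.], [1.], [0.], [1.]]])
+ #   compute_onset_labels(frames) -> [[[0.], [1.], [0.], [0.], [1.]]]
+ # i.e. only the first active frame of each run counts as an onset.
+
+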
+ def compute_mel_spectrogram(waveform, sr=16000, n_mels=64, hop_length=320, n_fft=1024):
+     """Compute Mel Spectrogram matching CNNSA training params."""
+     if isinstance(waveform, torch.Tensor):
+         waveform = waveform.numpy()
+
+     if waveform.ndim > 1:
+         waveform = waveform.squeeze()
+
+     mel = librosa.feature.melspectrogram(
+         y=waveform,
+         sr=sr,
+         n_fft=n_fft,
+         hop_length=hop_length,
+         n_mels=n_mels
+     )
+     mel = librosa.power_to_db(mel, ref=np.max)
+     return torch.tensor(mel, dtype=torch.float32)
+
+
+ def compute_hcqt(waveform, sr=22050, hop_length=512, harmonics=[1, 2, 3]):
+     """Compute HCQT (from bass_train_sota.py)."""
+     if isinstance(waveform, torch.Tensor):
+         y = waveform.squeeze().cpu().numpy()
+     else:
+         y = waveform
+
+     fmin = librosa.note_to_hz("E1")
+     bins_per_octave = 12
+     n_octaves = 6
+     n_bins = n_octaves * bins_per_octave
+
+     hcqt = []
+     for h in harmonics:
+         cqt = librosa.cqt(
+             y=y,
+             sr=sr,
+             hop_length=hop_length,
+             fmin=fmin * h,
+             n_bins=n_bins,
+             bins_per_octave=bins_per_octave
+         )
+         hcqt.append(np.abs(cqt))
+
+     hcqt = np.log(np.stack(hcqt) + 1e-9)
+     return torch.from_numpy(hcqt).float().permute(0, 2, 1)  # [H, T, F]
+
+
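+ # Shape sketch: ~2 s of audio at 22050 Hz with hop 512 gives roughly 86 frames,
+ # so compute_hcqt returns approximately [3, 86, 72] (harmonics x time x CQT bins).
+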
+ # ============================================================
+ # METRICS
+ # ============================================================
+
+ def calculate_metrics(pred_logits, target_labels, threshold=0.5):
+     """
+     Calculate Frame F1, Onset F1, Precision, Recall.
+     """
+     preds = (torch.sigmoid(pred_logits) > threshold).float()
+
+     # Flatten
+     preds_flat = preds.cpu().numpy().flatten()
+     targets_flat = target_labels.cpu().numpy().flatten()
+
+     # Frame metrics
+     frame_f1 = f1_score(targets_flat, preds_flat, zero_division=0)
+     frame_precision = precision_score(targets_flat, preds_flat, zero_division=0)
+     frame_recall = recall_score(targets_flat, preds_flat, zero_division=0)
+
+     # Onset metrics
+     pred_onsets = compute_onset_labels(preds).cpu().numpy().flatten()
+     target_onsets = compute_onset_labels(target_labels).cpu().numpy().flatten()
+
+     onset_f1 = f1_score(target_onsets, pred_onsets, zero_division=0)
+     onset_precision = precision_score(target_onsets, pred_onsets, zero_division=0)
+     onset_recall = recall_score(target_onsets, pred_onsets, zero_division=0)
+
+     return {
+         'frame_f1': frame_f1,
+         'frame_precision': frame_precision,
+         'frame_recall': frame_recall,
+         'onset_f1': onset_f1,
+         'onset_precision': onset_precision,
+         'onset_recall': onset_recall
+     }
+
+
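+ # Hypothetical smoke test for calculate_metrics (defined only, never called):
+ def _demo_calculate_metrics():
+     logits = torch.randn(2, 100, 9)                 # [batch, frames, classes]
+     labels = (torch.rand(2, 100, 9) > 0.9).float()  # sparse random targets
+     print(calculate_metrics(logits, labels))
+
+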
+ # ============================================================
+ # DATASETS
+ # ============================================================
+
+ class DrumEvalDataset(Dataset):
+     def __init__(self, h5_path):
+         self.h5_path = h5_path
+         with h5py.File(h5_path, "r") as f:
+             self.length = f["audio"].shape[0]
+         logging.info(f"Drum dataset: {self.length} samples")
+
+     def __len__(self):
+         return self.length
+
+     def __getitem__(self, idx):
+         with h5py.File(self.h5_path, "r") as f:
+             audio = torch.from_numpy(f["audio"][idx]).float()
+             labels = torch.from_numpy(f["labels"][idx]).float()
+
+         # SOTA input (raw audio)
+         sota_input = audio
+
+         # Comparison input (Mel Spectrogram)
+         # Match CNNSA training: hop=256 for ~62.5 Hz frame rate
+         comp_input = compute_mel_spectrogram(audio, sr=16000, n_mels=64, hop_length=256)
+
+         return {
+             "sota_input": sota_input,
+             "comp_input": comp_input,
+             "labels": labels
+         }
+
+
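+ # Each drum item pairs two views of the same clip: raw 16 kHz audio for the
+ # WavLM-based model and a 64-band log-mel for CNNSA, against shared H5 labels.
+
+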
+ class BassEvalDataset(Dataset):
+     def __init__(self, audio_dir, midi_dir):
+         import glob
+         self.pairs = []
+         for af in sorted(glob.glob(os.path.join(audio_dir, "*.flac"))):
+             base = os.path.splitext(os.path.basename(af))[0]
+             if base.startswith('._'):  # Skip macOS metadata
+                 continue
+             mf = os.path.join(midi_dir, base + ".mid")
+             if not os.path.exists(mf):
+                 mf = os.path.join(midi_dir, base + ".midi")
+             if os.path.exists(mf):
+                 self.pairs.append((af, mf))
+
+         logging.info(f"Bass dataset: {len(self.pairs)} pairs")
+
+     def __len__(self):
+         return len(self.pairs)
+
+     def __getitem__(self, idx):
+         audio_path, midi_path = self.pairs[idx]
+
+         try:
+             audio_data, sr = sf.read(audio_path)
+             waveform = torch.from_numpy(audio_data).float()
+         except Exception as e:
+             logging.error(f"Failed to read {audio_path}: {e}")
+             return self.__getitem__((idx + 1) % len(self))
+
+         # Ensure [C, T] shape
+         if waveform.ndim == 1:
+             waveform = waveform.unsqueeze(0)
+         else:
+             waveform = waveform.t()
+
+         # Resample to 16kHz for SOTA
+         if sr != 16000:
+             waveform = torchaudio.functional.resample(waveform, sr, 16000)
+
+         if waveform.shape[0] > 1:
+             waveform = waveform.mean(dim=0, keepdim=True)
+
+         # HCQT for SOTA (needs 22050 Hz)
+         wav_22k = torchaudio.functional.resample(waveform, 16000, 22050)
+         hcqt = compute_hcqt(wav_22k)
+
+         # Mel for comparison (using 22050 Hz like training)
+         mel_spec = torchaudio.transforms.MelSpectrogram(
+             sample_rate=22050,
+             n_fft=2048,
+             hop_length=512,
+             n_mels=88,
+             f_min=27.5,
+             f_max=1000.0,
+             normalized=True
+         )(wav_22k).squeeze(0)
+         mel_spec = torch.log(mel_spec + 1e-9).transpose(0, 1)  # [Time, Mels]
+
+         # Label frame rate must match the HCQT grid (22050 Hz, hop 512),
+         # not the original file sample rate.
+         fps = 22050 / 512
+         pm = pretty_midi.PrettyMIDI(midi_path)
+
+         # Use HCQT length as reference
+         n_frames = hcqt.shape[1]
+
+         labels_full = np.zeros((n_frames, 88), dtype=np.float32)
+
+         for inst in pm.instruments:
+             for note in inst.notes:
+                 start = int(note.start * fps)
+                 end = int(note.end * fps)
+                 pitch = note.pitch - 21
+                 if 0 <= pitch < 88 and start < n_frames:
+                     end = min(end, n_frames)
+                     labels_full[start:end, pitch] = 1.0
+
+         labels_full = torch.from_numpy(labels_full).float()
+
+         # Bass range labels (MIDI 28-67)
+         labels_sota = labels_full[:, 7:47]  # 40 bins
+
+         return {
+             "sota_input_wav": waveform.squeeze(),
+             "sota_input_hcqt": hcqt,
+             "comp_input_mel": mel_spec,
+             "labels_full": labels_full,
+             "labels_sota": labels_sota
+         }
+
+
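+ # Per-item views for the bass task (all derived from one clip):
+ #   sota_input_wav   mono 16 kHz waveform for the frozen speech encoder
+ #   sota_input_hcqt  [3, T, 72] log-HCQT at 22050 Hz, hop 512
+ #   comp_input_mel   [T', 88] log-mel for the CRNN baseline
+ # Labels are gridded to the HCQT frame rate (22050/512 ≈ 43 fps).
+
+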
+ # ============================================================
+ # MODELS
+ # ============================================================
+
+ class PositionalEncoding(nn.Module):
+     def __init__(self, d_model, max_len=5000):
+         super().__init__()
+         pe = torch.zeros(max_len, d_model)
+         position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
+         div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
+         pe[:, 0::2] = torch.sin(position * div_term)
+         pe[:, 1::2] = torch.cos(position * div_term)
+         self.register_buffer('pe', pe)
+
+     def forward(self, x):
+         # Expects [T, B, D] input; callers transpose around this module
+         return x + self.pe[:x.size(0), :].unsqueeze(1)
+
+
+ class CNNSA(nn.Module):
+     def __init__(self, input_freq_bins=64, num_classes=9, d_model=512, nhead=8, num_layers=3):
+         super().__init__()
+         self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)
+         self.bn1 = nn.BatchNorm2d(32)
+         self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
+         self.bn2 = nn.BatchNorm2d(64)
+         self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
+         self.bn3 = nn.BatchNorm2d(128)
+         self.conv4 = nn.Conv2d(128, d_model, kernel_size=3, padding=1)
+         self.bn4 = nn.BatchNorm2d(d_model)
+         self.pool_sq = nn.MaxPool2d(2, 2)
+         self.pool_freq = nn.MaxPool2d((2, 1))
+
+         # 64 freq bins are pooled down to 4 (two /2 square pools, two /2 freq-only pools)
+         self.cnn_flatten_dim = d_model * 4
+
+         self.projection = nn.Linear(self.cnn_flatten_dim, d_model)
+         self.pos_encoder = PositionalEncoding(d_model)
+         encoder_layers = nn.TransformerEncoderLayer(
+             d_model=d_model, nhead=nhead, dim_feedforward=1024, dropout=0.2, batch_first=True
+         )
+         self.transformer_encoder = nn.TransformerEncoder(encoder_layers, num_layers=num_layers)
+         self.fc1 = nn.Linear(d_model, 256)
+         self.fc2 = nn.Linear(256, num_classes)
+         self.dropout = nn.Dropout(0.3)
+
+     def forward(self, x):
+         if x.dim() == 3:
+             x = x.unsqueeze(1)
+
+         x = self.pool_sq(F.relu(self.bn1(self.conv1(x))))
+         x = self.pool_sq(F.relu(self.bn2(self.conv2(x))))
+         x = self.pool_freq(F.relu(self.bn3(self.conv3(x))))
+         x = self.pool_freq(F.relu(self.bn4(self.conv4(x))))
+
+         b, c, f, t = x.shape
+         x = x.permute(0, 3, 1, 2).contiguous().view(b, t, c * f)
+         x = self.projection(x)
+         x = self.pos_encoder(x.transpose(0, 1)).transpose(0, 1)
+         x = self.transformer_encoder(x)
+         x = F.relu(self.fc1(x))
+         x = self.dropout(x)
+         return self.fc2(x)  # Return logits, not sigmoid
+
+
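+ # CNNSA shape sketch for a drum clip: mel [B, 64, T] ->
+ #   conv/pool stack [B, 512, 4, T/4] -> flatten + project [B, T/4, 512]
+ #   -> transformer -> logits [B, T/4, 9] (time downsampled 4x by the square pools)
+
+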
+ class DrumSOTAModel(nn.Module):
+     def __init__(self, num_classes=9, unfreeze_layers=4):
+         super().__init__()
+         try:
+             self.wavlm = WavLMModel.from_pretrained("microsoft/wavlm-base", use_safetensors=True)
+         except Exception:
+             self.wavlm = WavLMModel.from_pretrained("microsoft/wavlm-base")
+
+         hidden = self.wavlm.config.hidden_size
+         self.frame_head = nn.Sequential(
+             nn.Linear(hidden, hidden // 2),
+             nn.LayerNorm(hidden // 2),
+             nn.GELU(),
+             nn.Dropout(0.1),
+             nn.Linear(hidden // 2, num_classes)
+         )
+         self.onset_head = nn.Sequential(
+             nn.Linear(hidden, hidden // 4),
+             nn.LayerNorm(hidden // 4),
+             nn.GELU(),
+             nn.Dropout(0.2),
+             nn.Linear(hidden // 4, num_classes)
+         )
+
+     def forward(self, audio):
+         out = self.wavlm(audio).last_hidden_state
+         return self.frame_head(out), self.onset_head(out)
+
+
+ class ConformerBlock(nn.Module):
+     def __init__(self, d_model=512, nhead=8, conv_kernel=31, dropout=0.1):
+         super().__init__()
+         self.ffn1 = nn.Sequential(
+             nn.LayerNorm(d_model), nn.Linear(d_model, d_model * 4), nn.SiLU(), nn.Dropout(dropout),
+             nn.Linear(d_model * 4, d_model), nn.Dropout(dropout)
+         )
+         self.norm_attn = nn.LayerNorm(d_model)
+         self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=True)
+         self.dropout_attn = nn.Dropout(dropout)
+         self.norm_conv = nn.LayerNorm(d_model)
+         self.pointwise_conv1 = nn.Conv1d(d_model, d_model * 2, 1)
+         self.depthwise_conv = nn.Conv1d(d_model, d_model, conv_kernel, padding=conv_kernel // 2, groups=d_model)
+         self.batch_norm = nn.BatchNorm1d(d_model)
+         self.activation = nn.SiLU()
+         self.pointwise_conv2 = nn.Conv1d(d_model, d_model, 1)
+         self.dropout_conv = nn.Dropout(dropout)
+         self.ffn2 = nn.Sequential(
+             nn.LayerNorm(d_model), nn.Linear(d_model, d_model * 4), nn.SiLU(), nn.Dropout(dropout),
+             nn.Linear(d_model * 4, d_model), nn.Dropout(dropout)
+         )
+         self.norm_final = nn.LayerNorm(d_model)
+
+     def forward(self, x):
+         x = x + 0.5 * self.ffn1(x)
+         residual = x
+         x = self.norm_attn(x)
+         x, _ = self.self_attn(x, x, x)
+         x = residual + self.dropout_attn(x)
+         residual = x
+         x = self.norm_conv(x).transpose(1, 2)
+         x = F.glu(self.pointwise_conv1(x), dim=1)
+         x = self.activation(self.batch_norm(self.depthwise_conv(x)))
+         x = self.dropout_conv(self.pointwise_conv2(x)).transpose(1, 2)
+         x = residual + x
+         x = x + 0.5 * self.ffn2(x)
+         return self.norm_final(x)
+
+
+ class Conformer(nn.Module):
+     def __init__(self, d_model=512, nhead=8, conv_kernel=31, num_layers=2):
+         super().__init__()
+         self.layers = nn.ModuleList([ConformerBlock(d_model, nhead, conv_kernel) for _ in range(num_layers)])
+
+     def forward(self, x):
+         for layer in self.layers:
+             x = layer(x)
+         return x
+
+
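+ # Macaron layout per block: x + ½·FFN -> self-attention -> conv module -> x + ½·FFN
+ # -> final LayerNorm, following the standard Conformer recipe (Gulati et al., 2020).
+
+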
+ class SimpleHarmonicAttention(nn.Module):
+     def __init__(self, n_bins=72, n_harmonics=3):
+         super().__init__()
+         self.attention = nn.MultiheadAttention(n_bins, 4, batch_first=True, dropout=0.1)
+
+     def forward(self, hcqt):
+         # hcqt: [B, H, T, F]; attend across harmonics independently per (batch, frame).
+         # Lower-case names avoid shadowing torch.nn.functional (imported as F).
+         b, h, t, f = hcqt.shape
+         x = hcqt.permute(0, 2, 1, 3).reshape(b * t, h, f)
+         x, _ = self.attention(x, x, x)
+         return x.reshape(b, t, h, f).permute(0, 2, 1, 3)
+
+
+ class SpectralCNN(nn.Module):
+     def __init__(self, in_channels=3, hidden_dim=512):
+         super().__init__()
+         self.conv = nn.Sequential(
+             nn.Conv2d(in_channels, 64, 3, padding=1), nn.BatchNorm2d(64), nn.ReLU(), nn.MaxPool2d((1, 2)),
+             nn.Conv2d(64, 128, 3, padding=1), nn.BatchNorm2d(128), nn.ReLU(), nn.MaxPool2d((1, 2)),
+             nn.Conv2d(128, hidden_dim, 3, padding=1), nn.BatchNorm2d(hidden_dim), nn.ReLU()
+         )
+         self.pool = nn.AdaptiveAvgPool2d((None, 1))
+
+     def forward(self, x):
+         # [B, H, T, F] -> [B, hidden, T, 1] -> [B, T, hidden]
+         return self.pool(self.conv(x)).squeeze(-1).transpose(1, 2)
+
+
+ class BassSOTAModel(nn.Module):
+     def __init__(self, use_harmonic_branch=True, hidden_dim=768):
+         super().__init__()
+         self.use_harmonic_branch = use_harmonic_branch
+         # Note: this loads a WavLM checkpoint through the Wav2Vec2 class; transformers
+         # will warn about the type mismatch. Left unchanged so saved checkpoint keys match.
+         self.audio_encoder = Wav2Vec2Model.from_pretrained("microsoft/wavlm-base-plus", use_safetensors=True)
+         for p in self.audio_encoder.parameters():
+             p.requires_grad = False
+         self.audio_proj = nn.Sequential(nn.Linear(768, hidden_dim), nn.LayerNorm(hidden_dim), nn.Dropout(0.1))
+
+         N_BINS = 72
+         HARMONICS = [1, 2, 3]
+         N_MIDI_BINS = 40
+
+         if use_harmonic_branch:
+             self.harmonic_attn = SimpleHarmonicAttention(N_BINS, len(HARMONICS))
+             self.spec_cnn = SpectralCNN(len(HARMONICS), hidden_dim)
+
+         fusion_dim = hidden_dim * (2 if use_harmonic_branch else 1)
+         self.fusion = nn.Sequential(nn.Linear(fusion_dim, hidden_dim), nn.LayerNorm(hidden_dim), nn.GELU(), nn.Dropout(0.1))
+         self.conformer = Conformer(hidden_dim, num_layers=2)
+         self.onset_head = nn.Sequential(
+             nn.Linear(hidden_dim, hidden_dim // 2), nn.LayerNorm(hidden_dim // 2), nn.GELU(), nn.Linear(hidden_dim // 2, N_MIDI_BINS)
+         )
+         self.frame_head = nn.Sequential(
+             nn.Linear(hidden_dim + N_MIDI_BINS, hidden_dim // 2), nn.LayerNorm(hidden_dim // 2), nn.GELU(),
+             nn.Linear(hidden_dim // 2, N_MIDI_BINS)
+         )
+
+     def forward(self, waveform, hcqt=None):
+         with torch.no_grad():
+             audio = self.audio_encoder(waveform).last_hidden_state
+         audio = self.audio_proj(audio)
+
+         if self.use_harmonic_branch and hcqt is not None:
+             T_target = hcqt.shape[2]
+             spec = self.spec_cnn(self.harmonic_attn(hcqt))
+             if audio.shape[1] != T_target:
+                 audio = F.interpolate(audio.transpose(1, 2), size=T_target, mode='linear', align_corners=False).transpose(1, 2)
+             if spec.shape[1] != T_target:
+                 spec = F.interpolate(spec.transpose(1, 2), size=T_target, mode='linear', align_corners=False).transpose(1, 2)
+             x = torch.cat([audio, spec], dim=-1)
+         else:
+             x = audio
+
+         x = self.conformer(self.fusion(x))
+         onset = self.onset_head(x)
+         frame = self.frame_head(torch.cat([x, onset], dim=-1))
+         return onset, frame
+
+
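+ # BassSOTAModel flow: frozen speech-encoder features [B, Ta, 768] and HCQT-branch
+ # features [B, Tf, 768] are both interpolated to the HCQT frame count, concatenated,
+ # fused back to 768, run through the Conformer, then decoded by an onset head and
+ # an onset-conditioned frame head (40 MIDI bins each).
+
+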
+ class BassCompModel(nn.Module):
+     def __init__(self, input_features=88, hidden_size=256, num_classes=88):
+         super().__init__()
+         self.cnn = nn.Sequential(
+             nn.Conv2d(1, 16, (3, 3), padding=1), nn.BatchNorm2d(16), nn.ReLU(), nn.MaxPool2d((1, 2)),
+             nn.Conv2d(16, 32, (3, 3), padding=1), nn.BatchNorm2d(32), nn.ReLU(), nn.MaxPool2d((1, 2))
+         )
+         self.lstm = nn.LSTM(32 * (input_features // 4), hidden_size, 2, batch_first=True, bidirectional=True)
+         self.fc = nn.Linear(hidden_size * 2, num_classes)
+
+     def forward(self, x):
+         x = x.unsqueeze(1)  # [B, T, F] -> [B, 1, T, F]
+         x = self.cnn(x)
+         b, c, t, f = x.size()
+         x = x.permute(0, 2, 1, 3).reshape(b, t, -1)
+         x, _ = self.lstm(x)
+         return self.fc(x)  # Return logits
+
+
+ # ============================================================
+ # MODEL LOADING
+ # ============================================================
+
+ def load_model_safe(weights_path, device, task):
+     """Robustly load a model, inferring its architecture from the checkpoint keys."""
+     if not weights_path or not os.path.exists(weights_path):
+         logging.warning(f"Weights file not found: {weights_path}")
+         return None, None
+
+     logging.info(f"Loading weights from {weights_path}...")
+     try:
+         ckpt = torch.load(weights_path, map_location='cpu')
+     except Exception as e:
+         logging.error(f"Failed to load checkpoint: {e}")
+         return None, None
+
+     state_dict = ckpt
+     if isinstance(ckpt, dict):
+         if 'model' in ckpt:
+             state_dict = ckpt['model']
+         elif 'model_state_dict' in ckpt:
+             state_dict = ckpt['model_state_dict']
+
+     keys = list(state_dict.keys())
+     if not keys:
+         logging.error("Checkpoint is empty.")
+         return None, None
+
+     model = None
+     model_type = "Unknown"
+
+     if task == "bass":
+         if any(k.startswith("audio_encoder") or k.startswith("conformer") for k in keys):
+             logging.info("➡ Detected: BassSOTAModel")
+             model = BassSOTAModel().to(device)
+             model_type = "SOTA"
+         elif any(k.startswith("cnn") or k.startswith("lstm") for k in keys):
+             logging.info("➡ Detected: BassCompModel (CRNN)")
+             model = BassCompModel().to(device)
+             model_type = "CRNN"
+
+     elif task == "drum":
+         if any(k.startswith("wavlm") for k in keys):
+             logging.info("➡ Detected: DrumSOTAModel")
+             model = DrumSOTAModel().to(device)
+             model_type = "SOTA"
+         else:
+             logging.info("➡ Detected: CNNSA")
+             model = CNNSA().to(device)
+             model_type = "CNNSA"
+
+     if model:
+         try:
+             model.load_state_dict(state_dict, strict=True)
+             logging.info("✓ Loaded successfully")
+         except RuntimeError:
+             # Strip a DataParallel "module." prefix and retry
+             new_state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
+             try:
+                 model.load_state_dict(new_state_dict, strict=True)
+                 logging.info("✓ Loaded after key fix")
+             except RuntimeError:
+                 model.load_state_dict(new_state_dict, strict=False)
+                 logging.warning("⚠ Loaded with strict=False")
+
+     return model, model_type
+
+
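+ # Usage sketch (hypothetical paths): both checkpoints go through the same loader,
+ # which picks the matching architecture from the state-dict keys:
+ #   model, mtype = load_model_safe("checkpoints/bass_sota.pt", torch.device("cpu"), "bass")
+
+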
+ # ============================================================
+ # EVALUATION
+ # ============================================================
+
+ def evaluate(args):
+     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+     logging.info(f"Task: {args.task} | Device: {device}")
+
+     # Load models
+     models = {}
+
+     if args.sota_weights:
+         model_sota, type_sota = load_model_safe(args.sota_weights, device, args.task)
+         if model_sota:
+             models['SOTA'] = (model_sota, type_sota)
+
+     if args.comp_weights:
+         model_comp, type_comp = load_model_safe(args.comp_weights, device, args.task)
+         if model_comp:
+             models['Comparison'] = (model_comp, type_comp)
+
+     if not models:
+         logging.error("No models loaded. Exiting.")
+         return
+
+     # Load dataset
+     if args.task == "drum":
+         dataset = DrumEvalDataset(args.data_path)
+     elif args.task == "bass":
+         if not args.midi_path:
+             logging.error("--midi_path required for bass evaluation")
+             return
+         dataset = BassEvalDataset(args.data_path, args.midi_path)
+
+     # Bass clips vary in length, so the default collate can only batch one at a time
+     batch_size = 1 if args.task == "bass" else 4
+     loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=2)
+
+     # Metrics storage
+     results = {name: {
+         'frame_f1': [], 'frame_precision': [], 'frame_recall': [],
+         'onset_f1': [], 'onset_precision': [], 'onset_recall': []
+     } for name in models}
+
+     # Set to eval
+     for m, _ in models.values():
+         m.eval()
+
+     logging.info("Starting evaluation...")
+     with torch.no_grad():
+         for batch_idx, batch in enumerate(tqdm(loader, desc="Evaluating")):
+             if args.task == "drum":
+                 wav = batch['sota_input'].to(device)
+                 mel = batch['comp_input'].to(device)
+                 y = batch['labels'].to(device)
+
+                 for name, (model, mtype) in models.items():
+                     if mtype == "SOTA":
+                         f_pred, o_pred = model(wav)
+                     else:  # CNNSA
+                         f_pred = model(mel)
+                         o_pred = f_pred  # Use frame for onset approximation
+
+                     # Align prediction frame rate to the label grid
+                     if f_pred.shape[1] != y.shape[1]:
+                         f_pred = F.interpolate(f_pred.transpose(1, 2), size=y.shape[1], mode='linear').transpose(1, 2)
+                     if o_pred.shape[1] != y.shape[1]:
+                         o_pred = F.interpolate(o_pred.transpose(1, 2), size=y.shape[1], mode='linear').transpose(1, 2)
+
+                     # Calculate metrics
+                     metrics = calculate_metrics(f_pred, y)
+                     for k, v in metrics.items():
+                         results[name][k].append(v)
+
+             elif args.task == "bass":
+                 wav = batch['sota_input_wav'].to(device)
+                 hcqt = batch['sota_input_hcqt'].to(device)
+                 mel = batch['comp_input_mel'].to(device)
+                 y_full = batch['labels_full'].to(device)
+                 y_sota = batch['labels_sota'].to(device)
+
+                 for name, (model, mtype) in models.items():
+                     if mtype == "SOTA":
+                         o_pred, f_pred = model(wav, hcqt)
+                         target = y_sota
+                     elif mtype == "CRNN":
+                         f_pred = model(mel)
+                         o_pred = f_pred
+                         target = y_full
+
+                     # Align
+                     if f_pred.shape[1] != target.shape[1]:
+                         f_pred = F.interpolate(f_pred.transpose(1, 2), size=target.shape[1], mode='linear').transpose(1, 2)
+                     if o_pred.shape[1] != target.shape[1]:
+                         o_pred = F.interpolate(o_pred.transpose(1, 2), size=target.shape[1], mode='linear').transpose(1, 2)
+
+                     metrics = calculate_metrics(f_pred, target)
+                     for k, v in metrics.items():
+                         results[name][k].append(v)
+
+     # Print results
+     print(f"\n{'='*80}")
+     print(f"EVALUATION RESULTS - {args.task.upper()}")
+     print(f"{'='*80}")
+     print(f"{'Model':<15} | {'Type':<8} | {'Frame F1':<10} | {'Frame P':<10} | {'Frame R':<10} | {'Onset F1':<10}")
+     print("-" * 80)
+
+     for name, metrics in results.items():
+         mtype = models[name][1]
+         print(f"{name:<15} | {mtype:<8} | "
+               f"{np.mean(metrics['frame_f1']):.4f} | "
+               f"{np.mean(metrics['frame_precision']):.4f} | "
+               f"{np.mean(metrics['frame_recall']):.4f} | "
+               f"{np.mean(metrics['onset_f1']):.4f}")
+
+     print(f"{'='*80}\n")
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(description="Evaluate SOTA vs Comparison models")
+     parser.add_argument("--task", required=True, choices=["drum", "bass"])
+     parser.add_argument("--data_path", required=True, help="Path to audio dir (bass) or H5 file (drum)")
+     parser.add_argument("--midi_path", help="MIDI directory (bass only)")
+     parser.add_argument("--sota_weights", default=None, help="SOTA model weights (optional)")
+     parser.add_argument("--comp_weights", default=None, help="Comparison model weights (optional)")
+
+     args = parser.parse_args()
+     evaluate(args)
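+
+ # Example invocations (hypothetical paths):
+ #   python scripts/evaluate_comparison.py --task drum --data_path data/drums_eval.h5 \
+ #       --sota_weights ckpt/drum_sota.pt --comp_weights ckpt/drum_cnnsa.pt
+ #   python scripts/evaluate_comparison.py --task bass --data_path data/bass/audio \
+ #       --midi_path data/bass/midi --sota_weights ckpt/bass_sota.pt --comp_weights ckpt/bass_crnn.pt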