# vipbench/code/extract_ssl_layers.py
#!/usr/bin/env python3
"""Extract per-layer embeddings from SSL speech models.
For wav2vec2, HuBERT, WavLM, XLS-R, and Whisper, the "last hidden state" used
in the main extraction is known to underperform for speaker tasks relative to
intermediate layers (see SUPERB benchmark). This script saves the mean-pooled
embedding from EVERY transformer layer, enabling layer-wise analysis.
Output format: for each audio file, a numpy array of shape (n_layers, dim).
Usage:
python3 extract_ssl_layers.py --model wav2vec2 --device cpu
python3 extract_ssl_layers.py --model hubert --device cpu
python3 extract_ssl_layers.py --model wavlm --device cpu
python3 extract_ssl_layers.py --model xlsr --device cpu
python3 extract_ssl_layers.py --model whisper --device cpu
"""
import argparse
import numpy as np
import torch
from extraction_utils import load_audio, extract_all
# Hugging Face model configurations, keyed by the CLI --model name.
# 'model_cls' and 'fe_cls' are class NAMES resolved lazily from the
# `transformers` module in build_hf_model_fn (keeps the import optional
# until a HF model is actually requested). All four checkpoints consume
# raw 16 kHz waveforms through the same Wav2Vec2FeatureExtractor API.
HF_CONFIGS = {
    'wav2vec2': {
        'model_id': 'facebook/wav2vec2-base',
        'model_cls': 'Wav2Vec2Model',
        'fe_cls': 'Wav2Vec2FeatureExtractor',
    },
    'hubert': {
        'model_id': 'facebook/hubert-base-ls960',
        'model_cls': 'HubertModel',
        'fe_cls': 'Wav2Vec2FeatureExtractor',
    },
    'wavlm': {
        'model_id': 'microsoft/wavlm-base-plus',
        'model_cls': 'WavLMModel',
        'fe_cls': 'Wav2Vec2FeatureExtractor',
    },
    # XLS-R is a multilingual wav2vec2 variant, hence the shared class.
    'xlsr': {
        'model_id': 'facebook/wav2vec2-xls-r-300m',
        'model_cls': 'Wav2Vec2Model',
        'fe_cls': 'Wav2Vec2FeatureExtractor',
    },
}
def build_hf_model_fn(model_name, device):
    """Build a closure mapping an audio path to per-layer embeddings.

    Loads the Hugging Face model / feature-extractor pair configured for
    *model_name* (a key of HF_CONFIGS) onto *device*, and returns a
    function of one argument (an audio file path) producing a numpy
    array of shape (n_layers + 1, dim): index 0 is the CNN feature
    projection output, indices 1..n are the transformer layers, each
    mean-pooled over the time axis.
    """
    import transformers

    spec = HF_CONFIGS[model_name]
    model_cls = getattr(transformers, spec['model_cls'])
    extractor_cls = getattr(transformers, spec['fe_cls'])
    print(f"Loading {model_name} ({spec['model_id']}) on {device}...")
    extractor = extractor_cls.from_pretrained(spec['model_id'])
    net = model_cls.from_pretrained(spec['model_id']).to(device)
    net.eval()

    def model_fn(audio_path):
        waveform = load_audio(audio_path, target_sr=16000)
        batch = extractor(waveform, sampling_rate=16000, return_tensors="pt")
        batch = {key: tensor.to(device) for key, tensor in batch.items()}
        with torch.no_grad():
            result = net(**batch, output_hidden_states=True)
        # result.hidden_states is a tuple of (n_layers + 1) tensors, each
        # (1, T, D). Stack to (L+1, 1, T, D), mean-pool over time (dim=2),
        # then drop the singleton batch axis.
        stacked = torch.stack(result.hidden_states, dim=0)
        pooled = stacked.mean(dim=2).squeeze(1)  # (L+1, D)
        return pooled.cpu().numpy()

    return model_fn
def build_whisper_model_fn(device, size='base'):
    """Build a closure returning per-layer Whisper encoder embeddings.

    Loads the Whisper model of the given *size* onto *device* and returns
    a function mapping an audio file path to a numpy array of shape
    (n_blocks + 1, dim): one mean-pooled embedding per encoder block
    output, plus the encoder's final output (post layer-norm) last.
    """
    import whisper
    print(f"Loading Whisper {size} encoder on {device}...")
    model = whisper.load_model(size, device=device)

    def model_fn(audio_path):
        audio = whisper.load_audio(str(audio_path))
        audio = whisper.pad_or_trim(audio)
        mel = whisper.log_mel_spectrogram(audio).to(device)
        # Whisper's encoder returns only its final output by default;
        # capture each block's output with forward hooks.
        layer_outputs = []

        def hook(module, input_, output):
            # output is typically a tensor (B, T, D), or a tuple whose
            # first element is that tensor.
            out = output[0] if isinstance(output, tuple) else output
            layer_outputs.append(out.mean(dim=1).squeeze().cpu().numpy())  # (D,)

        handles = [block.register_forward_hook(hook)
                   for block in model.encoder.blocks]
        try:
            with torch.no_grad():
                # Single forward pass: the hooks record every block output,
                # and the return value IS the final (layer-normed) encoder
                # output — previously a second full forward pass was run
                # just to obtain it, doubling compute per file.
                final = model.encoder(mel.unsqueeze(0))
        finally:
            # Always detach the hooks, even if the forward pass raises,
            # so the model is not left mutated.
            for h in handles:
                h.remove()
        layer_outputs.append(final.mean(dim=1).squeeze().cpu().numpy())
        return np.stack(layer_outputs, axis=0)  # (n_blocks + 1, dim)

    return model_fn
def main():
    """CLI entry point: parse arguments, build the model closure, extract.

    Generalization: the Whisper model size, previously hard-coded to
    'base', is now exposed as --whisper-size (default 'base', so existing
    invocations behave identically).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", required=True,
                        choices=['wav2vec2', 'hubert', 'wavlm', 'xlsr', 'whisper'])
    parser.add_argument("--device", default="cpu")
    parser.add_argument("--whisper-size", default="base",
                        help="Whisper model size; only used with --model whisper")
    parser.add_argument("--base-dir", default=None)
    parser.add_argument("--output-dir", default=None)
    args = parser.parse_args()
    if args.model == 'whisper':
        model_fn = build_whisper_model_fn(args.device, size=args.whisper_size)
    else:
        model_fn = build_hf_model_fn(args.model, args.device)
    extract_all(model_fn, f"{args.model}_layers", args.base_dir, args.output_dir)


if __name__ == "__main__":
    main()