#!/usr/bin/env python3
"""Extract per-layer embeddings from SSL speech models.
For wav2vec2, HuBERT, WavLM, XLS-R, and Whisper, the "last hidden state" used
in the main extraction is known to underperform for speaker tasks relative to
intermediate layers (see SUPERB benchmark). This script saves the mean-pooled
embedding from EVERY transformer layer, enabling layer-wise analysis.
Output format: for each audio file, a numpy array of shape (n_layers, dim).
Usage:
python3 extract_ssl_layers.py --model wav2vec2 --device cpu
python3 extract_ssl_layers.py --model hubert --device cpu
python3 extract_ssl_layers.py --model wavlm --device cpu
python3 extract_ssl_layers.py --model xlsr --device cpu
python3 extract_ssl_layers.py --model whisper --device cpu
"""
import argparse
import numpy as np
import torch
from extraction_utils import load_audio, extract_all
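
# Consuming the saved arrays (illustrative sketch only; the exact output path
# layout depends on extraction_utils and is an assumption here):
#
#   emb = np.load("output/wav2vec2_layers/utt0001.npy")
#   emb.shape   # (13, 768) for wav2vec2-base: CNN projection + 12 layers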

HF_CONFIGS = {
    'wav2vec2': {
        'model_id': 'facebook/wav2vec2-base',
        'model_cls': 'Wav2Vec2Model',
        'fe_cls': 'Wav2Vec2FeatureExtractor',
    },
    'hubert': {
        'model_id': 'facebook/hubert-base-ls960',
        'model_cls': 'HubertModel',
        'fe_cls': 'Wav2Vec2FeatureExtractor',
    },
    'wavlm': {
        'model_id': 'microsoft/wavlm-base-plus',
        'model_cls': 'WavLMModel',
        'fe_cls': 'Wav2Vec2FeatureExtractor',
    },
    'xlsr': {
        'model_id': 'facebook/wav2vec2-xls-r-300m',
        'model_cls': 'Wav2Vec2Model',
        'fe_cls': 'Wav2Vec2FeatureExtractor',
    },
}


def build_hf_model_fn(model_name, device):
    cfg = HF_CONFIGS[model_name]
    # Lazy import so the whisper-only path doesn't require transformers.
    import transformers
    ModelCls = getattr(transformers, cfg['model_cls'])
    FeCls = getattr(transformers, cfg['fe_cls'])
    print(f"Loading {model_name} ({cfg['model_id']}) on {device}...")
    feature_extractor = FeCls.from_pretrained(cfg['model_id'])
    model = ModelCls.from_pretrained(cfg['model_id']).to(device)
    model.eval()

    def model_fn(audio_path):
        audio = load_audio(audio_path, target_sr=16000)
        inputs = feature_extractor(audio, sampling_rate=16000, return_tensors="pt")
        inputs = {k: v.to(device) for k, v in inputs.items()}
        with torch.no_grad():
            outputs = model(**inputs, output_hidden_states=True)
        # outputs.hidden_states: tuple of (n_layers + 1) tensors, each (1, T, D).
        # Index 0 is the CNN feature projection output; indices 1..n_layers are
        # the transformer layer outputs.
        all_layers = torch.stack(outputs.hidden_states, dim=0)  # (L+1, 1, T, D)
        # Mean-pool over the time axis for each layer.
        pooled = all_layers.mean(dim=2).squeeze(1)  # (L+1, D)
        return pooled.cpu().numpy()

    return model_fn
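
# Quick sanity check for the HF path (hypothetical file path; shapes assume the
# default checkpoints configured above):
#
#   fn = build_hf_model_fn('wav2vec2', 'cpu')
#   fn('sample.wav').shape   # (13, 768): CNN projection + 12 transformer layers
#   # xlsr (wav2vec2-xls-r-300m) would give (25, 1024): projection + 24 layers.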


def build_whisper_model_fn(device, size='base'):
    import whisper
    print(f"Loading Whisper {size} encoder on {device}...")
    model = whisper.load_model(size, device=device)

    def model_fn(audio_path):
        audio = whisper.load_audio(str(audio_path))
        audio = whisper.pad_or_trim(audio)
        mel = whisper.log_mel_spectrogram(audio).to(device)
        # Whisper's encoder returns only its final output, so register a
        # forward hook on every transformer block to capture per-layer outputs.
        layer_outputs = []

        def hook(module, input_, output):
            # output is typically a tensor (B, T, D), but guard for tuples.
            out = output[0] if isinstance(output, tuple) else output
            layer_outputs.append(out.mean(dim=1).squeeze().cpu().numpy())  # (D,)

        handles = []
        for block in model.encoder.blocks:
            handles.append(block.register_forward_hook(hook))
        # A single forward pass fires the hooks; its return value is the
        # encoder output after the final layer norm, so no second pass is
        # needed to capture it.
        with torch.no_grad():
            final = model.encoder(mel.unsqueeze(0))
        for h in handles:
            h.remove()
        layer_outputs.append(final.mean(dim=1).squeeze().cpu().numpy())
        return np.stack(layer_outputs, axis=0)  # (n_blocks + 1, D)

    return model_fn
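
# For the default 'base' encoder (6 blocks, d_model=512), model_fn returns an
# array of shape (7, 512): one row per block plus the post-layer-norm output.
# Hypothetical usage:
#
#   fn = build_whisper_model_fn('cpu')
#   fn('sample.wav').shape   # (7, 512)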


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", required=True,
                        choices=['wav2vec2', 'hubert', 'wavlm', 'xlsr', 'whisper'])
    parser.add_argument("--device", default="cpu")
    parser.add_argument("--base-dir", default=None)
    parser.add_argument("--output-dir", default=None)
    args = parser.parse_args()
    if args.model == 'whisper':
        model_fn = build_whisper_model_fn(args.device)
    else:
        model_fn = build_hf_model_fn(args.model, args.device)
    extract_all(model_fn, f"{args.model}_layers", args.base_dir, args.output_dir)


if __name__ == "__main__":
    main()