"""Extract wav2vec 2.0 embeddings using HuggingFace Transformers.

Model: facebook/wav2vec2-base (self-supervised ONLY, 768-dim)
- Pre-trained with contrastive loss on LibriSpeech 960h (Baevski et al., NeurIPS 2020)
- NOT the ASR-fine-tuned version (wav2vec2-base-960h has CTC fine-tuning)
Frame-level output is mean-pooled to get utterance-level embeddings.
Install: pip install transformers torchaudio
"""
import argparse

import torch

from extraction_utils import load_audio, extract_all


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu")
    parser.add_argument("--base-dir", default=None)
    parser.add_argument("--output-dir", default=None)
    args = parser.parse_args()

    # Deferred import: argument parsing (and --help) still works even when
    # transformers is not installed.
    from transformers import Wav2Vec2Model, Wav2Vec2FeatureExtractor

    print(f"Loading wav2vec2-base (self-supervised, no ASR fine-tuning) on {args.device}...")
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base")
    model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base").to(args.device)
    model.eval()  # inference only: disable dropout
|
|
    def model_fn(audio_path):
        # Load mono audio resampled to 16 kHz, the rate wav2vec2 expects.
        audio = load_audio(audio_path, target_sr=16000)
        inputs = feature_extractor(audio, sampling_rate=16000, return_tensors="pt")
        inputs = {k: v.to(args.device) for k, v in inputs.items()}
        with torch.no_grad():
            outputs = model(**inputs)
        # last_hidden_state is (1, T, 768); mean over the time axis gives one
        # utterance-level vector. Plain mean is safe here: batch size is 1, so
        # there is no padding to mask out.
        hidden = outputs.last_hidden_state
        embedding = hidden.mean(dim=1).squeeze(0).cpu().numpy()  # shape (768,)
        return embedding
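    # Optional sanity check before a full extraction run (a sketch;
    # "example.wav" is a placeholder path, 768 is the wav2vec2-base hidden size):
    #   emb = model_fn("example.wav")
    #   assert emb.shape == (768,)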
|
|
    extract_all(model_fn, "wav2vec2", args.base_dir, args.output_dir)


if __name__ == "__main__":
    main()
|
|