#!/usr/bin/env python3
"""Extract wav2vec 2.0 embeddings using HuggingFace Transformers.

Model: facebook/wav2vec2-base (self-supervised ONLY, 768-dim)
  - Pre-trained with contrastive loss on LibriSpeech 960h (Baevski et al., NeurIPS 2020)
  - NOT the ASR-fine-tuned version (wav2vec2-base-960h has CTC fine-tuning)
Frame-level outputs are mean-pooled into a single utterance-level embedding.
Install: pip install transformers torchaudio
"""

import argparse
import torch
from extraction_utils import load_audio, extract_all


def main():
    parser = argparse.ArgumentParser(description="Extract wav2vec 2.0 embeddings")
    parser.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu",
                        help="torch device to run the model on")
    parser.add_argument("--base-dir", default=None, help="base directory passed to extract_all")
    parser.add_argument("--output-dir", default=None, help="output directory passed to extract_all")
    args = parser.parse_args()

    from transformers import Wav2Vec2Model, Wav2Vec2FeatureExtractor
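    # Imported here, after parse_args, so --help works even if transformers is missing.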

    print(f"Loading wav2vec2-base (self-supervised, no ASR fine-tuning) on {args.device}...")
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base")
    model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base").to(args.device)
    model.eval()

    def model_fn(audio_path):
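        # load_audio (from extraction_utils) is assumed to return a mono float
        # waveform resampled to target_sr; wav2vec2 expects 16 kHz input.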
        audio = load_audio(audio_path, target_sr=16000)
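        # The base checkpoint's feature extractor typically returns only
        # input_values (no attention_mask), so this dict has a single entry.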
        inputs = feature_extractor(audio, sampling_rate=16000, return_tensors="pt")
        inputs = {k: v.to(args.device) for k, v in inputs.items()}
        with torch.no_grad():
            outputs = model(**inputs)
        hidden = outputs.last_hidden_state  # (1, T, 768)
        embedding = hidden.mean(dim=1).squeeze().cpu().numpy()  # (768,)
        return embedding

    extract_all(model_fn, "wav2vec2", args.base_dir, args.output_dir)


if __name__ == "__main__":
    main()