#!/usr/bin/env python3
"""Extract XLS-R (wav2vec2 multilingual) embeddings.

Model: facebook/wav2vec2-xls-r-300m (self-supervised multilingual, 1024-dim)
Frame-level output is mean-pooled to get utterance-level embeddings.

Install: pip install transformers
"""
import argparse

import numpy as np
import torch

from extraction_utils import load_audio, extract_all


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu")
    parser.add_argument("--base-dir", default=None)
    parser.add_argument("--output-dir", default=None)
    args = parser.parse_args()

    # Imported lazily so argument parsing (e.g. --help) works even when
    # transformers is not installed.
    from transformers import Wav2Vec2Model, Wav2Vec2FeatureExtractor

    print(f"Loading XLS-R 300M on {args.device}...")
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-xls-r-300m")
    model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-xls-r-300m").to(args.device)
    model.eval()

    def model_fn(audio_path):
        # XLS-R was pretrained on 16 kHz audio, so resample on load.
        audio = load_audio(audio_path, target_sr=16000)
        inputs = feature_extractor(audio, sampling_rate=16000, return_tensors="pt")
        inputs = {k: v.to(args.device) for k, v in inputs.items()}
        with torch.no_grad():
            outputs = model(**inputs)
        hidden = outputs.last_hidden_state  # (1, T, 1024)
        # Mean-pool over the time axis to collapse frame-level features
        # into a single 1024-dim utterance embedding.
        embedding = hidden.mean(dim=1).squeeze().cpu().numpy()
        return embedding

    extract_all(model_fn, "xlsr", args.base_dir, args.output_dir)


if __name__ == "__main__":
    main()
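
# Usage sketch: the script filename and the paths below are illustrative
# assumptions; the flags themselves are defined above, and the directory
# layout extract_all expects is determined by extraction_utils.
#
#   python extract_xlsr.py \
#       --device cuda \
#       --base-dir /path/to/audio \
#       --output-dir /path/to/embeddings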