niobures committed on
Commit 7e98d38 · verified · 1 Parent(s): 51a6c89

Pyannote models collection

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +4 -0
  2. brouhaha/.gitattributes +33 -0
  3. brouhaha/README.md +92 -0
  4. brouhaha/brouhaha.gif +3 -0
  5. brouhaha/config.yaml +17 -0
  6. brouhaha/pytorch_model.bin +3 -0
  7. ci-segmentation/.gitattributes +35 -0
  8. ci-segmentation/README.md +19 -0
  9. ci-segmentation/pytorch_model.bin +3 -0
  10. embedding/2.1/.gitattributes +16 -0
  11. embedding/2.1/README.md +121 -0
  12. embedding/2.1/config.yaml +85 -0
  13. embedding/2.1/hparams.yaml +6 -0
  14. embedding/2.1/hydra.yaml +139 -0
  15. embedding/2.1/overrides.yaml +12 -0
  16. embedding/2.1/pytorch_model.bin +3 -0
  17. embedding/2.1/tfevents.bin +3 -0
  18. embedding/2.1/train.log +0 -0
  19. embedding/2022.07/.gitattributes +16 -0
  20. embedding/2022.07/README.md +110 -0
  21. embedding/2022.07/config.yaml +85 -0
  22. embedding/2022.07/hparams.yaml +6 -0
  23. embedding/2022.07/hydra.yaml +139 -0
  24. embedding/2022.07/overrides.yaml +12 -0
  25. embedding/2022.07/pytorch_model.bin +3 -0
  26. embedding/2022.07/tfevents.bin +3 -0
  27. embedding/2022.07/train.log +0 -0
  28. embedding/ASRU2021/.gitattributes +16 -0
  29. embedding/ASRU2021/README.md +107 -0
  30. embedding/ASRU2021/config.yaml +85 -0
  31. embedding/ASRU2021/hparams.yaml +6 -0
  32. embedding/ASRU2021/hydra.yaml +139 -0
  33. embedding/ASRU2021/overrides.yaml +12 -0
  34. embedding/ASRU2021/pytorch_model.bin +3 -0
  35. embedding/ASRU2021/tfevents.bin +3 -0
  36. embedding/ASRU2021/train.log +0 -0
  37. embedding/develop/.gitattributes +16 -0
  38. embedding/develop/README.md +121 -0
  39. embedding/develop/config.yaml +85 -0
  40. embedding/develop/hparams.yaml +6 -0
  41. embedding/develop/hydra.yaml +139 -0
  42. embedding/develop/overrides.yaml +12 -0
  43. embedding/develop/pytorch_model.bin +3 -0
  44. embedding/develop/tfevents.bin +3 -0
  45. embedding/develop/train.log +0 -0
  46. embedding/main/.gitattributes +16 -0
  47. embedding/main/LICENSE +21 -0
  48. embedding/main/README.md +121 -0
  49. embedding/main/config.yaml +85 -0
  50. embedding/main/hparams.yaml +6 -0
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ brouhaha/brouhaha.gif filter=lfs diff=lfs merge=lfs -text
37
+ separation-ami-1.0/model.png filter=lfs diff=lfs merge=lfs -text
38
+ speaker-diarization/technical_report_2.1.pdf filter=lfs diff=lfs merge=lfs -text
39
+ speech-separation-ami-1.0/pipeline.png filter=lfs diff=lfs merge=lfs -text
brouhaha/.gitattributes ADDED
@@ -0,0 +1,33 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ftz filter=lfs diff=lfs merge=lfs -text
6
+ *.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.h5 filter=lfs diff=lfs merge=lfs -text
8
+ *.joblib filter=lfs diff=lfs merge=lfs -text
9
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.npy filter=lfs diff=lfs merge=lfs -text
14
+ *.npz filter=lfs diff=lfs merge=lfs -text
15
+ *.onnx filter=lfs diff=lfs merge=lfs -text
16
+ *.ot filter=lfs diff=lfs merge=lfs -text
17
+ *.parquet filter=lfs diff=lfs merge=lfs -text
18
+ *.pb filter=lfs diff=lfs merge=lfs -text
19
+ *.pickle filter=lfs diff=lfs merge=lfs -text
20
+ *.pkl filter=lfs diff=lfs merge=lfs -text
21
+ *.pt filter=lfs diff=lfs merge=lfs -text
22
+ *.pth filter=lfs diff=lfs merge=lfs -text
23
+ *.rar filter=lfs diff=lfs merge=lfs -text
24
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
25
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
26
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
27
+ *.tflite filter=lfs diff=lfs merge=lfs -text
28
+ *.tgz filter=lfs diff=lfs merge=lfs -text
29
+ *.wasm filter=lfs diff=lfs merge=lfs -text
30
+ *.xz filter=lfs diff=lfs merge=lfs -text
31
+ *.zip filter=lfs diff=lfs merge=lfs -text
32
+ *.zst filter=lfs diff=lfs merge=lfs -text
33
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
brouhaha/README.md ADDED
@@ -0,0 +1,92 @@
1
+ ---
2
+ tags:
3
+ - pyannote
4
+ - pyannote-audio
5
+ - pyannote-audio-model
6
+ - audio
7
+ - voice
8
+ - speech
9
+ - voice-activity-detection
10
+ - speech-to-noise ratio
11
+ - snr
12
+ - room acoustics
13
+ - c50
14
+ datasets:
15
+ - LibriSpeech
16
+ - AudioSet
17
+ - EchoThief
18
+ - MIT-Acoustical-Reverberation-Scene
19
+ license: openrail
20
+ extra_gated_prompt: "The collected information will help acquire a better knowledge of this model userbase and help its maintainers apply for grants to improve it further. "
21
+ extra_gated_fields:
22
+ Company/university: text
23
+ Website: text
24
+ I plan to use this model for (task, type of audio data, etc): text
25
+ ---
26
+
27
+ # 🎙️🥁🚨🔊 Brouhaha
28
+
29
+ ![Sample Brouhaha predictions](brouhaha.gif)
30
+
31
+ **Joint voice activity detection, speech-to-noise ratio, and C50 room acoustics estimation**
32
+
33
+ [TL;DR](https://twitter.com/LavechinMarvin/status/1585645131251605504) | [Paper](https://arxiv.org/abs/2210.13248) | [Code](https://github.com/marianne-m/brouhaha-vad) | [And Now for Something Completely Different](https://www.youtube.com/watch?v=8ZyOAS22Moo)
34
+
35
+
36
+
37
+ ## Installation
38
+
39
+ This model relies on [pyannote.audio](https://github.com/pyannote/pyannote-audio) and [brouhaha-vad](https://github.com/marianne-m/brouhaha-vad).
40
+
41
+ ```bash
42
+ pip install pyannote-audio
43
+ pip install https://github.com/marianne-m/brouhaha-vad/archive/main.zip
44
+ ```
45
+
46
+ ## Usage
47
+
48
+ ```python
49
+ # 1. visit hf.co/pyannote/brouhaha and accept user conditions
50
+ # 2. visit hf.co/settings/tokens to create an access token
51
+ # 3. instantiate pretrained model
52
+ from pyannote.audio import Model
53
+ model = Model.from_pretrained("pyannote/brouhaha",
54
+ use_auth_token="ACCESS_TOKEN_GOES_HERE")
55
+
56
+ # apply model
57
+ from pyannote.audio import Inference
58
+ inference = Inference(model)
59
+ output = inference("audio.wav")
60
+
61
+ # iterate over each frame
62
+ for frame, (vad, snr, c50) in output:
63
+ t = frame.middle
64
+ print(f"{t:8.3f} vad={100*vad:.0f}% snr={snr:.0f} c50={c50:.0f}")
65
+
66
+ # ...
67
+ # 12.952 vad=100% snr=51 c50=17
68
+ # 12.968 vad=100% snr=52 c50=17
69
+ # 12.985 vad=100% snr=53 c50=17
70
+ # ...
71
+ ```
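Beyond printing each frame, the same `output` object can be pooled into file-level statistics. The sketch below is a minimal illustration built on the snippet above; the 0.5 voice activity threshold is an arbitrary assumption, not a value recommended by the model card.

```python
# Minimal pooling sketch. Assumptions: `output` is the result of
# inference("audio.wav") above, and a frame counts as speech when vad > 0.5
# (arbitrary threshold).
import numpy as np

scores = np.array([[vad, snr, c50] for _, (vad, snr, c50) in output])
speech = scores[scores[:, 0] > 0.5]

speech_ratio = len(speech) / len(scores)
mean_snr = speech[:, 1].mean() if len(speech) else float("nan")
mean_c50 = speech[:, 2].mean() if len(speech) else float("nan")
print(f"speech={100 * speech_ratio:.0f}%  mean snr={mean_snr:.1f}  mean c50={mean_c50:.1f}")
```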
72
+
73
+ ## Citation
74
+
75
+ ```bibtex
76
+ @article{lavechin2022brouhaha,
77
+ Title = {{Brouhaha: multi-task training for voice activity detection, speech-to-noise ratio, and C50 room acoustics estimation}},
78
+ Author = {Marvin Lavechin and Marianne Métais and Hadrien Titeux and Alodie Boissonnet and Jade Copet and Morgane Rivière and Elika Bergelson and Alejandrina Cristia and Emmanuel Dupoux and Hervé Bredin},
79
+ Year = {2022},
80
+ Journal = {arXiv preprint arXiv: Arxiv-2210.13248}
81
+ }
82
+
83
+ ```bibtex
84
+ @inproceedings{Bredin2020,
85
+ Title = {{pyannote.audio: neural building blocks for speaker diarization}},
86
+ Author = {{Bredin}, Herv{\'e} and {Yin}, Ruiqing and {Coria}, Juan Manuel and {Gelly}, Gregory and {Korshunov}, Pavel and {Lavechin}, Marvin and {Fustes}, Diego and {Titeux}, Hadrien and {Bouaziz}, Wassim and {Gill}, Marie-Philippe},
87
+ Booktitle = {ICASSP 2020, IEEE International Conference on Acoustics, Speech, and Signal Processing},
88
+ Address = {Barcelona, Spain},
89
+ Month = {May},
90
+ Year = {2020},
91
+ }
92
+ ```
brouhaha/brouhaha.gif ADDED

Git LFS Details

  • SHA256: e885e3364ff9cd458c8a856a9ce850114f890c16fecf28f74c5c685f611a2d0f
  • Pointer size: 131 Bytes
  • Size of remote file: 553 kB
brouhaha/config.yaml ADDED
@@ -0,0 +1,17 @@
1
+ task:
2
+ duration: 6
3
+ batch_size: 64
4
+ architecture:
5
+ sincnet:
6
+ stride: 10
7
+ sample_rate: 16000
8
+ lstm:
9
+ hidden_size: 256
10
+ num_layers: 3
11
+ bidirectional: true
12
+ monolithic: true
13
+ dropout: 0.5
14
+ batch_first: true
15
+ linear:
16
+ hidden_size: 128
17
+ num_layers: 2
brouhaha/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c237e4a7b1de8b456dbee25db853342bf374b19d8732b72b61356519e390ae1
3
+ size 47224097
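Each `pytorch_model.bin` entry in this commit is a Git LFS pointer rather than the weights themselves. As a small sketch (the local path is a placeholder), a downloaded object can be checked against the pointer's `oid` and `size` fields:

```python
# Sketch: verify a downloaded LFS object against the pointer above.
# `path` is a placeholder; oid and size come from the pointer file.
import hashlib
import os

path = "brouhaha/pytorch_model.bin"  # placeholder for the downloaded file
expected_oid = "9c237e4a7b1de8b456dbee25db853342bf374b19d8732b72b61356519e390ae1"
expected_size = 47224097

assert os.path.getsize(path) == expected_size
digest = hashlib.sha256(open(path, "rb").read()).hexdigest()
assert digest == expected_oid
```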
ci-segmentation/.gitattributes ADDED
@@ -0,0 +1,35 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
ci-segmentation/README.md ADDED
@@ -0,0 +1,19 @@
1
+ ---
2
+ tags:
3
+ - pyannote
4
+ - pyannote-audio
5
+ - pyannote-audio-model
6
+ license: mit
7
+ inference: false
8
+ ---
9
+
10
+ Dummy segmentation model used for continuous integration and unit tests.
11
+
12
+ ```bash
13
+ pyannote-audio-train \
14
+ +registry="[REDACTED]/pyannote-audio/tutorials/AMI-diarization-setup/pyannote/database.yml" \
15
+ protocol=AMI.SpeakerDiarization.only_words \
16
+ model=DebugSegmentation \
17
+ task=SpeakerDiarization \
18
+ trainer.max_epochs=1
19
+ ```
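A minimal sketch of how such a dummy checkpoint could be exercised in a test. Both the local checkpoint path being accepted by `Model.from_pretrained` and the audio filename are assumptions made for illustration only.

```python
# Sketch with assumptions: the local checkpoint path is accepted by
# Model.from_pretrained, and "some_test_file.wav" is a placeholder fixture.
from pyannote.audio import Model, Inference

model = Model.from_pretrained("ci-segmentation/pytorch_model.bin")
inference = Inference(model)
scores = inference("some_test_file.wav")
print(scores.data.shape)  # (num_frames, num_speakers)
```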
ci-segmentation/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5dd9ef9b5949955ee2ec9535c8e83219739a25f30907f62aeafd50a2c4251770
3
+ size 207983
embedding/2.1/.gitattributes ADDED
@@ -0,0 +1,16 @@
1
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
2
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.h5 filter=lfs diff=lfs merge=lfs -text
5
+ *.tflite filter=lfs diff=lfs merge=lfs -text
6
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.ot filter=lfs diff=lfs merge=lfs -text
8
+ *.onnx filter=lfs diff=lfs merge=lfs -text
9
+ *.arrow filter=lfs diff=lfs merge=lfs -text
10
+ *.ftz filter=lfs diff=lfs merge=lfs -text
11
+ *.joblib filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.pb filter=lfs diff=lfs merge=lfs -text
15
+ *.pt filter=lfs diff=lfs merge=lfs -text
16
+ *.pth filter=lfs diff=lfs merge=lfs -text
embedding/2.1/README.md ADDED
@@ -0,0 +1,121 @@
1
+ ---
2
+ tags:
3
+ - pyannote
4
+ - pyannote-audio
5
+ - pyannote-audio-model
6
+ - audio
7
+ - voice
8
+ - speech
9
+ - speaker
10
+ - speaker-recognition
11
+ - speaker-verification
12
+ - speaker-identification
13
+ - speaker-embedding
14
+ datasets:
15
+ - voxceleb
16
+ license: mit
17
+ inference: false
18
+ extra_gated_prompt: "The collected information will help acquire a better knowledge of pyannote.audio userbase and help its maintainers apply for grants to improve it further. If you are an academic researcher, please cite the relevant papers in your own publications using the model. If you work for a company, please consider contributing back to pyannote.audio development (e.g. through unrestricted gifts). We also provide scientific consulting services around speaker diarization and machine listening."
19
+ extra_gated_fields:
20
+ Company/university: text
21
+ Website: text
22
+ I plan to use this model for (task, type of audio data, etc): text
23
+ ---
24
+
25
+ # 🎹 Speaker embedding
26
+
27
+ Relies on pyannote.audio 2.1: see [installation instructions](https://github.com/pyannote/pyannote-audio/).
28
+
29
+ This model is based on the [canonical x-vector TDNN-based architecture](https://ieeexplore.ieee.org/abstract/document/8461375), but with filter banks replaced with [trainable SincNet features](https://ieeexplore.ieee.org/document/8639585). See [`XVectorSincNet`](https://github.com/pyannote/pyannote-audio/blob/3c988c028dc505c64fe776720372f6fe816b585a/pyannote/audio/models/embedding/xvector.py#L104-L169) architecture for implementation details.
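For reference only, a sketch of instantiating that architecture directly. The constructor arguments below simply mirror the `hparams.yaml` stored alongside this model (16 kHz mono audio, SincNet stride 10, 512-dimensional output), and the exact signature is an assumption; loading the published weights with `Model.from_pretrained`, as shown below, is the intended route.

```python
# Sketch only: arguments mirror hparams.yaml (an assumption about the constructor),
# and the resulting model is randomly initialised, not the published checkpoint.
from pyannote.audio.models.embedding import XVectorSincNet

model = XVectorSincNet(
    sample_rate=16000,
    num_channels=1,
    sincnet={"stride": 10},
    dimension=512,
)
```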
30
+
31
+
32
+ ## Basic usage
33
+
34
+ ```python
35
+ # 1. visit hf.co/pyannote/embedding and accept user conditions (only if requested)
36
+ # 2. visit hf.co/settings/tokens to create an access token (only if you had to go through 1.)
37
+ # 3. instantiate pretrained model
38
+ from pyannote.audio import Model
39
+ model = Model.from_pretrained("pyannote/embedding",
40
+ use_auth_token="ACCESS_TOKEN_GOES_HERE")
41
+ ```
42
+
43
+ ```python
44
+ from pyannote.audio import Inference
45
+ inference = Inference(model, window="whole")
46
+ embedding1 = inference("speaker1.wav")
47
+ embedding2 = inference("speaker2.wav")
48
+ # `embeddingX` is (1 x D) numpy array extracted from the file as a whole.
49
+
50
+ from scipy.spatial.distance import cdist
51
+ distance = cdist(embedding1, embedding2, metric="cosine")[0,0]
52
+ # `distance` is a `float` describing how dissimilar speakers 1 and 2 are.
53
+ ```
54
+
55
+ Using cosine distance directly, this model reaches 2.8% equal error rate (EER) on VoxCeleb 1 test set.
56
+ This is without voice activity detection (VAD) nor probabilistic linear discriminant analysis (PLDA).
57
+ Expect even better results when adding one of those.
58
+
59
+ ## Advanced usage
60
+
61
+ ### Running on GPU
62
+
63
+ ```python
64
+ inference = Inference(model, window="whole", device="cuda")
65
+ embedding = inference("audio.wav")
66
+ ```
67
+
68
+ ### Extract embedding from an excerpt
69
+
70
+ ```python
71
+ from pyannote.audio import Inference, Segment
72
+ inference = Inference(model, window="whole")
73
+ excerpt = Segment(13.37, 19.81)
74
+ embedding = inference.crop("audio.wav", excerpt)
75
+ # `embedding` is (1 x D) numpy array extracted from the file excerpt.
76
+ ```
77
+
78
+ ### Extract embeddings using a sliding window
79
+
80
+ ```python
81
+ from pyannote.audio import Inference
82
+ inference = Inference(model, window="sliding",
83
+ duration=3.0, step=1.0)
84
+ embeddings = inference("audio.wav")
85
+ # `embeddings` is a (N x D) pyannote.core.SlidingWindowFeature
86
+ # `embeddings[i]` is the embedding of the ith position of the
87
+ # sliding window, i.e. from [i * step, i * step + duration].
88
+ ```
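A short sketch of putting those windowed embeddings to use, assuming `embeddings.sliding_window` indexes to the `pyannote.core.Segment` of each window (per the comment above), for instance to see where the voice changes most between consecutive windows:

```python
# Assumptions: `embeddings` is the SlidingWindowFeature from the snippet above,
# and indexing its sliding_window returns the Segment covered by each position.
from scipy.spatial.distance import cosine

window = embeddings.sliding_window
for i in range(len(embeddings.data) - 1):
    span = window[i]  # Segment [i * step, i * step + duration]
    drift = cosine(embeddings.data[i], embeddings.data[i + 1])
    print(f"{span.start:6.1f}s - {span.end:6.1f}s  distance to next window: {drift:.3f}")
```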
89
+
90
+ ## Support
91
+
92
+ For commercial enquiries and scientific consulting, please contact [me](mailto:herve@niderb.fr).
93
+ For [technical questions](https://github.com/pyannote/pyannote-audio/discussions) and [bug reports](https://github.com/pyannote/pyannote-audio/issues), please check [pyannote.audio](https://github.com/pyannote/pyannote-audio) Github repository.
94
+
95
+
96
+ ## Citation
97
+
98
+ ```bibtex
99
+ @inproceedings{Bredin2020,
100
+ Title = {{pyannote.audio: neural building blocks for speaker diarization}},
101
+ Author = {{Bredin}, Herv{\'e} and {Yin}, Ruiqing and {Coria}, Juan Manuel and {Gelly}, Gregory and {Korshunov}, Pavel and {Lavechin}, Marvin and {Fustes}, Diego and {Titeux}, Hadrien and {Bouaziz}, Wassim and {Gill}, Marie-Philippe},
102
+ Booktitle = {ICASSP 2020, IEEE International Conference on Acoustics, Speech, and Signal Processing},
103
+ Address = {Barcelona, Spain},
104
+ Month = {May},
105
+ Year = {2020},
106
+ }
107
+ ```
108
+
109
+ ```bibtex
110
+ @inproceedings{Coria2020,
111
+ author="Coria, Juan M. and Bredin, Herv{\'e} and Ghannay, Sahar and Rosset, Sophie",
112
+ editor="Espinosa-Anke, Luis and Mart{\'i}n-Vide, Carlos and Spasi{\'{c}}, Irena",
113
+ title="{A Comparison of Metric Learning Loss Functions for End-To-End Speaker Verification}",
114
+ booktitle="Statistical Language and Speech Processing",
115
+ year="2020",
116
+ publisher="Springer International Publishing",
117
+ pages="137--148",
118
+ isbn="978-3-030-59430-5"
119
+ }
120
+ ```
121
+
embedding/2.1/config.yaml ADDED
@@ -0,0 +1,85 @@
1
+ protocol: VoxCeleb.SpeakerVerification.VoxCeleb_X
2
+ patience: 5
3
+ task:
4
+ _target_: pyannote.audio.tasks.SupervisedRepresentationLearningWithArcFace
5
+ min_duration: 2
6
+ duration: 5.0
7
+ num_classes_per_batch: 64
8
+ num_chunks_per_class: 4
9
+ margin: 10.0
10
+ scale: 50.0
11
+ num_workers: 20
12
+ pin_memory: false
13
+ model:
14
+ _target_: pyannote.audio.models.embedding.XVectorSincNet
15
+ optimizer:
16
+ _target_: torch.optim.Adam
17
+ lr: 0.001
18
+ betas:
19
+ - 0.9
20
+ - 0.999
21
+ eps: 1.0e-08
22
+ weight_decay: 0
23
+ amsgrad: false
24
+ trainer:
25
+ _target_: pytorch_lightning.Trainer
26
+ accelerator: null
27
+ accumulate_grad_batches: 1
28
+ amp_backend: native
29
+ amp_level: O2
30
+ auto_lr_find: false
31
+ auto_scale_batch_size: false
32
+ auto_select_gpus: true
33
+ benchmark: false
34
+ check_val_every_n_epoch: 1
35
+ checkpoint_callback: true
36
+ deterministic: false
37
+ fast_dev_run: false
38
+ flush_logs_every_n_steps: 100
39
+ gpus: 1
40
+ gradient_clip_val: 0
41
+ limit_test_batches: 1.0
42
+ limit_train_batches: 1.0
43
+ limit_val_batches: 1.0
44
+ log_every_n_steps: 50
45
+ log_gpu_memory: null
46
+ max_epochs: 1000
47
+ max_steps: null
48
+ min_epochs: 1
49
+ min_steps: null
50
+ num_nodes: 1
51
+ num_processes: 1
52
+ num_sanity_val_steps: 2
53
+ overfit_batches: 0.0
54
+ precision: 32
55
+ prepare_data_per_node: true
56
+ process_position: 0
57
+ profiler: null
58
+ progress_bar_refresh_rate: 1
59
+ reload_dataloaders_every_epoch: false
60
+ replace_sampler_ddp: true
61
+ sync_batchnorm: false
62
+ terminate_on_nan: false
63
+ tpu_cores: null
64
+ track_grad_norm: -1
65
+ truncated_bptt_steps: null
66
+ val_check_interval: 1.0
67
+ weights_save_path: null
68
+ weights_summary: top
69
+ augmentation:
70
+ transform: Compose
71
+ params:
72
+ shuffle: false
73
+ transforms:
74
+ - transform: AddBackgroundNoise
75
+ params:
76
+ background_paths: /gpfswork/rech/eie/commun/data/background/musan
77
+ min_snr_in_db: 5.0
78
+ max_snr_in_db: 15.0
79
+ mode: per_example
80
+ p: 0.9
81
+ - transform: ApplyImpulseResponse
82
+ params:
83
+ ir_paths: /gpfswork/rech/eie/commun/data/rir
84
+ mode: per_example
85
+ p: 0.5
embedding/2.1/hparams.yaml ADDED
@@ -0,0 +1,6 @@
1
+ sample_rate: 16000
2
+ num_channels: 1
3
+ sincnet:
4
+ stride: 10
5
+ sample_rate: 16000
6
+ dimension: 512
embedding/2.1/hydra.yaml ADDED
@@ -0,0 +1,139 @@
1
+ hydra:
2
+ run:
3
+ dir: ${protocol}/${task._target_}/${now:%Y-%m-%d}/${now:%H-%M-%S}
4
+ sweep:
5
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}/${protocol}/${task._target_}
6
+ subdir: ${hydra.job.num}
7
+ hydra_logging:
8
+ version: 1
9
+ formatters:
10
+ simple:
11
+ format: '[%(asctime)s][HYDRA] %(message)s'
12
+ handlers:
13
+ console:
14
+ class: logging.StreamHandler
15
+ formatter: simple
16
+ stream: ext://sys.stdout
17
+ root:
18
+ level: INFO
19
+ handlers:
20
+ - console
21
+ loggers:
22
+ logging_example:
23
+ level: DEBUG
24
+ disable_existing_loggers: false
25
+ job_logging:
26
+ version: 1
27
+ formatters:
28
+ simple:
29
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
30
+ handlers:
31
+ console:
32
+ class: logging.StreamHandler
33
+ formatter: simple
34
+ stream: ext://sys.stdout
35
+ file:
36
+ class: logging.FileHandler
37
+ formatter: simple
38
+ filename: ${hydra.job.name}.log
39
+ root:
40
+ level: INFO
41
+ handlers:
42
+ - console
43
+ - file
44
+ disable_existing_loggers: false
45
+ sweeper:
46
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
47
+ max_batch_size: null
48
+ launcher:
49
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
50
+ help:
51
+ app_name: pyannote-audio-train
52
+ header: == ${hydra.help.app_name} ==
53
+ footer: 'Powered by Hydra (https://hydra.cc)
54
+
55
+ Use --hydra-help to view Hydra specific help'
56
+ template: "${hydra.help.header}\n\npyannote-audio-train protocol={protocol_name}\
57
+ \ task={task} model={model}\n\n{task} can be any of the following:\n* vad (default)\
58
+ \ = voice activity detection\n* scd = speaker change detection\n* osd = overlapped\
59
+ \ speech detection\n* xseg = multi-task segmentation\n\n{model} can be any of\
60
+ \ the following:\n* debug (default) = simple segmentation model for debugging\
61
+ \ purposes\n\n{optimizer} can be any of the following\n* adam (default) = Adam\
62
+ \ optimizer\n\n{trainer} can be any of the following\n* fast_dev_run for debugging\n\
63
+ * default (default) for training the model\n\nOptions\n=======\n\nHere, we describe\
64
+ \ the most common options: use \"--cfg job\" option to get a complete list.\n\
65
+ \n* task.duration: audio chunk duration (in seconds)\n* task.batch_size: number\
66
+ \ of audio chunks per batch\n* task.num_workers: number of workers used for\
67
+ \ generating training chunks\n\n* optimizer.lr: learning rate\n* trainer.auto_lr_find:\
68
+ \ use pytorch-lightning AutoLR\n\nHyper-parameter optimization\n============================\n\
69
+ \nBecause it is powered by Hydra (https://hydra.cc), one can run grid search\
70
+ \ using the --multirun option.\n\nFor instance, the following command will run\
71
+ \ the same job three times, with three different learning rates:\n pyannote-audio-train\
72
+ \ --multirun protocol={protocol_name} task={task} optimizer.lr=1e-3,1e-2,1e-1\n\
73
+ \nEven better, one can use Ax (https://ax.dev) sweeper to optimize learning\
74
+ \ rate directly:\n pyannote-audio-train --multirun hydra/sweeper=ax protocol={protocol_name}\
75
+ \ task={task} optimizer.lr=\"interval(1e-3, 1e-1)\"\n\nSee https://hydra.cc/docs/plugins/ax_sweeper\
76
+ \ for more details.\n\nUser-defined task or model\n==========================\n\
77
+ \n1. define your_package.YourTask (or your_package.YourModel) class\n2. create\
78
+ \ file /path/to/your_config/task/your_task.yaml (or /path/to/your_config/model/your_model.yaml)\n\
79
+ \ # @package _group_\n _target_: your_package.YourTask # or YourModel\n\
80
+ \ param1: value1\n param2: value2\n3. call pyannote-audio-train --config-dir\
81
+ \ /path/to/your_config task=your_task task.param1=modified_value1 model=your_model\
82
+ \ ...\n\n${hydra.help.footer}"
83
+ hydra_help:
84
+ hydra_help: ???
85
+ template: 'Hydra (${hydra.runtime.version})
86
+
87
+ See https://hydra.cc for more info.
88
+
89
+
90
+ == Flags ==
91
+
92
+ $FLAGS_HELP
93
+
94
+
95
+ == Configuration groups ==
96
+
97
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
98
+ to command line)
99
+
100
+
101
+ $HYDRA_CONFIG_GROUPS
102
+
103
+
104
+ Use ''--cfg hydra'' to Show the Hydra config.
105
+
106
+ '
107
+ output_subdir: ''
108
+ overrides:
109
+ hydra: []
110
+ task:
111
+ - protocol=VoxCeleb.SpeakerVerification.VoxCeleb_X
112
+ - task=SpeakerEmbedding
113
+ - task.num_workers=20
114
+ - task.min_duration=2
115
+ - task.duration=5.
116
+ - task.num_classes_per_batch=64
117
+ - task.num_chunks_per_class=4
118
+ - task.margin=10.0
119
+ - task.scale=50.
120
+ - model=XVectorSincNet
121
+ - trainer.gpus=1
122
+ - +augmentation=background_then_reverb
123
+ job:
124
+ name: train
125
+ override_dirname: +augmentation=background_then_reverb,model=XVectorSincNet,protocol=VoxCeleb.SpeakerVerification.VoxCeleb_X,task.duration=5.,task.margin=10.0,task.min_duration=2,task.num_chunks_per_class=4,task.num_classes_per_batch=64,task.num_workers=20,task.scale=50.,task=SpeakerEmbedding,trainer.gpus=1
126
+ id: ???
127
+ num: ???
128
+ config_name: config
129
+ env_set: {}
130
+ env_copy: []
131
+ config:
132
+ override_dirname:
133
+ kv_sep: '='
134
+ item_sep: ','
135
+ exclude_keys: []
136
+ runtime:
137
+ version: 1.0.4
138
+ cwd: /gpfsdswork/projects/rech/eie/uno46kl/xvectors/debug
139
+ verbose: false
embedding/2.1/overrides.yaml ADDED
@@ -0,0 +1,12 @@
1
+ - protocol=VoxCeleb.SpeakerVerification.VoxCeleb_X
2
+ - task=SpeakerEmbedding
3
+ - task.num_workers=20
4
+ - task.min_duration=2
5
+ - task.duration=5.
6
+ - task.num_classes_per_batch=64
7
+ - task.num_chunks_per_class=4
8
+ - task.margin=10.0
9
+ - task.scale=50.
10
+ - model=XVectorSincNet
11
+ - trainer.gpus=1
12
+ - +augmentation=background_then_reverb
embedding/2.1/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4bcec986de13da7af7ac88736572692359950df63669989c4f78b294934c9089
3
+ size 96383626
embedding/2.1/tfevents.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3319218e36d416c5400ffbc592acc2e1ab520a187d586be86db7eef30fb65616
3
+ size 5669685
embedding/2.1/train.log ADDED
File without changes
embedding/2022.07/.gitattributes ADDED
@@ -0,0 +1,16 @@
1
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
2
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.h5 filter=lfs diff=lfs merge=lfs -text
5
+ *.tflite filter=lfs diff=lfs merge=lfs -text
6
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.ot filter=lfs diff=lfs merge=lfs -text
8
+ *.onnx filter=lfs diff=lfs merge=lfs -text
9
+ *.arrow filter=lfs diff=lfs merge=lfs -text
10
+ *.ftz filter=lfs diff=lfs merge=lfs -text
11
+ *.joblib filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.pb filter=lfs diff=lfs merge=lfs -text
15
+ *.pt filter=lfs diff=lfs merge=lfs -text
16
+ *.pth filter=lfs diff=lfs merge=lfs -text
embedding/2022.07/README.md ADDED
@@ -0,0 +1,110 @@
1
+ ---
2
+ tags:
3
+ - pyannote
4
+ - pyannote-audio
5
+ - pyannote-audio-model
6
+ - audio
7
+ - voice
8
+ - speech
9
+ - speaker
10
+ - speaker-recognition
11
+ - speaker-verification
12
+ - speaker-identification
13
+ - speaker-embedding
14
+ datasets:
15
+ - voxceleb
16
+ license: mit
17
+ inference: false
18
+ ---
19
+
20
+ # 🎹 Speaker embedding
21
+
22
+
23
+ Relies on pyannote.audio 2.0 currently in development: see [installation instructions](https://github.com/pyannote/pyannote-audio/tree/develop#installation).
24
+
25
+ This model is based on the [canonical x-vector TDNN-based architecture](https://ieeexplore.ieee.org/abstract/document/8461375), but with filter banks replaced with [trainable SincNet features](https://ieeexplore.ieee.org/document/8639585). See [`XVectorSincNet`](https://github.com/pyannote/pyannote-audio/blob/3c988c028dc505c64fe776720372f6fe816b585a/pyannote/audio/models/embedding/xvector.py#L104-L169) architecture for implementation details.
26
+
27
+
28
+ ## Support
29
+
30
+ For commercial enquiries and scientific consulting, please contact [me](mailto:herve@niderb.fr).
31
+ For [technical questions](https://github.com/pyannote/pyannote-audio/discussions) and [bug reports](https://github.com/pyannote/pyannote-audio/issues), please check [pyannote.audio](https://github.com/pyannote/pyannote-audio) Github repository.
32
+
33
+
34
+ ## Basic usage
35
+
36
+ ```python
37
+ from pyannote.audio import Inference
38
+ inference = Inference("pyannote/embedding", window="whole")
39
+ embedding1 = inference("speaker1.wav")
40
+ embedding2 = inference("speaker2.wav")
41
+ # `embeddingX` is (1 x D) numpy array extracted from the file as a whole.
42
+
43
+ from scipy.spatial.distance import cdist
44
+ distance = cdist(embedding1, embedding2, metric="cosine")[0,0]
45
+ # `distance` is a `float` describing how dissimilar speakers 1 and 2 are.
46
+ ```
47
+
48
+ Using cosine distance directly, this model reaches 2.8% equal error rate (EER) on VoxCeleb 1 test set.
49
+ This is without voice activity detection (VAD) nor probabilistic linear discriminant analysis (PLDA).
50
+ Expect even better results when adding one of those.
51
+
52
+ ## Advanced usage
53
+
54
+ ### Running on GPU
55
+
56
+ ```python
57
+ inference = Inference("pyannote/embedding", window="whole", device="cuda")
58
+ embedding = inference("audio.wav")
59
+ ```
60
+
61
+ ### Extract embedding from an excerpt
62
+
63
+ ```python
64
+ from pyannote.audio import Inference, Segment
65
+ inference = Inference("pyannote/embedding",
66
+ window="whole")
67
+ excerpt = Segment(13.37, 19.81)
68
+ embedding = inference.crop("audio.wav", excerpt)
69
+ # `embedding` is (1 x D) numpy array extracted from the file excerpt.
70
+ ```
71
+
72
+ ### Extract embeddings using a sliding window
73
+
74
+ ```python
75
+ from pyannote.audio import Inference
76
+ inference = Inference("pyannote/embedding",
77
+ window="sliding",
78
+ duration=3.0, step=1.0)
79
+ embeddings = inference("audio.wav")
80
+ # `embeddings` is a (N x D) pyannote.core.SlidingWindowFeature
81
+ # `embeddings[i]` is the embedding of the ith position of the
82
+ # sliding window, i.e. from [i * step, i * step + duration].
83
+ ```
84
+
85
+
86
+ ## Citation
87
+
88
+ ```bibtex
89
+ @inproceedings{Bredin2020,
90
+ Title = {{pyannote.audio: neural building blocks for speaker diarization}},
91
+ Author = {{Bredin}, Herv{\'e} and {Yin}, Ruiqing and {Coria}, Juan Manuel and {Gelly}, Gregory and {Korshunov}, Pavel and {Lavechin}, Marvin and {Fustes}, Diego and {Titeux}, Hadrien and {Bouaziz}, Wassim and {Gill}, Marie-Philippe},
92
+ Booktitle = {ICASSP 2020, IEEE International Conference on Acoustics, Speech, and Signal Processing},
93
+ Address = {Barcelona, Spain},
94
+ Month = {May},
95
+ Year = {2020},
96
+ }
97
+ ```
98
+
99
+ ```bibtex
100
+ @inproceedings{Coria2020,
101
+ author="Coria, Juan M. and Bredin, Herv{\'e} and Ghannay, Sahar and Rosset, Sophie",
102
+ editor="Espinosa-Anke, Luis and Mart{\'i}n-Vide, Carlos and Spasi{\'{c}}, Irena",
103
+ title="{A Comparison of Metric Learning Loss Functions for End-To-End Speaker Verification}",
104
+ booktitle="Statistical Language and Speech Processing",
105
+ year="2020",
106
+ publisher="Springer International Publishing",
107
+ pages="137--148",
108
+ isbn="978-3-030-59430-5"
109
+ }
110
+ ```
embedding/2022.07/config.yaml ADDED
@@ -0,0 +1,85 @@
1
+ protocol: VoxCeleb.SpeakerVerification.VoxCeleb_X
2
+ patience: 5
3
+ task:
4
+ _target_: pyannote.audio.tasks.SupervisedRepresentationLearningWithArcFace
5
+ min_duration: 2
6
+ duration: 5.0
7
+ num_classes_per_batch: 64
8
+ num_chunks_per_class: 4
9
+ margin: 10.0
10
+ scale: 50.0
11
+ num_workers: 20
12
+ pin_memory: false
13
+ model:
14
+ _target_: pyannote.audio.models.embedding.XVectorSincNet
15
+ optimizer:
16
+ _target_: torch.optim.Adam
17
+ lr: 0.001
18
+ betas:
19
+ - 0.9
20
+ - 0.999
21
+ eps: 1.0e-08
22
+ weight_decay: 0
23
+ amsgrad: false
24
+ trainer:
25
+ _target_: pytorch_lightning.Trainer
26
+ accelerator: null
27
+ accumulate_grad_batches: 1
28
+ amp_backend: native
29
+ amp_level: O2
30
+ auto_lr_find: false
31
+ auto_scale_batch_size: false
32
+ auto_select_gpus: true
33
+ benchmark: false
34
+ check_val_every_n_epoch: 1
35
+ checkpoint_callback: true
36
+ deterministic: false
37
+ fast_dev_run: false
38
+ flush_logs_every_n_steps: 100
39
+ gpus: 1
40
+ gradient_clip_val: 0
41
+ limit_test_batches: 1.0
42
+ limit_train_batches: 1.0
43
+ limit_val_batches: 1.0
44
+ log_every_n_steps: 50
45
+ log_gpu_memory: null
46
+ max_epochs: 1000
47
+ max_steps: null
48
+ min_epochs: 1
49
+ min_steps: null
50
+ num_nodes: 1
51
+ num_processes: 1
52
+ num_sanity_val_steps: 2
53
+ overfit_batches: 0.0
54
+ precision: 32
55
+ prepare_data_per_node: true
56
+ process_position: 0
57
+ profiler: null
58
+ progress_bar_refresh_rate: 1
59
+ reload_dataloaders_every_epoch: false
60
+ replace_sampler_ddp: true
61
+ sync_batchnorm: false
62
+ terminate_on_nan: false
63
+ tpu_cores: null
64
+ track_grad_norm: -1
65
+ truncated_bptt_steps: null
66
+ val_check_interval: 1.0
67
+ weights_save_path: null
68
+ weights_summary: top
69
+ augmentation:
70
+ transform: Compose
71
+ params:
72
+ shuffle: false
73
+ transforms:
74
+ - transform: AddBackgroundNoise
75
+ params:
76
+ background_paths: /gpfswork/rech/eie/commun/data/background/musan
77
+ min_snr_in_db: 5.0
78
+ max_snr_in_db: 15.0
79
+ mode: per_example
80
+ p: 0.9
81
+ - transform: ApplyImpulseResponse
82
+ params:
83
+ ir_paths: /gpfswork/rech/eie/commun/data/rir
84
+ mode: per_example
85
+ p: 0.5
embedding/2022.07/hparams.yaml ADDED
@@ -0,0 +1,6 @@
1
+ sample_rate: 16000
2
+ num_channels: 1
3
+ sincnet:
4
+ stride: 10
5
+ sample_rate: 16000
6
+ dimension: 512
embedding/2022.07/hydra.yaml ADDED
@@ -0,0 +1,139 @@
1
+ hydra:
2
+ run:
3
+ dir: ${protocol}/${task._target_}/${now:%Y-%m-%d}/${now:%H-%M-%S}
4
+ sweep:
5
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}/${protocol}/${task._target_}
6
+ subdir: ${hydra.job.num}
7
+ hydra_logging:
8
+ version: 1
9
+ formatters:
10
+ simple:
11
+ format: '[%(asctime)s][HYDRA] %(message)s'
12
+ handlers:
13
+ console:
14
+ class: logging.StreamHandler
15
+ formatter: simple
16
+ stream: ext://sys.stdout
17
+ root:
18
+ level: INFO
19
+ handlers:
20
+ - console
21
+ loggers:
22
+ logging_example:
23
+ level: DEBUG
24
+ disable_existing_loggers: false
25
+ job_logging:
26
+ version: 1
27
+ formatters:
28
+ simple:
29
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
30
+ handlers:
31
+ console:
32
+ class: logging.StreamHandler
33
+ formatter: simple
34
+ stream: ext://sys.stdout
35
+ file:
36
+ class: logging.FileHandler
37
+ formatter: simple
38
+ filename: ${hydra.job.name}.log
39
+ root:
40
+ level: INFO
41
+ handlers:
42
+ - console
43
+ - file
44
+ disable_existing_loggers: false
45
+ sweeper:
46
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
47
+ max_batch_size: null
48
+ launcher:
49
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
50
+ help:
51
+ app_name: pyannote-audio-train
52
+ header: == ${hydra.help.app_name} ==
53
+ footer: 'Powered by Hydra (https://hydra.cc)
54
+
55
+ Use --hydra-help to view Hydra specific help'
56
+ template: "${hydra.help.header}\n\npyannote-audio-train protocol={protocol_name}\
57
+ \ task={task} model={model}\n\n{task} can be any of the following:\n* vad (default)\
58
+ \ = voice activity detection\n* scd = speaker change detection\n* osd = overlapped\
59
+ \ speech detection\n* xseg = multi-task segmentation\n\n{model} can be any of\
60
+ \ the following:\n* debug (default) = simple segmentation model for debugging\
61
+ \ purposes\n\n{optimizer} can be any of the following\n* adam (default) = Adam\
62
+ \ optimizer\n\n{trainer} can be any of the following\n* fast_dev_run for debugging\n\
63
+ * default (default) for training the model\n\nOptions\n=======\n\nHere, we describe\
64
+ \ the most common options: use \"--cfg job\" option to get a complete list.\n\
65
+ \n* task.duration: audio chunk duration (in seconds)\n* task.batch_size: number\
66
+ \ of audio chunks per batch\n* task.num_workers: number of workers used for\
67
+ \ generating training chunks\n\n* optimizer.lr: learning rate\n* trainer.auto_lr_find:\
68
+ \ use pytorch-lightning AutoLR\n\nHyper-parameter optimization\n============================\n\
69
+ \nBecause it is powered by Hydra (https://hydra.cc), one can run grid search\
70
+ \ using the --multirun option.\n\nFor instance, the following command will run\
71
+ \ the same job three times, with three different learning rates:\n pyannote-audio-train\
72
+ \ --multirun protocol={protocol_name} task={task} optimizer.lr=1e-3,1e-2,1e-1\n\
73
+ \nEven better, one can use Ax (https://ax.dev) sweeper to optimize learning\
74
+ \ rate directly:\n pyannote-audio-train --multirun hydra/sweeper=ax protocol={protocol_name}\
75
+ \ task={task} optimizer.lr=\"interval(1e-3, 1e-1)\"\n\nSee https://hydra.cc/docs/plugins/ax_sweeper\
76
+ \ for more details.\n\nUser-defined task or model\n==========================\n\
77
+ \n1. define your_package.YourTask (or your_package.YourModel) class\n2. create\
78
+ \ file /path/to/your_config/task/your_task.yaml (or /path/to/your_config/model/your_model.yaml)\n\
79
+ \ # @package _group_\n _target_: your_package.YourTask # or YourModel\n\
80
+ \ param1: value1\n param2: value2\n3. call pyannote-audio-train --config-dir\
81
+ \ /path/to/your_config task=your_task task.param1=modified_value1 model=your_model\
82
+ \ ...\n\n${hydra.help.footer}"
83
+ hydra_help:
84
+ hydra_help: ???
85
+ template: 'Hydra (${hydra.runtime.version})
86
+
87
+ See https://hydra.cc for more info.
88
+
89
+
90
+ == Flags ==
91
+
92
+ $FLAGS_HELP
93
+
94
+
95
+ == Configuration groups ==
96
+
97
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
98
+ to command line)
99
+
100
+
101
+ $HYDRA_CONFIG_GROUPS
102
+
103
+
104
+ Use ''--cfg hydra'' to Show the Hydra config.
105
+
106
+ '
107
+ output_subdir: ''
108
+ overrides:
109
+ hydra: []
110
+ task:
111
+ - protocol=VoxCeleb.SpeakerVerification.VoxCeleb_X
112
+ - task=SpeakerEmbedding
113
+ - task.num_workers=20
114
+ - task.min_duration=2
115
+ - task.duration=5.
116
+ - task.num_classes_per_batch=64
117
+ - task.num_chunks_per_class=4
118
+ - task.margin=10.0
119
+ - task.scale=50.
120
+ - model=XVectorSincNet
121
+ - trainer.gpus=1
122
+ - +augmentation=background_then_reverb
123
+ job:
124
+ name: train
125
+ override_dirname: +augmentation=background_then_reverb,model=XVectorSincNet,protocol=VoxCeleb.SpeakerVerification.VoxCeleb_X,task.duration=5.,task.margin=10.0,task.min_duration=2,task.num_chunks_per_class=4,task.num_classes_per_batch=64,task.num_workers=20,task.scale=50.,task=SpeakerEmbedding,trainer.gpus=1
126
+ id: ???
127
+ num: ???
128
+ config_name: config
129
+ env_set: {}
130
+ env_copy: []
131
+ config:
132
+ override_dirname:
133
+ kv_sep: '='
134
+ item_sep: ','
135
+ exclude_keys: []
136
+ runtime:
137
+ version: 1.0.4
138
+ cwd: /gpfsdswork/projects/rech/eie/uno46kl/xvectors/debug
139
+ verbose: false
embedding/2022.07/overrides.yaml ADDED
@@ -0,0 +1,12 @@
1
+ - protocol=VoxCeleb.SpeakerVerification.VoxCeleb_X
2
+ - task=SpeakerEmbedding
3
+ - task.num_workers=20
4
+ - task.min_duration=2
5
+ - task.duration=5.
6
+ - task.num_classes_per_batch=64
7
+ - task.num_chunks_per_class=4
8
+ - task.margin=10.0
9
+ - task.scale=50.
10
+ - model=XVectorSincNet
11
+ - trainer.gpus=1
12
+ - +augmentation=background_then_reverb
embedding/2022.07/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4bcec986de13da7af7ac88736572692359950df63669989c4f78b294934c9089
3
+ size 96383626
embedding/2022.07/tfevents.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3319218e36d416c5400ffbc592acc2e1ab520a187d586be86db7eef30fb65616
3
+ size 5669685
embedding/2022.07/train.log ADDED
File without changes
embedding/ASRU2021/.gitattributes ADDED
@@ -0,0 +1,16 @@
1
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
2
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.h5 filter=lfs diff=lfs merge=lfs -text
5
+ *.tflite filter=lfs diff=lfs merge=lfs -text
6
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.ot filter=lfs diff=lfs merge=lfs -text
8
+ *.onnx filter=lfs diff=lfs merge=lfs -text
9
+ *.arrow filter=lfs diff=lfs merge=lfs -text
10
+ *.ftz filter=lfs diff=lfs merge=lfs -text
11
+ *.joblib filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.pb filter=lfs diff=lfs merge=lfs -text
15
+ *.pt filter=lfs diff=lfs merge=lfs -text
16
+ *.pth filter=lfs diff=lfs merge=lfs -text
embedding/ASRU2021/README.md ADDED
@@ -0,0 +1,107 @@
1
+ ---
2
+ tags:
3
+ - pyannote
4
+ - audio
5
+ - voice
6
+ - speech
7
+ - speaker
8
+ - speaker-recognition
9
+ - speaker-verification
10
+ - speaker-identification
11
+ - speaker-embedding
12
+ datasets:
13
+ - voxceleb
14
+ license: mit
15
+ inference: false
16
+ ---
17
+
18
+ ## pyannote.audio // speaker embedding
19
+
20
+ Relies on pyannote.audio 2.0 currently in development: see [installation instructions](https://github.com/pyannote/pyannote-audio/tree/develop#installation).
21
+
22
+ This model is based on the [canonical x-vector TDNN-based architecture](https://ieeexplore.ieee.org/abstract/document/8461375), but with filter banks replaced with [trainable SincNet features](https://ieeexplore.ieee.org/document/8639585). See [`XVectorSincNet`](https://github.com/pyannote/pyannote-audio/blob/3c988c028dc505c64fe776720372f6fe816b585a/pyannote/audio/models/embedding/xvector.py#L104-L169) architecture for implementation details.
23
+
24
+
25
+ ## Support
26
+
27
+ For commercial enquiries and scientific consulting, please contact [me](mailto:herve@niderb.fr).
28
+ For [technical questions](https://github.com/pyannote/pyannote-audio/discussions) and [bug reports](https://github.com/pyannote/pyannote-audio/issues), please check [pyannote.audio](https://github.com/pyannote/pyannote-audio) Github repository.
29
+
30
+
31
+ ## Basic usage
32
+
33
+ ```python
34
+ from pyannote.audio import Inference
35
+ inference = Inference("pyannote/embedding", window="whole")
36
+ embedding1 = inference("speaker1.wav")
37
+ embedding2 = inference("speaker2.wav")
38
+ # `embeddingX` is (1 x D) numpy array extracted from the file as a whole.
39
+
40
+ from scipy.spatial.distance import cdist
41
+ distance = cdist(embedding1, embedding2, metric="cosine")[0,0]
42
+ # `distance` is a `float` describing how dissimilar speakers 1 and 2 are.
43
+ ```
44
+
45
+ Using cosine distance directly, this model reaches 2.8% equal error rate (EER) on VoxCeleb 1 test set.
46
+ This is without voice activity detection (VAD) nor probabilistic linear discriminant analysis (PLDA).
47
+ Expect even better results when adding one of those.
48
+
49
+ ## Advanced usage
50
+
51
+ ### Running on GPU
52
+
53
+ ```python
54
+ inference = Inference("pyannote/embedding", window="whole", device="cuda")
55
+ embedding = inference("audio.wav")
56
+ ```
57
+
58
+ ### Extract embedding from an excerpt
59
+
60
+ ```python
61
+ from pyannote.audio import Inference, Segment
62
+ inference = Inference("pyannote/embedding",
63
+ window="whole")
64
+ excerpt = Segment(13.37, 19.81)
65
+ embedding = inference.crop("audio.wav", excerpt)
66
+ # `embedding` is (1 x D) numpy array extracted from the file excerpt.
67
+ ```
68
+
69
+ ### Extract embeddings using a sliding window
70
+
71
+ ```python
72
+ from pyannote.audio import Inference
73
+ inference = Inference("pyannote/embedding",
74
+ window="sliding",
75
+ duration=3.0, step=1.0)
76
+ embeddings = inference("audio.wav")
77
+ # `embeddings` is a (N x D) pyannote.core.SlidingWindowFeature
78
+ # `embeddings[i]` is the embedding of the ith position of the
79
+ # sliding window, i.e. from [i * step, i * step + duration].
80
+ ```
81
+
82
+
83
+ ## Citation
84
+
85
+ ```bibtex
86
+ @inproceedings{Bredin2020,
87
+ Title = {{pyannote.audio: neural building blocks for speaker diarization}},
88
+ Author = {{Bredin}, Herv{\'e} and {Yin}, Ruiqing and {Coria}, Juan Manuel and {Gelly}, Gregory and {Korshunov}, Pavel and {Lavechin}, Marvin and {Fustes}, Diego and {Titeux}, Hadrien and {Bouaziz}, Wassim and {Gill}, Marie-Philippe},
89
+ Booktitle = {ICASSP 2020, IEEE International Conference on Acoustics, Speech, and Signal Processing},
90
+ Address = {Barcelona, Spain},
91
+ Month = {May},
92
+ Year = {2020},
93
+ }
94
+ ```
95
+
96
+ ```bibtex
97
+ @inproceedings{Coria2020,
98
+ author="Coria, Juan M. and Bredin, Herv{\'e} and Ghannay, Sahar and Rosset, Sophie",
99
+ editor="Espinosa-Anke, Luis and Mart{\'i}n-Vide, Carlos and Spasi{\'{c}}, Irena",
100
+ title="{A Comparison of Metric Learning Loss Functions for End-To-End Speaker Verification}",
101
+ booktitle="Statistical Language and Speech Processing",
102
+ year="2020",
103
+ publisher="Springer International Publishing",
104
+ pages="137--148",
105
+ isbn="978-3-030-59430-5"
106
+ }
107
+ ```
embedding/ASRU2021/config.yaml ADDED
@@ -0,0 +1,85 @@
1
+ protocol: VoxCeleb.SpeakerVerification.VoxCeleb_X
2
+ patience: 5
3
+ task:
4
+ _target_: pyannote.audio.tasks.SupervisedRepresentationLearningWithArcFace
5
+ min_duration: 2
6
+ duration: 5.0
7
+ num_classes_per_batch: 64
8
+ num_chunks_per_class: 4
9
+ margin: 10.0
10
+ scale: 50.0
11
+ num_workers: 20
12
+ pin_memory: false
13
+ model:
14
+ _target_: pyannote.audio.models.embedding.XVectorSincNet
15
+ optimizer:
16
+ _target_: torch.optim.Adam
17
+ lr: 0.001
18
+ betas:
19
+ - 0.9
20
+ - 0.999
21
+ eps: 1.0e-08
22
+ weight_decay: 0
23
+ amsgrad: false
24
+ trainer:
25
+ _target_: pytorch_lightning.Trainer
26
+ accelerator: null
27
+ accumulate_grad_batches: 1
28
+ amp_backend: native
29
+ amp_level: O2
30
+ auto_lr_find: false
31
+ auto_scale_batch_size: false
32
+ auto_select_gpus: true
33
+ benchmark: false
34
+ check_val_every_n_epoch: 1
35
+ checkpoint_callback: true
36
+ deterministic: false
37
+ fast_dev_run: false
38
+ flush_logs_every_n_steps: 100
39
+ gpus: 1
40
+ gradient_clip_val: 0
41
+ limit_test_batches: 1.0
42
+ limit_train_batches: 1.0
43
+ limit_val_batches: 1.0
44
+ log_every_n_steps: 50
45
+ log_gpu_memory: null
46
+ max_epochs: 1000
47
+ max_steps: null
48
+ min_epochs: 1
49
+ min_steps: null
50
+ num_nodes: 1
51
+ num_processes: 1
52
+ num_sanity_val_steps: 2
53
+ overfit_batches: 0.0
54
+ precision: 32
55
+ prepare_data_per_node: true
56
+ process_position: 0
57
+ profiler: null
58
+ progress_bar_refresh_rate: 1
59
+ reload_dataloaders_every_epoch: false
60
+ replace_sampler_ddp: true
61
+ sync_batchnorm: false
62
+ terminate_on_nan: false
63
+ tpu_cores: null
64
+ track_grad_norm: -1
65
+ truncated_bptt_steps: null
66
+ val_check_interval: 1.0
67
+ weights_save_path: null
68
+ weights_summary: top
69
+ augmentation:
70
+ transform: Compose
71
+ params:
72
+ shuffle: false
73
+ transforms:
74
+ - transform: AddBackgroundNoise
75
+ params:
76
+ background_paths: /gpfswork/rech/eie/commun/data/background/musan
77
+ min_snr_in_db: 5.0
78
+ max_snr_in_db: 15.0
79
+ mode: per_example
80
+ p: 0.9
81
+ - transform: ApplyImpulseResponse
82
+ params:
83
+ ir_paths: /gpfswork/rech/eie/commun/data/rir
84
+ mode: per_example
85
+ p: 0.5
embedding/ASRU2021/hparams.yaml ADDED
@@ -0,0 +1,6 @@
1
+ sample_rate: 16000
2
+ num_channels: 1
3
+ sincnet:
4
+ stride: 10
5
+ sample_rate: 16000
6
+ dimension: 512
embedding/ASRU2021/hydra.yaml ADDED
@@ -0,0 +1,139 @@
1
+ hydra:
2
+ run:
3
+ dir: ${protocol}/${task._target_}/${now:%Y-%m-%d}/${now:%H-%M-%S}
4
+ sweep:
5
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}/${protocol}/${task._target_}
6
+ subdir: ${hydra.job.num}
7
+ hydra_logging:
8
+ version: 1
9
+ formatters:
10
+ simple:
11
+ format: '[%(asctime)s][HYDRA] %(message)s'
12
+ handlers:
13
+ console:
14
+ class: logging.StreamHandler
15
+ formatter: simple
16
+ stream: ext://sys.stdout
17
+ root:
18
+ level: INFO
19
+ handlers:
20
+ - console
21
+ loggers:
22
+ logging_example:
23
+ level: DEBUG
24
+ disable_existing_loggers: false
25
+ job_logging:
26
+ version: 1
27
+ formatters:
28
+ simple:
29
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
30
+ handlers:
31
+ console:
32
+ class: logging.StreamHandler
33
+ formatter: simple
34
+ stream: ext://sys.stdout
35
+ file:
36
+ class: logging.FileHandler
37
+ formatter: simple
38
+ filename: ${hydra.job.name}.log
39
+ root:
40
+ level: INFO
41
+ handlers:
42
+ - console
43
+ - file
44
+ disable_existing_loggers: false
45
+ sweeper:
46
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
47
+ max_batch_size: null
48
+ launcher:
49
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
50
+ help:
51
+ app_name: pyannote-audio-train
52
+ header: == ${hydra.help.app_name} ==
53
+ footer: 'Powered by Hydra (https://hydra.cc)
54
+
55
+ Use --hydra-help to view Hydra specific help'
56
+ template: "${hydra.help.header}\n\npyannote-audio-train protocol={protocol_name}\
57
+ \ task={task} model={model}\n\n{task} can be any of the following:\n* vad (default)\
58
+ \ = voice activity detection\n* scd = speaker change detection\n* osd = overlapped\
59
+ \ speech detection\n* xseg = multi-task segmentation\n\n{model} can be any of\
60
+ \ the following:\n* debug (default) = simple segmentation model for debugging\
61
+ \ purposes\n\n{optimizer} can be any of the following\n* adam (default) = Adam\
62
+ \ optimizer\n\n{trainer} can be any of the following\n* fast_dev_run for debugging\n\
63
+ * default (default) for training the model\n\nOptions\n=======\n\nHere, we describe\
64
+ \ the most common options: use \"--cfg job\" option to get a complete list.\n\
65
+ \n* task.duration: audio chunk duration (in seconds)\n* task.batch_size: number\
66
+ \ of audio chunks per batch\n* task.num_workers: number of workers used for\
67
+ \ generating training chunks\n\n* optimizer.lr: learning rate\n* trainer.auto_lr_find:\
68
+ \ use pytorch-lightning AutoLR\n\nHyper-parameter optimization\n============================\n\
69
+ \nBecause it is powered by Hydra (https://hydra.cc), one can run grid search\
70
+ \ using the --multirun option.\n\nFor instance, the following command will run\
71
+ \ the same job three times, with three different learning rates:\n pyannote-audio-train\
72
+ \ --multirun protocol={protocol_name} task={task} optimizer.lr=1e-3,1e-2,1e-1\n\
73
+ \nEven better, one can use Ax (https://ax.dev) sweeper to optimize learning\
74
+ \ rate directly:\n pyannote-audio-train --multirun hydra/sweeper=ax protocol={protocol_name}\
75
+ \ task={task} optimizer.lr=\"interval(1e-3, 1e-1)\"\n\nSee https://hydra.cc/docs/plugins/ax_sweeper\
76
+ \ for more details.\n\nUser-defined task or model\n==========================\n\
77
+ \n1. define your_package.YourTask (or your_package.YourModel) class\n2. create\
78
+ \ file /path/to/your_config/task/your_task.yaml (or /path/to/your_config/model/your_model.yaml)\n\
79
+ \ # @package _group_\n _target_: your_package.YourTask # or YourModel\n\
80
+ \ param1: value1\n param2: value2\n3. call pyannote-audio-train --config-dir\
81
+ \ /path/to/your_config task=your_task task.param1=modified_value1 model=your_model\
82
+ \ ...\n\n${hydra.help.footer}"
83
+ hydra_help:
84
+ hydra_help: ???
85
+ template: 'Hydra (${hydra.runtime.version})
86
+
87
+ See https://hydra.cc for more info.
88
+
89
+
90
+ == Flags ==
91
+
92
+ $FLAGS_HELP
93
+
94
+
95
+ == Configuration groups ==
96
+
97
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
98
+ to command line)
99
+
100
+
101
+ $HYDRA_CONFIG_GROUPS
102
+
103
+
104
+ Use ''--cfg hydra'' to Show the Hydra config.
105
+
106
+ '
107
+ output_subdir: ''
108
+ overrides:
109
+ hydra: []
110
+ task:
111
+ - protocol=VoxCeleb.SpeakerVerification.VoxCeleb_X
112
+ - task=SpeakerEmbedding
113
+ - task.num_workers=20
114
+ - task.min_duration=2
115
+ - task.duration=5.
116
+ - task.num_classes_per_batch=64
117
+ - task.num_chunks_per_class=4
118
+ - task.margin=10.0
119
+ - task.scale=50.
120
+ - model=XVectorSincNet
121
+ - trainer.gpus=1
122
+ - +augmentation=background_then_reverb
123
+ job:
124
+ name: train
125
+ override_dirname: +augmentation=background_then_reverb,model=XVectorSincNet,protocol=VoxCeleb.SpeakerVerification.VoxCeleb_X,task.duration=5.,task.margin=10.0,task.min_duration=2,task.num_chunks_per_class=4,task.num_classes_per_batch=64,task.num_workers=20,task.scale=50.,task=SpeakerEmbedding,trainer.gpus=1
126
+ id: ???
127
+ num: ???
128
+ config_name: config
129
+ env_set: {}
130
+ env_copy: []
131
+ config:
132
+ override_dirname:
133
+ kv_sep: '='
134
+ item_sep: ','
135
+ exclude_keys: []
136
+ runtime:
137
+ version: 1.0.4
138
+ cwd: /gpfsdswork/projects/rech/eie/uno46kl/xvectors/debug
139
+ verbose: false
embedding/ASRU2021/overrides.yaml ADDED
@@ -0,0 +1,12 @@
1
+ - protocol=VoxCeleb.SpeakerVerification.VoxCeleb_X
2
+ - task=SpeakerEmbedding
3
+ - task.num_workers=20
4
+ - task.min_duration=2
5
+ - task.duration=5.
6
+ - task.num_classes_per_batch=64
7
+ - task.num_chunks_per_class=4
8
+ - task.margin=10.0
9
+ - task.scale=50.
10
+ - model=XVectorSincNet
11
+ - trainer.gpus=1
12
+ - +augmentation=background_then_reverb
embedding/ASRU2021/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4bcec986de13da7af7ac88736572692359950df63669989c4f78b294934c9089
3
+ size 96383626
embedding/ASRU2021/tfevents.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3319218e36d416c5400ffbc592acc2e1ab520a187d586be86db7eef30fb65616
3
+ size 5669685
embedding/ASRU2021/train.log ADDED
File without changes
embedding/develop/.gitattributes ADDED
@@ -0,0 +1,16 @@
1
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
2
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.h5 filter=lfs diff=lfs merge=lfs -text
5
+ *.tflite filter=lfs diff=lfs merge=lfs -text
6
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.ot filter=lfs diff=lfs merge=lfs -text
8
+ *.onnx filter=lfs diff=lfs merge=lfs -text
9
+ *.arrow filter=lfs diff=lfs merge=lfs -text
10
+ *.ftz filter=lfs diff=lfs merge=lfs -text
11
+ *.joblib filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.pb filter=lfs diff=lfs merge=lfs -text
15
+ *.pt filter=lfs diff=lfs merge=lfs -text
16
+ *.pth filter=lfs diff=lfs merge=lfs -text
embedding/develop/README.md ADDED
@@ -0,0 +1,121 @@
1
+ ---
2
+ tags:
3
+ - pyannote
4
+ - pyannote-audio
5
+ - pyannote-audio-model
6
+ - audio
7
+ - voice
8
+ - speech
9
+ - speaker
10
+ - speaker-recognition
11
+ - speaker-verification
12
+ - speaker-identification
13
+ - speaker-embedding
14
+ datasets:
15
+ - voxceleb
16
+ license: mit
17
+ inference: false
18
+ extra_gated_prompt: "The collected information will help acquire a better knowledge of pyannote.audio userbase and help its maintainers apply for grants to improve it further. If you are an academic researcher, please cite the relevant papers in your own publications using the model. If you work for a company, please consider contributing back to pyannote.audio development (e.g. through unrestricted gifts). We also provide scientific consulting services around speaker diarization and machine listening."
19
+ extra_gated_fields:
20
+ Company/university: text
21
+ Website: text
22
+ I plan to use this model for (task, type of audio data, etc): text
23
+ ---
24
+
25
+ # 🎹 Speaker embedding
26
+
27
+ Relies on pyannote.audio 2.1: see [installation instructions](https://github.com/pyannote/pyannote-audio/).
28
+
29
+ This model is based on the [canonical x-vector TDNN-based architecture](https://ieeexplore.ieee.org/abstract/document/8461375), but with filter banks replaced with [trainable SincNet features](https://ieeexplore.ieee.org/document/8639585). See [`XVectorSincNet`](https://github.com/pyannote/pyannote-audio/blob/3c988c028dc505c64fe776720372f6fe816b585a/pyannote/audio/models/embedding/xvector.py#L104-L169) architecture for implementation details.
30
+
31
+
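+ As a quick orientation, the snippet below is a minimal sketch (not part of the original model card) showing how the same architecture could be instantiated from scratch. It assumes that the `XVectorSincNet` constructor mirrors the keys stored in this repository's `hparams.yaml` (`sample_rate`, `num_channels`, `sincnet`, `dimension`); treat the keyword arguments as illustrative rather than authoritative.
+
+ ```python
+ # Sketch only: keyword arguments assumed to mirror hparams.yaml.
+ from pyannote.audio.models.embedding import XVectorSincNet
+
+ model = XVectorSincNet(
+     sample_rate=16000,                             # from hparams.yaml
+     num_channels=1,                                # from hparams.yaml
+     sincnet={"stride": 10, "sample_rate": 16000},  # from hparams.yaml
+     dimension=512,                                 # embedding size, from hparams.yaml
+ )
+ print(model)  # inspect the SincNet front-end followed by the TDNN (x-vector) layers
+ ```
+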
32
+ ## Basic usage
33
+
34
+ ```python
35
+ # 1. visit hf.co/pyannote/embedding and accept user conditions (only if requested)
36
+ # 2. visit hf.co/settings/tokens to create an access token (only if you had to go through 1.)
37
+ # 3. instantiate pretrained model
38
+ from pyannote.audio import Model
39
+ model = Model.from_pretrained("pyannote/embedding",
40
+ use_auth_token="ACCESS_TOKEN_GOES_HERE")
41
+ ```
42
+
43
+ ```python
44
+ from pyannote.audio import Inference
45
+ inference = Inference(model, window="whole")
46
+ embedding1 = inference("speaker1.wav")
47
+ embedding2 = inference("speaker2.wav")
48
+ # `embeddingX` is a (1 x D) numpy array extracted from the file as a whole.
49
+
50
+ from scipy.spatial.distance import cdist
51
+ distance = cdist(embedding1, embedding2, metric="cosine")[0,0]
52
+ # `distance` is a `float` describing how dissimilar speakers 1 and 2 are.
53
+ ```
54
+
55
+ Using cosine distance directly, this model reaches a 2.8% equal error rate (EER) on the VoxCeleb 1 test set.
56
+ This is without voice activity detection (VAD) or probabilistic linear discriminant analysis (PLDA).
57
+ Expect even better results when adding either of those.
58
+
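+ For reference, here is a minimal sketch (not part of the original model card) of how EER can be estimated once a set of verification trials has been scored. The `scores` and `labels` arrays are made-up placeholders: in practice, each score would be a similarity (e.g. `1 - distance`, with `distance` computed as in the snippet above for each trial pair) and each label would mark whether the two files belong to the same speaker.
+
+ ```python
+ # Sketch only: made-up scores/labels, EER estimated from the ROC curve.
+ import numpy as np
+ from sklearn.metrics import roc_curve
+
+ scores = np.array([0.91, 0.12, 0.78, 0.33])  # hypothetical similarity scores (1 - cosine distance)
+ labels = np.array([1, 0, 1, 0])              # hypothetical ground truth (1 = same speaker)
+
+ fpr, tpr, _ = roc_curve(labels, scores)
+ fnr = 1 - tpr
+ idx = np.nanargmin(np.abs(fnr - fpr))        # operating point where FPR ~= FNR
+ eer = (fpr[idx] + fnr[idx]) / 2
+ print(f"EER = {100 * eer:.1f}%")
+ ```
+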
59
+ ## Advanced usage
60
+
61
+ ### Running on GPU
62
+
63
+ ```python
64
+ inference = Inference(model, window="whole", device="cuda")
65
+ embedding = inference("audio.wav")
66
+ ```
67
+
68
+ ### Extract embedding from an excerpt
69
+
70
+ ```python
71
+ from pyannote.audio import Inference
+ from pyannote.core import Segment
72
+ inference = Inference(model, window="whole")
73
+ excerpt = Segment(13.37, 19.81)
74
+ embedding = inference.crop("audio.wav", excerpt)
75
+ # `embedding` is a (1 x D) numpy array extracted from the file excerpt.
76
+ ```
77
+
78
+ ### Extract embeddings using a sliding window
79
+
80
+ ```python
81
+ from pyannote.audio import Inference
82
+ inference = Inference(model, window="sliding",
83
+ duration=3.0, step=1.0)
84
+ embeddings = inference("audio.wav")
85
+ # `embeddings` is a (N x D) pyannote.core.SlidingWindowFeature
86
+ # `embeddings[i]` is the embedding of the ith position of the
87
+ # sliding window, i.e. from [i * step, i * step + duration].
88
+ ```
89
+
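+ When a single vector per file is needed, one common (if crude) strategy is to average the sliding-window embeddings. The sketch below builds on the snippet above and uses the `data` attribute of the returned `SlidingWindowFeature`:
+
+ ```python
+ # Sketch only: mean-pool the (N x D) window embeddings into one (1 x D) file embedding.
+ import numpy as np
+
+ window_embeddings = embeddings.data                                  # (N x D) numpy array
+ file_embedding = np.mean(window_embeddings, axis=0, keepdims=True)   # (1 x D)
+ ```
+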
90
+ ## Support
91
+
92
+ For commercial enquiries and scientific consulting, please contact [me](mailto:herve@niderb.fr).
93
+ For [technical questions](https://github.com/pyannote/pyannote-audio/discussions) and [bug reports](https://github.com/pyannote/pyannote-audio/issues), please check the [pyannote.audio](https://github.com/pyannote/pyannote-audio) GitHub repository.
94
+
95
+
96
+ ## Citation
97
+
98
+ ```bibtex
99
+ @inproceedings{Bredin2020,
100
+ Title = {{pyannote.audio: neural building blocks for speaker diarization}},
101
+ Author = {{Bredin}, Herv{\'e} and {Yin}, Ruiqing and {Coria}, Juan Manuel and {Gelly}, Gregory and {Korshunov}, Pavel and {Lavechin}, Marvin and {Fustes}, Diego and {Titeux}, Hadrien and {Bouaziz}, Wassim and {Gill}, Marie-Philippe},
102
+ Booktitle = {ICASSP 2020, IEEE International Conference on Acoustics, Speech, and Signal Processing},
103
+ Address = {Barcelona, Spain},
104
+ Month = {May},
105
+ Year = {2020},
106
+ }
107
+ ```
108
+
109
+ ```bibtex
110
+ @inproceedings{Coria2020,
111
+ author="Coria, Juan M. and Bredin, Herv{\'e} and Ghannay, Sahar and Rosset, Sophie",
112
+ editor="Espinosa-Anke, Luis and Mart{\'i}n-Vide, Carlos and Spasi{\'{c}}, Irena",
113
+ title="{A Comparison of Metric Learning Loss Functions for End-To-End Speaker Verification}",
114
+ booktitle="Statistical Language and Speech Processing",
115
+ year="2020",
116
+ publisher="Springer International Publishing",
117
+ pages="137--148",
118
+ isbn="978-3-030-59430-5"
119
+ }
120
+ ```
121
+
embedding/develop/config.yaml ADDED
@@ -0,0 +1,85 @@
1
+ protocol: VoxCeleb.SpeakerVerification.VoxCeleb_X
2
+ patience: 5
3
+ task:
4
+ _target_: pyannote.audio.tasks.SupervisedRepresentationLearningWithArcFace
5
+ min_duration: 2
6
+ duration: 5.0
7
+ num_classes_per_batch: 64
8
+ num_chunks_per_class: 4
9
+ margin: 10.0
10
+ scale: 50.0
11
+ num_workers: 20
12
+ pin_memory: false
13
+ model:
14
+ _target_: pyannote.audio.models.embedding.XVectorSincNet
15
+ optimizer:
16
+ _target_: torch.optim.Adam
17
+ lr: 0.001
18
+ betas:
19
+ - 0.9
20
+ - 0.999
21
+ eps: 1.0e-08
22
+ weight_decay: 0
23
+ amsgrad: false
24
+ trainer:
25
+ _target_: pytorch_lightning.Trainer
26
+ accelerator: null
27
+ accumulate_grad_batches: 1
28
+ amp_backend: native
29
+ amp_level: O2
30
+ auto_lr_find: false
31
+ auto_scale_batch_size: false
32
+ auto_select_gpus: true
33
+ benchmark: false
34
+ check_val_every_n_epoch: 1
35
+ checkpoint_callback: true
36
+ deterministic: false
37
+ fast_dev_run: false
38
+ flush_logs_every_n_steps: 100
39
+ gpus: 1
40
+ gradient_clip_val: 0
41
+ limit_test_batches: 1.0
42
+ limit_train_batches: 1.0
43
+ limit_val_batches: 1.0
44
+ log_every_n_steps: 50
45
+ log_gpu_memory: null
46
+ max_epochs: 1000
47
+ max_steps: null
48
+ min_epochs: 1
49
+ min_steps: null
50
+ num_nodes: 1
51
+ num_processes: 1
52
+ num_sanity_val_steps: 2
53
+ overfit_batches: 0.0
54
+ precision: 32
55
+ prepare_data_per_node: true
56
+ process_position: 0
57
+ profiler: null
58
+ progress_bar_refresh_rate: 1
59
+ reload_dataloaders_every_epoch: false
60
+ replace_sampler_ddp: true
61
+ sync_batchnorm: false
62
+ terminate_on_nan: false
63
+ tpu_cores: null
64
+ track_grad_norm: -1
65
+ truncated_bptt_steps: null
66
+ val_check_interval: 1.0
67
+ weights_save_path: null
68
+ weights_summary: top
69
+ augmentation:
70
+ transform: Compose
71
+ params:
72
+ shuffle: false
73
+ transforms:
74
+ - transform: AddBackgroundNoise
75
+ params:
76
+ background_paths: /gpfswork/rech/eie/commun/data/background/musan
77
+ min_snr_in_db: 5.0
78
+ max_snr_in_db: 15.0
79
+ mode: per_example
80
+ p: 0.9
81
+ - transform: ApplyImpulseResponse
82
+ params:
83
+ ir_paths: /gpfswork/rech/eie/commun/data/rir
84
+ mode: per_example
85
+ p: 0.5
embedding/develop/hparams.yaml ADDED
@@ -0,0 +1,6 @@
1
+ sample_rate: 16000
2
+ num_channels: 1
3
+ sincnet:
4
+ stride: 10
5
+ sample_rate: 16000
6
+ dimension: 512
embedding/develop/hydra.yaml ADDED
@@ -0,0 +1,139 @@
1
+ hydra:
2
+ run:
3
+ dir: ${protocol}/${task._target_}/${now:%Y-%m-%d}/${now:%H-%M-%S}
4
+ sweep:
5
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}/${protocol}/${task._target_}
6
+ subdir: ${hydra.job.num}
7
+ hydra_logging:
8
+ version: 1
9
+ formatters:
10
+ simple:
11
+ format: '[%(asctime)s][HYDRA] %(message)s'
12
+ handlers:
13
+ console:
14
+ class: logging.StreamHandler
15
+ formatter: simple
16
+ stream: ext://sys.stdout
17
+ root:
18
+ level: INFO
19
+ handlers:
20
+ - console
21
+ loggers:
22
+ logging_example:
23
+ level: DEBUG
24
+ disable_existing_loggers: false
25
+ job_logging:
26
+ version: 1
27
+ formatters:
28
+ simple:
29
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
30
+ handlers:
31
+ console:
32
+ class: logging.StreamHandler
33
+ formatter: simple
34
+ stream: ext://sys.stdout
35
+ file:
36
+ class: logging.FileHandler
37
+ formatter: simple
38
+ filename: ${hydra.job.name}.log
39
+ root:
40
+ level: INFO
41
+ handlers:
42
+ - console
43
+ - file
44
+ disable_existing_loggers: false
45
+ sweeper:
46
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
47
+ max_batch_size: null
48
+ launcher:
49
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
50
+ help:
51
+ app_name: pyannote-audio-train
52
+ header: == ${hydra.help.app_name} ==
53
+ footer: 'Powered by Hydra (https://hydra.cc)
54
+
55
+ Use --hydra-help to view Hydra specific help'
56
+ template: "${hydra.help.header}\n\npyannote-audio-train protocol={protocol_name}\
57
+ \ task={task} model={model}\n\n{task} can be any of the following:\n* vad (default)\
58
+ \ = voice activity detection\n* scd = speaker change detection\n* osd = overlapped\
59
+ \ speech detection\n* xseg = multi-task segmentation\n\n{model} can be any of\
60
+ \ the following:\n* debug (default) = simple segmentation model for debugging\
61
+ \ purposes\n\n{optimizer} can be any of the following\n* adam (default) = Adam\
62
+ \ optimizer\n\n{trainer} can be any of the following\n* fast_dev_run for debugging\n\
63
+ * default (default) for training the model\n\nOptions\n=======\n\nHere, we describe\
64
+ \ the most common options: use \"--cfg job\" option to get a complete list.\n\
65
+ \n* task.duration: audio chunk duration (in seconds)\n* task.batch_size: number\
66
+ \ of audio chunks per batch\n* task.num_workers: number of workers used for\
67
+ \ generating training chunks\n\n* optimizer.lr: learning rate\n* trainer.auto_lr_find:\
68
+ \ use pytorch-lightning AutoLR\n\nHyper-parameter optimization\n============================\n\
69
+ \nBecause it is powered by Hydra (https://hydra.cc), one can run grid search\
70
+ \ using the --multirun option.\n\nFor instance, the following command will run\
71
+ \ the same job three times, with three different learning rates:\n pyannote-audio-train\
72
+ \ --multirun protocol={protocol_name} task={task} optimizer.lr=1e-3,1e-2,1e-1\n\
73
+ \nEven better, one can use Ax (https://ax.dev) sweeper to optimize learning\
74
+ \ rate directly:\n pyannote-audio-train --multirun hydra/sweeper=ax protocol={protocol_name}\
75
+ \ task={task} optimizer.lr=\"interval(1e-3, 1e-1)\"\n\nSee https://hydra.cc/docs/plugins/ax_sweeper\
76
+ \ for more details.\n\nUser-defined task or model\n==========================\n\
77
+ \n1. define your_package.YourTask (or your_package.YourModel) class\n2. create\
78
+ \ file /path/to/your_config/task/your_task.yaml (or /path/to/your_config/model/your_model.yaml)\n\
79
+ \ # @package _group_\n _target_: your_package.YourTask # or YourModel\n\
80
+ \ param1: value1\n param2: value2\n3. call pyannote-audio-train --config-dir\
81
+ \ /path/to/your_config task=your_task task.param1=modified_value1 model=your_model\
82
+ \ ...\n\n${hydra.help.footer}"
83
+ hydra_help:
84
+ hydra_help: ???
85
+ template: 'Hydra (${hydra.runtime.version})
86
+
87
+ See https://hydra.cc for more info.
88
+
89
+
90
+ == Flags ==
91
+
92
+ $FLAGS_HELP
93
+
94
+
95
+ == Configuration groups ==
96
+
97
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
98
+ to command line)
99
+
100
+
101
+ $HYDRA_CONFIG_GROUPS
102
+
103
+
104
+ Use ''--cfg hydra'' to Show the Hydra config.
105
+
106
+ '
107
+ output_subdir: ''
108
+ overrides:
109
+ hydra: []
110
+ task:
111
+ - protocol=VoxCeleb.SpeakerVerification.VoxCeleb_X
112
+ - task=SpeakerEmbedding
113
+ - task.num_workers=20
114
+ - task.min_duration=2
115
+ - task.duration=5.
116
+ - task.num_classes_per_batch=64
117
+ - task.num_chunks_per_class=4
118
+ - task.margin=10.0
119
+ - task.scale=50.
120
+ - model=XVectorSincNet
121
+ - trainer.gpus=1
122
+ - +augmentation=background_then_reverb
123
+ job:
124
+ name: train
125
+ override_dirname: +augmentation=background_then_reverb,model=XVectorSincNet,protocol=VoxCeleb.SpeakerVerification.VoxCeleb_X,task.duration=5.,task.margin=10.0,task.min_duration=2,task.num_chunks_per_class=4,task.num_classes_per_batch=64,task.num_workers=20,task.scale=50.,task=SpeakerEmbedding,trainer.gpus=1
126
+ id: ???
127
+ num: ???
128
+ config_name: config
129
+ env_set: {}
130
+ env_copy: []
131
+ config:
132
+ override_dirname:
133
+ kv_sep: '='
134
+ item_sep: ','
135
+ exclude_keys: []
136
+ runtime:
137
+ version: 1.0.4
138
+ cwd: /gpfsdswork/projects/rech/eie/uno46kl/xvectors/debug
139
+ verbose: false
embedding/develop/overrides.yaml ADDED
@@ -0,0 +1,12 @@
1
+ - protocol=VoxCeleb.SpeakerVerification.VoxCeleb_X
2
+ - task=SpeakerEmbedding
3
+ - task.num_workers=20
4
+ - task.min_duration=2
5
+ - task.duration=5.
6
+ - task.num_classes_per_batch=64
7
+ - task.num_chunks_per_class=4
8
+ - task.margin=10.0
9
+ - task.scale=50.
10
+ - model=XVectorSincNet
11
+ - trainer.gpus=1
12
+ - +augmentation=background_then_reverb
embedding/develop/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4bcec986de13da7af7ac88736572692359950df63669989c4f78b294934c9089
3
+ size 96383626
embedding/develop/tfevents.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3319218e36d416c5400ffbc592acc2e1ab520a187d586be86db7eef30fb65616
3
+ size 5669685
embedding/develop/train.log ADDED
File without changes
embedding/main/.gitattributes ADDED
@@ -0,0 +1,16 @@
1
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
2
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.h5 filter=lfs diff=lfs merge=lfs -text
5
+ *.tflite filter=lfs diff=lfs merge=lfs -text
6
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.ot filter=lfs diff=lfs merge=lfs -text
8
+ *.onnx filter=lfs diff=lfs merge=lfs -text
9
+ *.arrow filter=lfs diff=lfs merge=lfs -text
10
+ *.ftz filter=lfs diff=lfs merge=lfs -text
11
+ *.joblib filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.pb filter=lfs diff=lfs merge=lfs -text
15
+ *.pt filter=lfs diff=lfs merge=lfs -text
16
+ *.pth filter=lfs diff=lfs merge=lfs -text
embedding/main/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2022 CNRS
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
embedding/main/README.md ADDED
@@ -0,0 +1,121 @@
1
+ ---
2
+ tags:
3
+ - pyannote
4
+ - pyannote-audio
5
+ - pyannote-audio-model
6
+ - audio
7
+ - voice
8
+ - speech
9
+ - speaker
10
+ - speaker-recognition
11
+ - speaker-verification
12
+ - speaker-identification
13
+ - speaker-embedding
14
+ datasets:
15
+ - voxceleb
16
+ license: mit
17
+ inference: false
18
+ extra_gated_prompt: "The collected information will help acquire a better knowledge of pyannote.audio userbase and help its maintainers apply for grants to improve it further. If you are an academic researcher, please cite the relevant papers in your own publications using the model. If you work for a company, please consider contributing back to pyannote.audio development (e.g. through unrestricted gifts). We also provide scientific consulting services around speaker diarization and machine listening."
19
+ extra_gated_fields:
20
+ Company/university: text
21
+ Website: text
22
+ I plan to use this model for (task, type of audio data, etc): text
23
+ ---
24
+
25
+ Using this open-source model in production?
26
+ Consider switching to [pyannoteAI](https://www.pyannote.ai) for better and faster options.
27
+
28
+ # 🎹 Speaker embedding
29
+
30
+ Relies on pyannote.audio 2.1: see [installation instructions](https://github.com/pyannote/pyannote-audio/).
31
+
32
+ This model is based on the [canonical x-vector TDNN-based architecture](https://ieeexplore.ieee.org/abstract/document/8461375), but with filter banks replaced with [trainable SincNet features](https://ieeexplore.ieee.org/document/8639585). See [`XVectorSincNet`](https://github.com/pyannote/pyannote-audio/blob/3c988c028dc505c64fe776720372f6fe816b585a/pyannote/audio/models/embedding/xvector.py#L104-L169) architecture for implementation details.
33
+
34
+
35
+ ## Basic usage
36
+
37
+ ```python
38
+ # 1. visit hf.co/pyannote/embedding and accept user conditions
39
+ # 2. visit hf.co/settings/tokens to create an access token
40
+ # 3. instantiate pretrained model
41
+ from pyannote.audio import Model
42
+ model = Model.from_pretrained("pyannote/embedding",
43
+ use_auth_token="ACCESS_TOKEN_GOES_HERE")
44
+ ```
45
+
46
+ ```python
47
+ from pyannote.audio import Inference
48
+ inference = Inference(model, window="whole")
49
+ embedding1 = inference("speaker1.wav")
50
+ embedding2 = inference("speaker2.wav")
51
+ # `embeddingX` is a (1 x D) numpy array extracted from the file as a whole.
52
+
53
+ from scipy.spatial.distance import cdist
54
+ distance = cdist(embedding1, embedding2, metric="cosine")[0,0]
55
+ # `distance` is a `float` describing how dissimilar speakers 1 and 2 are.
56
+ ```
57
+
58
+ Using cosine distance directly, this model reaches a 2.8% equal error rate (EER) on the VoxCeleb 1 test set.
59
+ This is without voice activity detection (VAD) or probabilistic linear discriminant analysis (PLDA).
60
+ Expect even better results when adding either of those.
61
+
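+ In a verification setting, the distance above is typically compared against a decision threshold tuned on a development set. The sketch below (not part of the original model card) uses a made-up threshold value purely for illustration:
+
+ ```python
+ # Sketch only: THRESHOLD is a placeholder; tune it on development trials
+ # (e.g. the value at which false acceptance and false rejection rates are equal).
+ THRESHOLD = 0.5  # hypothetical cosine-distance threshold
+
+ # `distance` computed as in "Basic usage" above
+ same_speaker = distance < THRESHOLD
+ print("same speaker" if same_speaker else "different speakers")
+ ```
+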
62
+ ## Advanced usage
63
+
64
+ ### Running on GPU
65
+
66
+ ```python
67
+ import torch
68
+ inference.to(torch.device("cuda"))
69
+ embedding = inference("audio.wav")
70
+ ```
71
+
72
+ ### Extract embedding from an excerpt
73
+
74
+ ```python
75
+ from pyannote.audio import Inference
76
+ from pyannote.core import Segment
77
+ inference = Inference(model, window="whole")
78
+ excerpt = Segment(13.37, 19.81)
79
+ embedding = inference.crop("audio.wav", excerpt)
80
+ # `embedding` is a (1 x D) numpy array extracted from the file excerpt.
81
+ ```
82
+
83
+ ### Extract embeddings using a sliding window
84
+
85
+ ```python
86
+ from pyannote.audio import Inference
87
+ inference = Inference(model, window="sliding",
88
+ duration=3.0, step=1.0)
89
+ embeddings = inference("audio.wav")
90
+ # `embeddings` is a (N x D) pyannote.core.SlidingWindowFeature
91
+ # `embeddings[i]` is the embedding of the ith position of the
92
+ # sliding window, i.e. from [i * step, i * step + duration].
93
+ ```
94
+
95
+
96
+ ## Citation
97
+
98
+ ```bibtex
99
+ @inproceedings{Bredin2020,
100
+ Title = {{pyannote.audio: neural building blocks for speaker diarization}},
101
+ Author = {{Bredin}, Herv{\'e} and {Yin}, Ruiqing and {Coria}, Juan Manuel and {Gelly}, Gregory and {Korshunov}, Pavel and {Lavechin}, Marvin and {Fustes}, Diego and {Titeux}, Hadrien and {Bouaziz}, Wassim and {Gill}, Marie-Philippe},
102
+ Booktitle = {ICASSP 2020, IEEE International Conference on Acoustics, Speech, and Signal Processing},
103
+ Address = {Barcelona, Spain},
104
+ Month = {May},
105
+ Year = {2020},
106
+ }
107
+ ```
108
+
109
+ ```bibtex
110
+ @inproceedings{Coria2020,
111
+ author="Coria, Juan M. and Bredin, Herv{\'e} and Ghannay, Sahar and Rosset, Sophie",
112
+ editor="Espinosa-Anke, Luis and Mart{\'i}n-Vide, Carlos and Spasi{\'{c}}, Irena",
113
+ title="{A Comparison of Metric Learning Loss Functions for End-To-End Speaker Verification}",
114
+ booktitle="Statistical Language and Speech Processing",
115
+ year="2020",
116
+ publisher="Springer International Publishing",
117
+ pages="137--148",
118
+ isbn="978-3-030-59430-5"
119
+ }
120
+ ```
121
+
embedding/main/config.yaml ADDED
@@ -0,0 +1,85 @@
1
+ protocol: VoxCeleb.SpeakerVerification.VoxCeleb_X
2
+ patience: 5
3
+ task:
4
+ _target_: pyannote.audio.tasks.SupervisedRepresentationLearningWithArcFace
5
+ min_duration: 2
6
+ duration: 5.0
7
+ num_classes_per_batch: 64
8
+ num_chunks_per_class: 4
9
+ margin: 10.0
10
+ scale: 50.0
11
+ num_workers: 20
12
+ pin_memory: false
13
+ model:
14
+ _target_: pyannote.audio.models.embedding.XVectorSincNet
15
+ optimizer:
16
+ _target_: torch.optim.Adam
17
+ lr: 0.001
18
+ betas:
19
+ - 0.9
20
+ - 0.999
21
+ eps: 1.0e-08
22
+ weight_decay: 0
23
+ amsgrad: false
24
+ trainer:
25
+ _target_: pytorch_lightning.Trainer
26
+ accelerator: null
27
+ accumulate_grad_batches: 1
28
+ amp_backend: native
29
+ amp_level: O2
30
+ auto_lr_find: false
31
+ auto_scale_batch_size: false
32
+ auto_select_gpus: true
33
+ benchmark: false
34
+ check_val_every_n_epoch: 1
35
+ checkpoint_callback: true
36
+ deterministic: false
37
+ fast_dev_run: false
38
+ flush_logs_every_n_steps: 100
39
+ gpus: 1
40
+ gradient_clip_val: 0
41
+ limit_test_batches: 1.0
42
+ limit_train_batches: 1.0
43
+ limit_val_batches: 1.0
44
+ log_every_n_steps: 50
45
+ log_gpu_memory: null
46
+ max_epochs: 1000
47
+ max_steps: null
48
+ min_epochs: 1
49
+ min_steps: null
50
+ num_nodes: 1
51
+ num_processes: 1
52
+ num_sanity_val_steps: 2
53
+ overfit_batches: 0.0
54
+ precision: 32
55
+ prepare_data_per_node: true
56
+ process_position: 0
57
+ profiler: null
58
+ progress_bar_refresh_rate: 1
59
+ reload_dataloaders_every_epoch: false
60
+ replace_sampler_ddp: true
61
+ sync_batchnorm: false
62
+ terminate_on_nan: false
63
+ tpu_cores: null
64
+ track_grad_norm: -1
65
+ truncated_bptt_steps: null
66
+ val_check_interval: 1.0
67
+ weights_save_path: null
68
+ weights_summary: top
69
+ augmentation:
70
+ transform: Compose
71
+ params:
72
+ shuffle: false
73
+ transforms:
74
+ - transform: AddBackgroundNoise
75
+ params:
76
+ background_paths: /gpfswork/rech/eie/commun/data/background/musan
77
+ min_snr_in_db: 5.0
78
+ max_snr_in_db: 15.0
79
+ mode: per_example
80
+ p: 0.9
81
+ - transform: ApplyImpulseResponse
82
+ params:
83
+ ir_paths: /gpfswork/rech/eie/commun/data/rir
84
+ mode: per_example
85
+ p: 0.5
embedding/main/hparams.yaml ADDED
@@ -0,0 +1,6 @@
1
+ sample_rate: 16000
2
+ num_channels: 1
3
+ sincnet:
4
+ stride: 10
5
+ sample_rate: 16000
6
+ dimension: 512