niobures commited on
Commit
9bab9e7
·
verified ·
1 Parent(s): c336289

ECAPA-TDNN (models)

Browse files
.gitattributes CHANGED
@@ -36,3 +36,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
36
  ECAPA-TDNN[[:space:]]based[[:space:]]online[[:space:]]discussion[[:space:]]activity-level[[:space:]]evaluation.pdf filter=lfs diff=lfs merge=lfs -text
37
  ECAPA-TDNN.[[:space:]]Emphasized[[:space:]]Channel[[:space:]]Attention,[[:space:]]Propagation[[:space:]]and[[:space:]]Aggregation[[:space:]]in[[:space:]]TDNN[[:space:]]Based[[:space:]]Speaker[[:space:]]Verification.pdf filter=lfs diff=lfs merge=lfs -text
38
  Validation[[:space:]]of[[:space:]]an[[:space:]]ECAPA-TDNN[[:space:]]system[[:space:]]for[[:space:]]Forensic[[:space:]]Automatic[[:space:]]Speaker[[:space:]]Recognition[[:space:]]under[[:space:]]case[[:space:]]work[[:space:]]conditions.pdf filter=lfs diff=lfs merge=lfs -text
 
 
36
  ECAPA-TDNN[[:space:]]based[[:space:]]online[[:space:]]discussion[[:space:]]activity-level[[:space:]]evaluation.pdf filter=lfs diff=lfs merge=lfs -text
37
  ECAPA-TDNN.[[:space:]]Emphasized[[:space:]]Channel[[:space:]]Attention,[[:space:]]Propagation[[:space:]]and[[:space:]]Aggregation[[:space:]]in[[:space:]]TDNN[[:space:]]Based[[:space:]]Speaker[[:space:]]Verification.pdf filter=lfs diff=lfs merge=lfs -text
38
  Validation[[:space:]]of[[:space:]]an[[:space:]]ECAPA-TDNN[[:space:]]system[[:space:]]for[[:space:]]Forensic[[:space:]]Automatic[[:space:]]Speaker[[:space:]]Recognition[[:space:]]under[[:space:]]case[[:space:]]work[[:space:]]conditions.pdf filter=lfs diff=lfs merge=lfs -text
39
+ models/spkrec-ecapa-voxceleb/example1.wav filter=lfs diff=lfs merge=lfs -text
models/Hyper-ECAPA-TDNN/.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
models/Hyper-ECAPA-TDNN/README.md ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ ---
models/Hyper-ECAPA-TDNN/model_0002.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d85932ea3300c4a4ea6d3e07950bab36bcd7b17a149cd2af0606363aa89efe2c
3
+ size 66659819
models/Hyper-ECAPA-TDNN/source.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ https://huggingface.co/Zroslav/Hyper-ECAPA-TDNN
models/VPR_zhvoice_EcapaTdnn/.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
models/VPR_zhvoice_EcapaTdnn/README.md ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: gpl-3.0
3
+ language:
4
+ - zh
5
+ pipeline_tag: audio-classification
6
+ ---
7
+ # Voiceprint Recognition model of zhvoice based on EcapaTdnn
8
+ This is a VPR model trained on the [**zhvoice**](https://aistudio.baidu.com/aistudio/datasetdetail/133922) dataset using [**yeyupiaoling/VoiceprintRecognition-Pytorch**](https://github.com/yeyupiaoling/VoiceprintRecognition-Pytorch). I chose MelSpectrogram as the preprocessing method.
9
+
10
+ You can use it with my [**2DIPW/audio_dataset_vpr**](https://github.com/2DIPW/audio_dataset_vpr) project (modified from yeyupiaoling/VoiceprintRecognition-Pytorch), or the original project.
models/VPR_zhvoice_EcapaTdnn/config.yml ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 数据集参数
2
+ dataset_conf:
3
+ # 训练的批量大小
4
+ batch_size: 64
5
+ # 说话人数量,即分类大小
6
+ num_speakers: 3242
7
+ # 读取数据的线程数量
8
+ num_workers: 4
9
+ # 过滤最短的音频长度
10
+ min_duration: 0.5
11
+ # 最长的音频长度,大于这个长度会裁剪掉
12
+ max_duration: 3
13
+ # 是否裁剪静音片段
14
+ do_vad: False
15
+ # 音频的采样率
16
+ sample_rate: 16000
17
+ # 是否对音频进行音量归一化
18
+ use_dB_normalization: True
19
+ # 对音频进行音量归一化的音量分贝值
20
+ target_dB: -20
21
+ # 训练数据的数据列表路径
22
+ train_list: 'dataset/train_list.txt'
23
+ # 测试数据的数据列表路径
24
+ test_list: 'dataset/test_list.txt'
25
+
26
+ # 数据预处理参数
27
+ preprocess_conf:
28
+ # 音频预处理方法,支持:MelSpectrogram、Spectrogram、MFCC
29
+ feature_method: 'MelSpectrogram'
30
+
31
+ # MelSpectrogram的参数,其他的预处理方法查看对应API设置参数
32
+ feature_conf:
33
+ sample_rate: 16000
34
+ n_fft: 1024
35
+ hop_length: 320
36
+ win_length: 1024
37
+ f_min: 50.0
38
+ f_max: 14000.0
39
+ n_mels: 64
40
+
41
+ optimizer_conf:
42
+ # 优化方法,支持Adam、AdamW、SGD
43
+ optimizer: 'Adam'
44
+ # 初始学习率的大小
45
+ learning_rate: 0.001
46
+ weight_decay: 1e-6
47
+
48
+ model_conf:
49
+ # 所使用的池化层,支持ASP、SAP、TSP、TAP
50
+ pooling_type: 'ASP'
51
+
52
+ train_conf:
53
+ # 训练的轮数
54
+ max_epoch: 30
55
+ log_interval: 100
56
+
57
+ # 所使用的模型,支持EcapaTdnn、TDNN、Res2Net、ResNetSE
58
+ use_model: 'EcapaTdnn'
59
+ # 所使用的损失函数,支持AAMLoss、AMLoss、ARMLoss、CELoss
60
+ use_loss: 'AAMLoss'
models/VPR_zhvoice_EcapaTdnn/model.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:82c5b0f79add8d1ff11ee06ae58ce423e8fe2e099ebbf2e134713329040001e0
3
+ size 27193761
models/VPR_zhvoice_EcapaTdnn/model.state ADDED
@@ -0,0 +1 @@
 
 
1
+ {"last_epoch": 28, "eer": 0.019150287201911552, "version": "0.3.9"}
models/VPR_zhvoice_EcapaTdnn/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5aa197567fdddb8d1fcfc33cb8ccc27a698aab15bc873855050999badf402394
3
+ size 54235141
models/VPR_zhvoice_EcapaTdnn/source.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ https://huggingface.co/2DIPW/VPR_zhvoice_EcapaTdnn
models/nemo-ecapa-tdnn/.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
models/nemo-ecapa-tdnn/model_config.yaml ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ sample_rate: 16000
2
+ train_ds:
3
+ manifest_filepath: /manifests/combined_fisher_swbd_voxceleb12_librispeech/train.json
4
+ sample_rate: 16000
5
+ labels: null
6
+ batch_size: 64
7
+ shuffle: true
8
+ time_length: 3
9
+ augmentor:
10
+ noise:
11
+ manifest_path: /manifests/noise/rir_noise_manifest.json
12
+ prob: 0.5
13
+ min_snr_db: 0
14
+ max_snr_db: 15
15
+ speed:
16
+ prob: 0.5
17
+ sr: 16000
18
+ resample_type: kaiser_fast
19
+ min_speed_rate: 0.95
20
+ max_speed_rate: 1.05
21
+ num_workers: 15
22
+ pin_memory: true
23
+ validation_ds:
24
+ manifest_filepath: /manifests/combined_fisher_swbd_voxceleb12_librispeech/dev.json
25
+ sample_rate: 16000
26
+ labels: null
27
+ batch_size: 64
28
+ shuffle: false
29
+ time_length: 3
30
+ num_workers: 15
31
+ pin_memory: true
32
+ preprocessor:
33
+ _target_: nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor
34
+ normalize: per_feature
35
+ window_size: 0.025
36
+ sample_rate: 16000
37
+ window_stride: 0.01
38
+ window: hann
39
+ features: 80
40
+ n_fft: 512
41
+ frame_splicing: 1
42
+ dither: 1.0e-05
43
+ stft_conv: false
44
+ spec_augment:
45
+ _target_: nemo.collections.asr.modules.SpectrogramAugmentation
46
+ freq_masks: 3
47
+ freq_width: 4
48
+ time_masks: 5
49
+ time_width: 0.03
50
+ encoder:
51
+ _target_: nemo.collections.asr.modules.ECAPAEncoder
52
+ feat_in: 80
53
+ filters:
54
+ - 1024
55
+ - 1024
56
+ - 1024
57
+ - 1024
58
+ - 3072
59
+ kernel_sizes:
60
+ - 5
61
+ - 3
62
+ - 3
63
+ - 3
64
+ - 1
65
+ dilations:
66
+ - 1
67
+ - 1
68
+ - 1
69
+ - 1
70
+ - 1
71
+ scale: 8
72
+ decoder:
73
+ _target_: nemo.collections.asr.modules.SpeakerDecoder
74
+ feat_in: 3072
75
+ num_classes: 16681
76
+ pool_mode: attention
77
+ emb_sizes: 192
78
+ angular: true
79
+ loss:
80
+ scale: 30
81
+ margin: 0.2
82
+ optim:
83
+ name: sgd
84
+ lr: 0.08
85
+ weight_decay: 0.0002
86
+ sched:
87
+ name: CosineAnnealing
88
+ warmup_ratio: 0.1
89
+ min_lr: 0.0001
90
+ momentum: 0.9
91
+ target: nemo.collections.asr.models.label_models.EncDecSpeakerLabelModel
models/nemo-ecapa-tdnn/model_weights.ckpt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5b2146bb8a8aa2b92855e82aeb3ae4a07d4ba7d470647c9650c9fc1b465f6d1e
3
+ size 96765579
models/nemo-ecapa-tdnn/source.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ https://huggingface.co/huseinzol05/nemo-ecapa-tdnn
models/spkrec-ecapa-voxceleb/.gitattributes ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
2
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.h5 filter=lfs diff=lfs merge=lfs -text
5
+ *.tflite filter=lfs diff=lfs merge=lfs -text
6
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.ot filter=lfs diff=lfs merge=lfs -text
8
+ *.onnx filter=lfs diff=lfs merge=lfs -text
9
+ *.arrow filter=lfs diff=lfs merge=lfs -text
10
+ *.ftz filter=lfs diff=lfs merge=lfs -text
11
+ *.joblib filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.pb filter=lfs diff=lfs merge=lfs -text
15
+ *.pt filter=lfs diff=lfs merge=lfs -text
16
+ *.pth filter=lfs diff=lfs merge=lfs -text
17
+ classifier.ckpt filter=lfs diff=lfs merge=lfs -text
18
+ embedding_model.ckpt filter=lfs diff=lfs merge=lfs -text
19
+ mean_var_norm_emb.ckpt filter=lfs diff=lfs merge=lfs -text
models/spkrec-ecapa-voxceleb/README.md ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language: "en"
3
+ thumbnail:
4
+ tags:
5
+ - speechbrain
6
+ - embeddings
7
+ - Speaker
8
+ - Verification
9
+ - Identification
10
+ - pytorch
11
+ - ECAPA
12
+ - TDNN
13
+ license: "apache-2.0"
14
+ datasets:
15
+ - voxceleb
16
+ metrics:
17
+ - EER
18
+ widget:
19
+ - example_title: VoxCeleb Speaker id10003
20
+ src: https://cdn-media.huggingface.co/speech_samples/VoxCeleb1_00003.wav
21
+ - example_title: VoxCeleb Speaker id10004
22
+ src: https://cdn-media.huggingface.co/speech_samples/VoxCeleb_00004.wav
23
+ ---
24
+
25
+ <iframe src="https://ghbtns.com/github-btn.html?user=speechbrain&repo=speechbrain&type=star&count=true&size=large&v=2" frameborder="0" scrolling="0" width="170" height="30" title="GitHub"></iframe>
26
+ <br/><br/>
27
+
28
+ # Speaker Verification with ECAPA-TDNN embeddings on Voxceleb
29
+
30
+ This repository provides all the necessary tools to perform speaker verification with a pretrained ECAPA-TDNN model using SpeechBrain.
31
+ The system can be used to extract speaker embeddings as well.
32
+ It is trained on Voxceleb 1+ Voxceleb2 training data.
33
+
34
+ For a better experience, we encourage you to learn more about
35
+ [SpeechBrain](https://speechbrain.github.io). The model performance on Voxceleb1-test set(Cleaned) is:
36
+
37
+ | Release | EER(%)
38
+ |:-------------:|:--------------:|
39
+ | 05-03-21 | 0.80 |
40
+
41
+
42
+ ## Pipeline description
43
+
44
+ This system is composed of an ECAPA-TDNN model. It is a combination of convolutional and residual blocks. The embeddings are extracted using attentive statistical pooling. The system is trained with Additive Margin Softmax Loss. Speaker Verification is performed using cosine distance between speaker embeddings.
45
+
46
+ ## Install SpeechBrain
47
+
48
+ First of all, please install SpeechBrain with the following command:
49
+
50
+ ```
51
+ pip install git+https://github.com/speechbrain/speechbrain.git@develop
52
+ ```
53
+
54
+ Please notice that we encourage you to read our tutorials and learn more about
55
+ [SpeechBrain](https://speechbrain.github.io).
56
+
57
+ ### Compute your speaker embeddings
58
+
59
+ ```python
60
+ import torchaudio
61
+ from speechbrain.inference.speaker import EncoderClassifier
62
+ classifier = EncoderClassifier.from_hparams(source="speechbrain/spkrec-ecapa-voxceleb")
63
+ signal, fs = torchaudio.load('tests/samples/ASR/spk1_snt1.wav')
64
+ embeddings = classifier.encode_batch(signal)
65
+ ```
66
+ The system is trained with recordings sampled at 16kHz (single channel).
67
+ The code will automatically normalize your audio (i.e., resampling + mono channel selection) when calling *classify_file* if needed. Make sure your input tensor is compliant with the expected sampling rate if you use *encode_batch* and *classify_batch*.
68
+
69
+ ### Perform Speaker Verification
70
+
71
+ ```python
72
+ from speechbrain.inference.speaker import SpeakerRecognition
73
+ verification = SpeakerRecognition.from_hparams(source="speechbrain/spkrec-ecapa-voxceleb", savedir="pretrained_models/spkrec-ecapa-voxceleb")
74
+ score, prediction = verification.verify_files("tests/samples/ASR/spk1_snt1.wav", "tests/samples/ASR/spk2_snt1.wav") # Different Speakers
75
+ score, prediction = verification.verify_files("tests/samples/ASR/spk1_snt1.wav", "tests/samples/ASR/spk1_snt2.wav") # Same Speaker
76
+ ```
77
+ The prediction is 1 if the two signals in input are from the same speaker and 0 otherwise.
78
+
79
+ ### Inference on GPU
80
+ To perform inference on the GPU, add `run_opts={"device":"cuda"}` when calling the `from_hparams` method.
81
+
82
+ ### Training
83
+ The model was trained with SpeechBrain (aa018540).
84
+ To train it from scratch, follow these steps:
85
+ 1. Clone SpeechBrain:
86
+ ```bash
87
+ git clone https://github.com/speechbrain/speechbrain/
88
+ ```
89
+ 2. Install it:
90
+ ```
91
+ cd speechbrain
92
+ pip install -r requirements.txt
93
+ pip install -e .
94
+ ```
95
+
96
+ 3. Run Training:
97
+ ```
98
+ cd recipes/VoxCeleb/SpeakerRec
99
+ python train_speaker_embeddings.py hparams/train_ecapa_tdnn.yaml --data_folder=your_data_folder
100
+ ```
101
+
102
+ You can find our training results (models, logs, etc) [here](https://drive.google.com/drive/folders/1-ahC1xeyPinAHp2oAohL-02smNWO41Cc?usp=sharing).
103
+
104
+ ### Limitations
105
+ The SpeechBrain team does not provide any warranty on the performance achieved by this model when used on other datasets.
106
+
107
+ #### Referencing ECAPA-TDNN
108
+ ```
109
+ @inproceedings{DBLP:conf/interspeech/DesplanquesTD20,
110
+ author = {Brecht Desplanques and
111
+ Jenthe Thienpondt and
112
+ Kris Demuynck},
113
+ editor = {Helen Meng and
114
+ Bo Xu and
115
+ Thomas Fang Zheng},
116
+ title = {{ECAPA-TDNN:} Emphasized Channel Attention, Propagation and Aggregation
117
+ in {TDNN} Based Speaker Verification},
118
+ booktitle = {Interspeech 2020},
119
+ pages = {3830--3834},
120
+ publisher = {{ISCA}},
121
+ year = {2020},
122
+ }
123
+ ```
124
+
125
+ # **Citing SpeechBrain**
126
+ Please, cite SpeechBrain if you use it for your research or business.
127
+
128
+ ```bibtex
129
+ @misc{speechbrain,
130
+ title={{SpeechBrain}: A General-Purpose Speech Toolkit},
131
+ author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},
132
+ year={2021},
133
+ eprint={2106.04624},
134
+ archivePrefix={arXiv},
135
+ primaryClass={eess.AS},
136
+ note={arXiv:2106.04624}
137
+ }
138
+ ```
139
+
140
+ # **About SpeechBrain**
141
+ - Website: https://speechbrain.github.io/
142
+ - Code: https://github.com/speechbrain/speechbrain/
143
+ - HuggingFace: https://huggingface.co/speechbrain/
models/spkrec-ecapa-voxceleb/classifier.ckpt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fd9e3634fe68bd0a427c95e354c0c677374f62b3f434e45b78599950d860d535
3
+ size 5534328
models/spkrec-ecapa-voxceleb/config.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "speechbrain_interface": "SpeakerRecognition"
3
+ }
models/spkrec-ecapa-voxceleb/embedding_model.ckpt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0575cb64845e6b9a10db9bcb74d5ac32b326b8dc90352671d345e2ee3d0126a2
3
+ size 83316686
models/spkrec-ecapa-voxceleb/example1.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf2dde5cb516939ff619d62fc07d4f4bec5b5d521aee3d07ae51828c9d93be0b
3
+ size 104390
models/spkrec-ecapa-voxceleb/example2.flac ADDED
Binary file (39.6 kB). View file
 
models/spkrec-ecapa-voxceleb/hyperparams.yaml ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ############################################################################
2
+ # Model: ECAPA big for Speaker verification
3
+ # ############################################################################
4
+
5
+ # Feature parameters
6
+ n_mels: 80
7
+
8
+ # Pretrain folder (HuggingFace)
9
+ pretrained_path: speechbrain/spkrec-ecapa-voxceleb
10
+
11
+ # Output parameters
12
+ out_n_neurons: 7205
13
+
14
+ # Model params
15
+ compute_features: !new:speechbrain.lobes.features.Fbank
16
+ n_mels: !ref <n_mels>
17
+
18
+ mean_var_norm: !new:speechbrain.processing.features.InputNormalization
19
+ norm_type: sentence
20
+ std_norm: False
21
+
22
+ embedding_model: !new:speechbrain.lobes.models.ECAPA_TDNN.ECAPA_TDNN
23
+ input_size: !ref <n_mels>
24
+ channels: [1024, 1024, 1024, 1024, 3072]
25
+ kernel_sizes: [5, 3, 3, 3, 1]
26
+ dilations: [1, 2, 3, 4, 1]
27
+ attention_channels: 128
28
+ lin_neurons: 192
29
+
30
+ classifier: !new:speechbrain.lobes.models.ECAPA_TDNN.Classifier
31
+ input_size: 192
32
+ out_neurons: !ref <out_n_neurons>
33
+
34
+ mean_var_norm_emb: !new:speechbrain.processing.features.InputNormalization
35
+ norm_type: global
36
+ std_norm: False
37
+
38
+ modules:
39
+ compute_features: !ref <compute_features>
40
+ mean_var_norm: !ref <mean_var_norm>
41
+ embedding_model: !ref <embedding_model>
42
+ mean_var_norm_emb: !ref <mean_var_norm_emb>
43
+ classifier: !ref <classifier>
44
+
45
+ label_encoder: !new:speechbrain.dataio.encoder.CategoricalEncoder
46
+
47
+
48
+ pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
49
+ loadables:
50
+ embedding_model: !ref <embedding_model>
51
+ mean_var_norm_emb: !ref <mean_var_norm_emb>
52
+ classifier: !ref <classifier>
53
+ label_encoder: !ref <label_encoder>
54
+ paths:
55
+ embedding_model: !ref <pretrained_path>/embedding_model.ckpt
56
+ mean_var_norm_emb: !ref <pretrained_path>/mean_var_norm_emb.ckpt
57
+ classifier: !ref <pretrained_path>/classifier.ckpt
58
+ label_encoder: !ref <pretrained_path>/label_encoder.txt
models/spkrec-ecapa-voxceleb/label_encoder.txt ADDED
The diff for this file is too large to render. See raw diff
 
models/spkrec-ecapa-voxceleb/mean_var_norm_emb.ckpt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd70225b05b37be64fc5a95e24395d804231d43f74b2e1e5a513db7b69b34c33
3
+ size 1921
models/spkrec-ecapa-voxceleb/source.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ https://huggingface.co/speechbrain/spkrec-ecapa-voxceleb