Initial commit

- README.md +95 -0
- build_config_ecapa-tdnn.json +51 -0
- build_config_res2netv2.json +50 -0
- ecapa-tdnn.onnx +3 -0
- res2netv2.onnx +3 -0
- run_axmodel_ecapa_tdnn.py +98 -0
- run_axmodel_res2netv2.py +97 -0
- run_onnx_ecapa_tdnn.py +100 -0
- run_onnx_res2netv2.py +99 -0
README.md
CHANGED

---
license: mit
language:
- en
base_model:
- res2netv2
- ecapa-tdnn
pipeline_tag: automatic-speech-recognition
tags:
- Audio
- 3d-speaker
---

# 3D-Speaker

This version of 3D-Speaker has been converted to run on the Axera NPU using **w8a16** quantization.

Compatible with Pulsar2 version: 4.1-patch1

## Conversion tool links

If you are interested in converting the model yourself, you can export an axmodel with the following tools:

- [The AXera Platform samples repo](https://github.com/AXERA-TECH/ax-samples), which provides a detailed guide
- [Pulsar2: how to convert ONNX to axmodel](https://pulsar2-docs.readthedocs.io/en/latest/pulsar2/introduction.html)

## Support Platform

- AX650
  - [M4N-Dock(爱芯派Pro)](https://wiki.sipeed.com/hardware/zh/maixIV/m4ndock/m4ndock.html)
  - [M.2 Accelerator card](https://axcl-docs.readthedocs.io/zh-cn/latest/doc_guide_hardware.html)
- AX630C
  - [爱芯派2](https://axera-pi-2-docs-cn.readthedocs.io/zh-cn/latest/index.html)
  - [Module-LLM](https://docs.m5stack.com/zh_CN/module/Module-LLM)
  - [LLM630 Compute Kit](https://docs.m5stack.com/zh_CN/core/LLM630%20Compute%20Kit)

|Chip|Model|Latency|
|--|--|--|
|AX650|ERes2NetV2|5.09 ms|
|AX650|Ecapa-tdnn|7.37 ms|

## How to use

Download all files from this repository to the device.

```
root@ax650:~/3D-Speaker# tree
.
|-- ax650
|   |-- res2netv2.axmodel
|   `-- ecapa-tdnn.axmodel
|-- wavs
|   |-- speaker1_a_cn_16k.wav
|   |-- speaker1_b_cn_16k.wav
|   `-- speaker2_a_cn_16k.wav
|-- run_onnx_res2netv2.py
|-- run_axmodel_res2netv2.py
|-- run_onnx_ecapa_tdnn.py
|-- run_axmodel_ecapa_tdnn.py
|-- res2netv2.onnx
`-- ecapa-tdnn.onnx
```

### Inference

Input wavs:

```
|-- wavs
|   |-- speaker1_a_cn_16k.wav
|   |-- speaker1_b_cn_16k.wav
|   `-- speaker2_a_cn_16k.wav
```

#### Inference with AX650 Host, such as M4N-Dock(爱芯派Pro)

```
root@ax650 ~/3d_speaker # python3 run_axmodel_ecapa_tdnn.py --wavs ./speaker1_a_cn_16k.wav ./speaker2_a_cn_16k.wav
[INFO] Available providers: ['AxEngineExecutionProvider']
[INFO] Using provider: AxEngineExecutionProvider
[INFO] Chip type: ChipType.MC50
[INFO] VNPU type: VNPUType.DISABLED
[INFO] Engine version: 2.12.0s
[INFO] Model type: 2 (triple core)
[INFO] Compiler version: 4.1-patch1-dirty 6247f37c-dirty
[INFO] Using provider: AxEngineExecutionProvider
[INFO] Model type: 2 (triple core)
[INFO] Compiler version: 4.1-patch1-dirty 6247f37c-dirty
[INFO]: Computing the similarity score...
[INFO]: The similarity score between two input wavs is 0.7166
```

Output:

[INFO]: The similarity score between two input wavs is 0.7166
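The similarity score reported above is the cosine similarity between the two speaker embeddings (roughly -1 to 1; higher means more likely the same speaker), computed exactly as in the run_*.py scripts below. A minimal sketch of turning that score into a same/different-speaker decision follows; the 0.25 threshold is purely illustrative and is not a value shipped with this repository:

```python
import numpy as np
import torch

def same_speaker(emb1: np.ndarray, emb2: np.ndarray, threshold: float = 0.25):
    """Compare two speaker embeddings, e.g. as returned by compute_embedding() in the scripts below.

    The threshold is illustrative only; tune it on your own enrollment data.
    """
    cos = torch.nn.CosineSimilarity(dim=-1, eps=1e-6)
    score = cos(torch.from_numpy(emb1).unsqueeze(0),
                torch.from_numpy(emb2).unsqueeze(0)).item()
    return score, score >= threshold
```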
build_config_ecapa-tdnn.json
ADDED

```json
{
  "input": "./ecapa-tdnn.onnx",
  "output_dir": "./output",
  "output_name": "ecapa-tdnn.axmodel",
  "work_dir": "",
  "model_type": "ONNX",
  "target_hardware": "AX650",
  "npu_mode": "NPU3",
  "onnx_opt": {
    "disable_onnx_optimization": false,
    "model_check": false
  },
  "quant": {
    "input_configs": [
      {
        "tensor_name": "DEFAULT",
        "calibration_dataset": "./npy.zip",
        "calibration_format": "Numpy",
        "calibration_size": 10,
        "calibration_mean": [0],
        "calibration_std": [1]
      }
    ],
    "calibration_method": "MinMax",
    "precision_analysis": true,
    "precision_analysis_method": "EndToEnd",
    "precision_analysis_mode": "Reference",
    "layer_configs": [
      {
        "start_tensor_names": ["DEFAULT"],
        "end_tensor_names": ["DEFAULT"],
        "data_type": "U16"
      }
    ]
  },
  "input_processors": [
    {
      "tensor_name": "DEFAULT",
      "src_dtype": "FP32"
    }
  ],
  "output_processors": [
    {
      "tensor_name": "DEFAULT"
    }
  ],
  "compiler": {
    "check": 0
  }
}
```
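The quant section above references a calibration set (`./npy.zip`, `calibration_format` "Numpy", `calibration_size` 10) that is not included in this commit. Below is a rough sketch of how such an archive could be produced from the same fbank features the run_*.py scripts use. It assumes Pulsar2's Numpy calibration format accepts a zip of `.npy` arrays shaped like the model input ([1, 360, 80] here); check the Pulsar2 documentation for the exact expected layout, and note the repo only ships three example wavs while a real calibration set should use more representative audio:

```python
import glob
import io
import zipfile

import numpy as np
import torch
import torchaudio
from processor import FBank  # from the 3D-Speaker project, as used by the run_*.py scripts

FRAMES, N_MELS, FS = 360, 80, 16000
fbank = FBank(N_MELS, FS, mean_nor=True)

with zipfile.ZipFile("npy.zip", "w") as zf:
    for i, wav_path in enumerate(sorted(glob.glob("wavs/*.wav"))[:10]):
        wav, _ = torchaudio.load(wav_path)            # assumed to already be 16 kHz
        feat = fbank(wav[:1]).unsqueeze(0)            # [1, T, 80]
        if feat.shape[1] >= FRAMES:                   # truncate or zero-pad to the fixed input length
            feat = feat[:, :FRAMES]
        else:
            pad = feat.new_zeros(1, FRAMES - feat.shape[1], N_MELS)
            feat = torch.cat([feat, pad], dim=1)
        buf = io.BytesIO()
        np.save(buf, feat.numpy().astype(np.float32))
        zf.writestr(f"calib_{i}.npy", buf.getvalue())
```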
build_config_res2netv2.json
ADDED

```json
{
  "input": "./res2netv2.onnx",
  "output_dir": "./output",
  "output_name": "res2netv2.axmodel",
  "work_dir": "",
  "model_type": "ONNX",
  "target_hardware": "AX650",
  "npu_mode": "NPU3",
  "onnx_opt": {
    "disable_onnx_optimization": false,
    "model_check": false
  },
  "quant": {
    "input_configs": [
      {
        "tensor_name": "DEFAULT",
        "calibration_dataset": "./npy.zip",
        "calibration_format": "Numpy",
        "calibration_size": 10,
        "calibration_mean": [0],
        "calibration_std": [1]
      }
    ],
    "calibration_method": "MinMax",
    "precision_analysis": true,
    "precision_analysis_method": "EndToEnd",
    "precision_analysis_mode": "Reference"
  },
  "input_processors": [
    {
      "tensor_name": "DEFAULT",
      "tensor_format": "AutoColorSpace",
      "tensor_layout": "NCHW",
      "src_format": "AutoColorSpace",
      "src_layout": "NHWC",
      "src_dtype": "FP32",
      "csc_mode": "FullRange",
      "csc_mat": [1.164, 0, 1.596, -222.912, 1.164, -0.392, -0.813, 135.616, 1.164, 2.017, 0, -276.8]
    }
  ],
  "output_processors": [
    {
      "tensor_name": "DEFAULT"
    }
  ],
  "compiler": {
    "check": 0
  }
}
```
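Unlike the ecapa-tdnn config, the input_processors block here declares `tensor_layout` "NCHW" with `src_layout` "NHWC". As I read it, the compiled res2netv2.axmodel is fed the feature map in NHWC order and the input processor transposes it on-device, which matches the extra permute in run_axmodel_res2netv2.py below. A minimal NumPy illustration of the two layouts (shapes only, assuming a single-channel [1, 360, 80] fbank feature map):

```python
import numpy as np

# Fbank features as produced by the scripts: [batch, frames, mels] = [1, 360, 80]
feat = np.zeros((1, 360, 80), dtype=np.float32)

# NHWC view fed to the compiled res2netv2.axmodel: [N, H, W, C] = [1, 360, 80, 1]
nhwc = feat[..., np.newaxis]

# Equivalent NCHW view after the input processor transpose: [N, C, H, W] = [1, 1, 360, 80]
nchw = np.transpose(nhwc, (0, 3, 1, 2))

print(nhwc.shape, nchw.shape)  # (1, 360, 80, 1) (1, 1, 360, 80)
```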
ecapa-tdnn.onnx
ADDED

```
version https://git-lfs.github.com/spec/v1
oid sha256:38fdfc7d2bc9e2925349baab9639fcaf4b4c755be83ee7d616b6e3fbc9d5eab3
size 83303532
```
res2netv2.onnx
ADDED

```
version https://git-lfs.github.com/spec/v1
oid sha256:4e31f647abaf941d5b04f1e557b0194c24f4e74361bdd2dcc044743018f8f136
size 71438962
```
run_axmodel_ecapa_tdnn.py
ADDED

```python
import os
import glob
import argparse
import numpy as np
import torch
import torchaudio
from processor import FBank

import axengine as axe
from axengine import axclrt_provider_name, axengine_provider_name

parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="ecapa-tdnn.axmodel", help="axmodel path")
parser.add_argument("--wavs", nargs='+', type=str, help="Wavs")
parser.add_argument("--samplerate", type=int, default=16000, help="Specify the audio sample rate in Hz (default is 16,000)")
parser.add_argument("--max_frames", type=int, default=360, help="the max audio frames")


def load_wav(wav_file, obj_fs=16000):
    """Read the audio from the specified source path and resample it to obj_fs if needed."""
    wav, fs = torchaudio.load(wav_file, format="wav")
    if fs != obj_fs:
        print(f'[WARNING]: The sample rate of {wav_file} is not {obj_fs}, resample it.')
        wav, fs = torchaudio.sox_effects.apply_effects_tensor(
            wav, fs, effects=[['rate', str(obj_fs)]]
        )
    if wav.shape[0] > 1:
        # keep only the first channel
        wav = wav[0, :].unsqueeze(0)
    return wav


def axmodel_inference(model_path, features):
    """Perform inference using a compiled axmodel."""
    def from_numpy(x):
        return x if isinstance(x, np.ndarray) else np.array(x)

    session = axe.InferenceSession(model_path, providers='AxEngineExecutionProvider')
    output_names = [x.name for x in session.get_outputs()]
    input_name = session.get_inputs()[0].name

    # run the model on the NPU
    y = session.run(output_names, {input_name: features})

    if isinstance(y, (list, tuple)):
        y = from_numpy(y[0]) if len(y) == 1 else [from_numpy(x) for x in y]
    else:
        y = from_numpy(y)
    return y


def compute_embedding(wav_file, model, frames=360, obj_fs=16000):
    # load wav
    wav = load_wav(wav_file, obj_fs)

    # compute 80-dim fbank features, shape [1, num_frames, 80]
    feature_extractor = FBank(80, obj_fs, mean_nor=True)
    feat = feature_extractor(wav).unsqueeze(0)

    # Fix the feature length: truncate long inputs, zero-pad short ones.
    if feat.shape[1] >= frames:
        feat = feat.narrow(1, 0, frames)
    else:
        pad = feat.new_zeros(feat.shape[0], frames - feat.shape[1], feat.shape[2])
        feat = torch.cat([feat, pad], dim=1)

    feat = np.ascontiguousarray(feat.cpu().numpy())

    # compute embedding
    embedding = axmodel_inference(model, feat).squeeze(0)

    return embedding


def main():
    args = parser.parse_args()
    if args.wavs is None or len(args.wavs) == 2:
        if args.wavs is None:
            try:
                # use example wavs
                examples_dir = '../wav'
                wav_path1, wav_path2 = list(glob.glob(os.path.join(examples_dir, '*.wav')))[0:2]
                print('[INFO]: No wavs input, use example wavs instead.')
            except Exception:
                raise Exception('Invalid input wav.')
        else:
            # use input wavs
            wav_path1, wav_path2 = args.wavs

        embedding1 = compute_embedding(wav_path1, args.model)
        embedding2 = compute_embedding(wav_path2, args.model)

        # compute similarity score
        print('[INFO]: Computing the similarity score...')
        similarity = torch.nn.CosineSimilarity(dim=-1, eps=1e-6)
        scores = similarity(torch.from_numpy(embedding1).unsqueeze(0),
                            torch.from_numpy(embedding2).unsqueeze(0)).item()
        print('[INFO]: The similarity score between two input wavs is %.4f' % scores)
    else:
        raise Exception('[ERROR]: Expected exactly two input wavs')


if __name__ == '__main__':
    main()
```
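Note that `processor.py` (the FBank front-end imported above) is not part of this commit; it comes from the 3D-Speaker codebase. If you only need something runnable for quick experimentation, a rough stand-in based on torchaudio's Kaldi-compatible fbank is sketched below. This is an assumption about the interface (80-dim log-mel features with per-utterance mean normalization), not the actual 3D-Speaker implementation, so embeddings computed with it may not exactly match the original front-end:

```python
import torch
import torchaudio.compliance.kaldi as kaldi


class FBank:
    """Rough stand-in for 3D-Speaker's FBank: 80-dim log-mel features, mean-normalized."""

    def __init__(self, n_mels: int, sample_rate: int, mean_nor: bool = True):
        self.n_mels = n_mels
        self.sample_rate = sample_rate
        self.mean_nor = mean_nor

    def __call__(self, wav: torch.Tensor) -> torch.Tensor:
        # wav: [1, num_samples] float waveform
        feat = kaldi.fbank(
            wav,
            num_mel_bins=self.n_mels,
            sample_frequency=self.sample_rate,
        )  # [num_frames, n_mels]
        if self.mean_nor:
            # per-utterance mean normalization over the time axis
            feat = feat - feat.mean(dim=0, keepdim=True)
        return feat
```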
run_axmodel_res2netv2.py
ADDED

```python
import os
import glob
import argparse
import numpy as np
import torch
import torchaudio
from processor import FBank

import axengine as axe
from axengine import axclrt_provider_name, axengine_provider_name

parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="res2netv2.axmodel", help="axmodel path")
parser.add_argument("--wavs", nargs='+', type=str, help="Wavs")
parser.add_argument("--samplerate", type=int, default=16000, help="Specify the audio sample rate in Hz (default is 16,000)")
parser.add_argument("--max_frames", type=int, default=360, help="the max audio frames")


def load_wav(wav_file, obj_fs=16000):
    """Read the audio from the specified source path and resample it to obj_fs if needed."""
    wav, fs = torchaudio.load(wav_file)
    if fs != obj_fs:
        print(f'[WARNING]: The sample rate of {wav_file} is not {obj_fs}, resample it.')
        wav, fs = torchaudio.sox_effects.apply_effects_tensor(
            wav, fs, effects=[['rate', str(obj_fs)]]
        )
    if wav.shape[0] > 1:
        # keep only the first channel
        wav = wav[0, :].unsqueeze(0)
    return wav


def axmodel_inference(model_path, features):
    """Perform inference using a compiled axmodel."""
    def from_numpy(x):
        return x if isinstance(x, np.ndarray) else np.array(x)

    session = axe.InferenceSession(model_path, providers='AxEngineExecutionProvider')
    output_names = [x.name for x in session.get_outputs()]
    input_name = session.get_inputs()[0].name

    # run the model on the NPU
    y = session.run(output_names, {input_name: features})

    if isinstance(y, (list, tuple)):
        y = from_numpy(y[0]) if len(y) == 1 else [from_numpy(x) for x in y]
    else:
        y = from_numpy(y)
    return y


def compute_embedding(wav_file, model, frames=360, obj_fs=16000):
    # load wav
    wav = load_wav(wav_file, obj_fs)

    # compute 80-dim fbank features, shape [1, num_frames, 80]
    feature_extractor = FBank(80, obj_fs, mean_nor=True)
    feat = feature_extractor(wav).unsqueeze(0)

    # Fix the feature length: truncate long inputs, zero-pad short ones.
    if feat.shape[1] >= frames:
        feat = feat.narrow(1, 0, frames)
    else:
        pad = feat.new_zeros(feat.shape[0], frames - feat.shape[1], feat.shape[2])
        feat = torch.cat([feat, pad], dim=1)

    # reorder to [1, num_frames, 80, 1] (NHWC), the layout declared for the compiled res2netv2.axmodel input
    feat = feat.permute(1, 2, 0).unsqueeze(0).cpu().numpy()

    # compute embedding
    embedding = axmodel_inference(model, feat).squeeze(0)

    return embedding


def main():
    args = parser.parse_args()
    if args.wavs is None or len(args.wavs) == 2:
        if args.wavs is None:
            try:
                # use example wavs
                examples_dir = '../wav'
                wav_path1, wav_path2 = list(glob.glob(os.path.join(examples_dir, '*.wav')))[0:2]
                print('[INFO]: No wavs input, use example wavs instead.')
            except Exception:
                raise Exception('Invalid input wav.')
        else:
            # use input wavs
            wav_path1, wav_path2 = args.wavs

        embedding1 = compute_embedding(wav_path1, args.model)
        embedding2 = compute_embedding(wav_path2, args.model)

        # compute similarity score
        print('[INFO]: Computing the similarity score...')
        similarity = torch.nn.CosineSimilarity(dim=-1, eps=1e-6)
        scores = similarity(torch.from_numpy(embedding1).unsqueeze(0),
                            torch.from_numpy(embedding2).unsqueeze(0)).item()
        print('[INFO]: The similarity score between two input wavs is %.4f' % scores)
    else:
        raise Exception('[ERROR]: Expected exactly two input wavs')


if __name__ == '__main__':
    main()
```
run_onnx_ecapa_tdnn.py
ADDED

```python
import os
import glob
import argparse
import numpy as np
import onnxruntime as ort
import torch
import torchaudio
from processor import FBank

parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="ecapa-tdnn.onnx", help="onnx model path")
parser.add_argument("--wavs", nargs='+', type=str, help="Wavs")
parser.add_argument("--samplerate", type=int, default=16000, help="Specify the audio sample rate in Hz (default is 16,000)")
parser.add_argument("--max_frames", type=int, default=360, help="the max audio frames")


def load_wav(wav_file, obj_fs=16000):
    """Read the audio from the specified source path and resample it to obj_fs if needed."""
    wav, fs = torchaudio.load(wav_file)
    if fs != obj_fs:
        print(f'[WARNING]: The sample rate of {wav_file} is not {obj_fs}, resample it.')
        wav, fs = torchaudio.sox_effects.apply_effects_tensor(
            wav, fs, effects=[['rate', str(obj_fs)]]
        )
    if wav.shape[0] > 1:
        # keep only the first channel
        wav = wav[0, :].unsqueeze(0)
    return wav


def onnx_inference(onnx_path, features, cuda=True):
    """Perform inference using an ONNX model."""
    def from_numpy(x):
        return x if isinstance(x, np.ndarray) else np.array(x)

    # prefer CUDA when requested, fall back to CPU
    providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
    session = ort.InferenceSession(onnx_path, providers=providers)
    output_names = [x.name for x in session.get_outputs()]
    input_name = session.get_inputs()[0].name

    # onnx inference
    y = session.run(output_names, {input_name: features})

    if isinstance(y, (list, tuple)):
        y = from_numpy(y[0]) if len(y) == 1 else [from_numpy(x) for x in y]
    else:
        y = from_numpy(y)
    return y


def compute_embedding(wav_file, model, frames=360, obj_fs=16000):
    # load wav
    wav = load_wav(wav_file, obj_fs)

    # compute 80-dim fbank features, shape [1, num_frames, 80]
    feature_extractor = FBank(80, obj_fs, mean_nor=True)
    feat = feature_extractor(wav).unsqueeze(0)

    # Fix the feature length: truncate long inputs, zero-pad short ones.
    if feat.shape[1] >= frames:
        feat = feat.narrow(1, 0, frames)
    else:
        pad = feat.new_zeros(feat.shape[0], frames - feat.shape[1], feat.shape[2])
        feat = torch.cat([feat, pad], dim=1)

    feat = feat.cpu().numpy()

    # compute embedding
    embedding = onnx_inference(model, feat).squeeze(0)

    return embedding


def main():
    args = parser.parse_args()
    if args.wavs is None or len(args.wavs) == 2:
        if args.wavs is None:
            try:
                # use example wavs
                examples_dir = '../wav'
                wav_path1, wav_path2 = list(glob.glob(os.path.join(examples_dir, '*.wav')))[0:2]
                print('[INFO]: No wavs input, use example wavs instead.')
            except Exception:
                raise Exception('Invalid input wav.')
        else:
            # use input wavs
            wav_path1, wav_path2 = args.wavs

        embedding1 = compute_embedding(wav_path1, args.model)
        embedding2 = compute_embedding(wav_path2, args.model)

        # compute similarity score
        print('[INFO]: Computing the similarity score...')
        similarity = torch.nn.CosineSimilarity(dim=-1, eps=1e-6)
        scores = similarity(torch.from_numpy(embedding1).unsqueeze(0),
                            torch.from_numpy(embedding2).unsqueeze(0)).item()
        print('[INFO]: The similarity score between two input wavs is %.4f' % scores)
    else:
        raise Exception('[ERROR]: Expected exactly two input wavs')


if __name__ == '__main__':
    main()
```
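Since run_onnx_ecapa_tdnn.py and run_axmodel_ecapa_tdnn.py expose compute_embedding() with the same signature, one way to sanity-check the w8a16 quantization is to compare the embeddings both backends produce for the same wav. A rough sketch, assuming both onnxruntime and axengine are installed in the same environment (not the typical setup, since the ONNX scripts normally run on a host PC and the axmodel scripts on the device):

```python
import numpy as np

from run_onnx_ecapa_tdnn import compute_embedding as onnx_embedding
from run_axmodel_ecapa_tdnn import compute_embedding as ax_embedding

wav = "wavs/speaker1_a_cn_16k.wav"
e_onnx = onnx_embedding(wav, "ecapa-tdnn.onnx").ravel()
e_ax = ax_embedding(wav, "ax650/ecapa-tdnn.axmodel").ravel()

# a value close to 1.0 means quantization preserved the embedding direction
cos = float(np.dot(e_onnx, e_ax) / (np.linalg.norm(e_onnx) * np.linalg.norm(e_ax) + 1e-6))
print(f"onnx vs axmodel cosine similarity: {cos:.4f}")
```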
run_onnx_res2netv2.py
ADDED

```python
import os
import glob
import argparse
import numpy as np
import onnxruntime as ort
import torch
import torchaudio
from processor import FBank

parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="res2netv2.onnx", help="onnx model path")
parser.add_argument("--wavs", nargs='+', type=str, help="Wavs")
parser.add_argument("--samplerate", type=int, default=16000, help="Specify the audio sample rate in Hz (default is 16,000)")
parser.add_argument("--max_frames", type=int, default=360, help="the max audio frames")


def load_wav(wav_file, obj_fs=16000):
    """Read the audio from the specified source path and resample it to obj_fs if needed."""
    wav, fs = torchaudio.load(wav_file)
    if fs != obj_fs:
        print(f'[WARNING]: The sample rate of {wav_file} is not {obj_fs}, resample it.')
        wav, fs = torchaudio.sox_effects.apply_effects_tensor(
            wav, fs, effects=[['rate', str(obj_fs)]]
        )
    if wav.shape[0] > 1:
        # keep only the first channel
        wav = wav[0, :].unsqueeze(0)
    return wav


def onnx_inference(onnx_path, features, cuda=True):
    """Perform inference using an ONNX model."""
    def from_numpy(x):
        return x if isinstance(x, np.ndarray) else np.array(x)

    # prefer CUDA when requested, fall back to CPU
    providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
    session = ort.InferenceSession(onnx_path, providers=providers)
    output_names = [x.name for x in session.get_outputs()]
    input_name = session.get_inputs()[0].name

    # onnx inference
    y = session.run(output_names, {input_name: features})

    if isinstance(y, (list, tuple)):
        y = from_numpy(y[0]) if len(y) == 1 else [from_numpy(x) for x in y]
    else:
        y = from_numpy(y)
    return y


def compute_embedding(wav_file, model, frames=360, obj_fs=16000):
    # load wav
    wav = load_wav(wav_file, obj_fs)

    # compute 80-dim fbank features, shape [1, num_frames, 80]
    feature_extractor = FBank(80, obj_fs, mean_nor=True)
    feat = feature_extractor(wav).unsqueeze(0)

    # Fix the feature length: truncate long inputs, zero-pad short ones.
    if feat.shape[1] >= frames:
        feat = feat.narrow(1, 0, frames)
    else:
        pad = feat.new_zeros(feat.shape[0], frames - feat.shape[1], feat.shape[2])
        feat = torch.cat([feat, pad], dim=1)

    # add the channel dimension: [1, 1, num_frames, 80]
    feat = feat.unsqueeze(0).cpu().numpy()

    # compute embedding
    embedding = onnx_inference(model, feat).squeeze(0)

    return embedding


def main():
    args = parser.parse_args()
    if args.wavs is None or len(args.wavs) == 2:
        if args.wavs is None:
            try:
                # use example wavs
                examples_dir = '../wav'
                wav_path1, wav_path2 = list(glob.glob(os.path.join(examples_dir, '*.wav')))[0:2]
                print('[INFO]: No wavs input, use example wavs instead.')
            except Exception:
                raise Exception('Invalid input wav.')
        else:
            # use input wavs
            wav_path1, wav_path2 = args.wavs

        embedding1 = compute_embedding(wav_path1, args.model)
        embedding2 = compute_embedding(wav_path2, args.model)

        # compute similarity score
        print('[INFO]: Computing the similarity score...')
        similarity = torch.nn.CosineSimilarity(dim=-1, eps=1e-6)
        scores = similarity(torch.from_numpy(embedding1).unsqueeze(0),
                            torch.from_numpy(embedding2).unsqueeze(0)).item()
        print('[INFO]: The similarity score between two input wavs is %.4f' % scores)
    else:
        raise Exception('[ERROR]: Expected exactly two input wavs')


if __name__ == '__main__':
    main()
```