update codes

Files changed:
- README.md (+10 -3)
- SenseVoiceAx.py (+64 -18)
- download_dataset.sh (+2 -0, added)
- main.py (+9 -3)
- print_utils.py (+10 -2)
- test_wer.py (+76 -0, added)
README.md
CHANGED
@@ -1,9 +1,12 @@
----
--license: mit
----
 # sensevoice.axera
 FunASR SenseVoice on Axera, official repo: https://github.com/FunAudioLLM/SenseVoice
 
+## TODO
+
+- [ ] Support AX630C
+- [ ] Support C++
+- [ ] Support FastAPI
+
 ## Features
 - Speech recognition
 - Automatic language detection (supports Chinese, English, Cantonese, Japanese and Korean)
@@ -60,6 +63,10 @@ RTF: 0.03026517820946964 Latency: 0.15689468383789062s Total length: 5.184s
 python test_wer.py -d datasets -l zh
 ```
 
+## Model Conversion
+
+See [model_convert](model_convert/README.md)
+
 ## Technical Discussion
 
 - Github issues
SenseVoiceAx.py
CHANGED
@@ -5,6 +5,7 @@ from frontend import WavFrontend
 import os
 import time
 from typing import Any, Dict, Iterable, List, NamedTuple, Set, Tuple, Union
+from print_utils import rich_transcription_postprocess
 
 def sequence_mask(lengths, maxlen=None, dtype=np.float32):
     # If maxlen is not specified, use the maximum value in lengths
@@ -19,6 +20,8 @@ def sequence_mask(lengths, maxlen=None, dtype=np.float32):
 
     # Compare to build the mask
     mask = row_vector < matrix
+    if mask.shape[-1] < lengths[0]:
+        mask = np.concatenate([mask, np.zeros((mask.shape[0], lengths[0] - mask.shape[-1]), dtype=np.float32)], axis=-1)
 
     # Return the mask in the requested dtype
     return mask.astype(dtype)[None, ...]
@@ -67,10 +70,40 @@ def unique_consecutive_np(x, dim=None, return_inverse=False, return_counts=False
 
     return results[0] if len(results) == 1 else results
 
+
+def longest_common_suffix_prefix_with_tolerance(
+    lhs,
+    rhs,
+    tolerate: int = 0
+) -> int:
+    """
+    Find the longest run of elements that is simultaneously:
+    - a suffix of lhs (its last n elements), and
+    - a prefix of rhs (its first n elements),
+    allowing at most `tolerate` mismatched elements.
+
+    Args:
+        lhs: np.ndarray, the first array
+        rhs: np.ndarray, the second array
+        tolerate: int, number of mismatches allowed (default 0, i.e. exact match)
+
+    Returns:
+        int: length of the longest common suffix/prefix (0 if there is none)
+    """
+    max_possible_n = min(len(lhs), len(rhs))
+
+    for n in range(max_possible_n, 0, -1):
+        mismatches = np.sum(lhs[-n:] != rhs[:n])
+        if mismatches <= tolerate:
+            return n
+
+    return 0
+
 class SenseVoiceAx:
-    def __init__(self, model_path, language="auto", use_itn=True, tokenizer=None):
-        model_path_root = os.path.join(os.path.dirname(model_path), "
-
+    def __init__(self, model_path, max_len=68, language="auto", use_itn=True, tokenizer=None):
+        model_path_root = os.path.join(os.path.dirname(model_path), "..")
+        embedding_root = os.path.join(model_path_root, "embeddings")
+        self.frontend = WavFrontend(cmvn_file=f"{model_path_root}/am.mvn",
                                     fs=16000,
                                     window="hamming",
                                     n_mels=80,
@@ -82,7 +115,8 @@ class SenseVoiceAx:
         self.sample_rate = 16000
         self.tokenizer = tokenizer
         self.blank_id = 0
-        self.max_len =
+        self.max_len = max_len
+        self.padding = 16
 
         self.lid_dict = {"auto": 0, "zh": 3, "en": 4, "yue": 7, "ja": 11, "ko": 12, "nospeech": 13}
         self.lid_int_dict = {24884: 3, 24885: 4, 24888: 7, 24892: 11, 24896: 12, 24992: 13}
@@ -90,13 +124,12 @@ class SenseVoiceAx:
         self.textnorm_int_dict = {25016: 14, 25017: 15}
         self.emo_dict = {"unk": 25009, "happy": 25001, "sad": 25002, "angry": 25003, "neutral": 25004}
 
-        self.position_encoding = np.load(f"{
-        language_query = np.load(f"{
-        textnorm_query = np.load(f"{
-        event_emo_query = np.load(f"{
+        self.position_encoding = np.load(f"{embedding_root}/position_encoding.npy")
+        language_query = np.load(f"{embedding_root}/{language}.npy")
+        textnorm_query = np.load(f"{embedding_root}/withitn.npy") if use_itn else np.load(f"{embedding_root}/woitn.npy")
+        event_emo_query = np.load(f"{embedding_root}/event_emo.npy")
         self.input_query = np.concatenate((textnorm_query, language_query, event_emo_query), axis=1)
         self.query_num = self.input_query.shape[1]
-        self.masks = sequence_mask(np.array([self.max_len], dtype=np.int32), dtype=np.float32)
 
     def load_data(self, filepath: str) -> np.ndarray:
         waveform, _ = librosa.load(filepath, sr=self.sample_rate)
@@ -147,28 +180,41 @@ class SenseVoiceAx:
         slice_num = int(np.ceil(feat.shape[1] / slice_len))
 
         asr_res = []
+        prev_token_int = None
         for i in range(slice_num):
-
+            if i == 0:
+                sub_feat = feat[:, i*slice_len:(i+1)*slice_len, :]
+            else:
+                sub_feat = feat[:, i*slice_len - self.padding:(i+1)*slice_len - self.padding, :]
             # concat query
             sub_feat = np.concatenate([self.input_query, sub_feat], axis=1)
-
-            if
+            real_len = sub_feat.shape[1]
+            if real_len < self.max_len:
                 sub_feat = np.concatenate([
                     sub_feat,
-                    np.zeros((1, self.max_len -
+                    np.zeros((1, self.max_len - real_len, sub_feat.shape[-1]), dtype=np.float32)
                 ],
                 axis=1)
 
+            masks = sequence_mask(np.array([self.max_len], dtype=np.int32), maxlen=real_len, dtype=np.float32)
+
             outputs = self.model.run(None, {"speech": sub_feat,
-                                            "masks":
+                                            "masks": masks,
                                             "position_encoding": self.position_encoding})
             ctc_logits, encoder_out_lens = outputs
 
             token_int = self.postprocess(ctc_logits, encoder_out_lens)
-
-
-
-
+
+            # common prefix
+            if self.padding > 0 and prev_token_int is not None:
+                # prefix_len = common_prefix_len(prev_token_int, token_int)
+                prefix_len = longest_common_suffix_prefix_with_tolerance(prev_token_int, token_int, 6)
+                common_prefix = rich_transcription_postprocess(self.tokenizer.tokens2text(token_int[:prefix_len]))
+                if common_prefix:  # guard: s[:-0] would empty the previous segment
+                    asr_res[-1] = asr_res[-1][:-len(common_prefix)]
+            prev_token_int = np.copy(token_int)
+
+            asr_res.append(self.tokenizer.tokens2text(token_int))
 
         return asr_res
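The new zero-padding branch in `sequence_mask` keeps the mask width fixed at `lengths[0]` (the model's `max_len`) while only the first `maxlen` positions stay valid, matching the zero-padded feature slice fed to the fixed-shape axmodel. A quick check of that behavior; the head of the function (everything before `mask = row_vector < matrix`) is reconstructed here from its comments and may differ slightly from the repo's exact code:

```python
import numpy as np

def sequence_mask(lengths, maxlen=None, dtype=np.float32):
    # If maxlen is not specified, use the maximum value in lengths
    if maxlen is None:
        maxlen = np.max(lengths)
    row_vector = np.arange(maxlen)
    matrix = np.expand_dims(lengths, axis=-1)
    # Compare to build the mask
    mask = row_vector < matrix
    # New branch from this commit: zero-pad the mask out to lengths[0]
    # when the requested maxlen is shorter.
    if mask.shape[-1] < lengths[0]:
        mask = np.concatenate([mask, np.zeros((mask.shape[0], lengths[0] - mask.shape[-1]), dtype=np.float32)], axis=-1)
    # Return the mask in the requested dtype
    return mask.astype(dtype)[None, ...]

# max_len = 68, but the last slice only has 60 real frames (queries included):
m = sequence_mask(np.array([68], dtype=np.int32), maxlen=60)
print(m.shape)                                  # (1, 1, 68)
print(m[0, 0, :60].all(), m[0, 0, 60:].any())   # True False
```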
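The job of `longest_common_suffix_prefix_with_tolerance` is to measure how many tokens at the end of the previous slice's output reappear at the start of the current one, so the duplicated stretch can be cut exactly once; the tolerance absorbs the occasional CTC disagreement inside the overlap (`infer` passes 6). A toy run with made-up token ids, restating the function in condensed form:

```python
import numpy as np

def longest_common_suffix_prefix_with_tolerance(lhs, rhs, tolerate: int = 0) -> int:
    # As added in this commit: largest n such that lhs[-n:] matches rhs[:n]
    # with at most `tolerate` element mismatches.
    for n in range(min(len(lhs), len(rhs)), 0, -1):
        if np.sum(lhs[-n:] != rhs[:n]) <= tolerate:
            return n
    return 0

prev_tokens = np.array([7, 7, 9, 4, 5, 6])   # tail of the previous slice
curr_tokens = np.array([4, 5, 6, 8, 8, 2])   # head of the current slice

# Exact overlap: [4, 5, 6] is a suffix of prev and a prefix of curr.
print(longest_common_suffix_prefix_with_tolerance(prev_tokens, curr_tokens))     # 3

# One CTC hiccup inside the overlap is forgiven with tolerate=1.
curr_noisy = np.array([4, 0, 6, 8, 8, 2])
print(longest_common_suffix_prefix_with_tolerance(prev_tokens, curr_noisy, 1))   # 3
```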
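End to end, `infer` now decodes overlapping windows (every window after the first starts `self.padding = 16` frames early) and splices the texts by trimming the re-decoded overlap off the previous segment. A string-level sketch of that splice, with hypothetical segment texts and `overlap_text` standing in for the postprocessed `token_int[:prefix_len]`:

```python
# Previous window decoded "今天天气"; the current window, which starts
# self.padding frames early, re-decodes "天气" before the new content.
asr_res = ["今天天气"]
overlap_text = "天气"   # postprocessed text of token_int[:prefix_len]

# Same splice as SenseVoiceAx.infer: cut the overlap off the previous
# segment, then append the current one.
if overlap_text:        # guard: s[:-0] would empty the previous segment
    asr_res[-1] = asr_res[-1][:-len(overlap_text)]
asr_res.append("天气真不错")

print("".join(asr_res))  # 今天天气真不错
```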
download_dataset.sh
ADDED
@@ -0,0 +1,2 @@
+wget https://github.com/ml-inory/whisper.axera/releases/download/v1.0/datasets.zip
+unzip datasets.zip -d ./
main.py
CHANGED
@@ -19,9 +19,11 @@ def main():
     input_audio = args.input
     language = args.language
     use_itn = True # punctuation prediction
+    max_len = 68
 
-
-
+    model_path_root = download_model("SenseVoice")
+    model_path = os.path.join(model_path_root, "sensevoice_ax650", "sensevoice.axmodel")
+    bpemodel = os.path.join(model_path_root, "chn_jpn_yue_eng_ko_spectok.bpe.model")
 
     assert os.path.exists(model_path), f"model {model_path} not exist"
 
@@ -31,7 +33,11 @@
     print(f"model_path: {model_path}")
 
     tokenizer = SentencepiecesTokenizer(bpemodel=bpemodel)
-    pipeline = SenseVoiceAx(model_path,
+    pipeline = SenseVoiceAx(model_path,
+                            max_len=max_len,
+                            language=language,
+                            use_itn=use_itn,
+                            tokenizer=tokenizer)
     asr_res = pipeline.infer(input_audio, print_rtf=True)
     print([rich_transcription_postprocess(i) for i in asr_res])
     # rich_print_asr_res(asr_res)
print_utils.py
CHANGED
@@ -116,6 +116,14 @@ def rich_transcription_postprocess(s):
     new_s = new_s.replace("The.", " ")
     return new_s.strip()
 
-def rich_print_asr_res(asr_res):
+def rich_print_asr_res(asr_res, will_print=True, remove_punc=False):
     res = "".join([rich_transcription_postprocess(i) for i in asr_res])
-
+
+    if remove_punc:
+        res = res.replace(",", "")
+        res = res.replace("。", "")
+
+    if will_print:
+        print(res)
+
+    return res
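The reworked `rich_print_asr_res` now doubles as a formatter for scoring: `will_print=False` suppresses console output and `remove_punc=True` strips the full-width comma and period before error-rate computation. A self-contained sketch of the flag semantics, with `rich_transcription_postprocess` stubbed as identity so only the flags' effect is visible:

```python
# Stub: the real rich_transcription_postprocess lives in print_utils.py.
def rich_transcription_postprocess(s):
    return s

def rich_print_asr_res(asr_res, will_print=True, remove_punc=False):
    res = "".join([rich_transcription_postprocess(i) for i in asr_res])
    if remove_punc:
        res = res.replace(",", "").replace("。", "")
    if will_print:
        print(res)
    return res

# test_wer.py's usage: format silently, punctuation stripped for scoring.
text = rich_print_asr_res(["今天天气,", "真不错。"], will_print=False, remove_punc=True)
print(text)  # 今天天气真不错
```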
test_wer.py
ADDED
@@ -0,0 +1,76 @@
+import os, sys
+import argparse
+from SenseVoiceAx import SenseVoiceAx
+from tokenizer import SentencepiecesTokenizer
+from print_utils import rich_transcription_postprocess, rich_print_asr_res
+from download_utils import download_model
+import jiwer
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--dataset", "-d", required=True, type=str, help="Input dataset")
+    parser.add_argument("--language", "-l", required=False, type=str, default="auto", choices=["auto", "zh", "en", "yue", "ja", "ko"])
+    return parser.parse_args()
+
+
+def main():
+    args = get_args()
+
+    dataset = args.dataset
+    language = args.language
+    use_itn = False # punctuation prediction
+
+    model_path_root = download_model("SenseVoice")
+    model_path = os.path.join(model_path_root, "sensevoice_ax650", "sensevoice.axmodel")
+    bpemodel = os.path.join(model_path_root, "chn_jpn_yue_eng_ko_spectok.bpe.model")
+
+    assert os.path.exists(model_path), f"model {model_path} not exist"
+
+    print(f"dataset: {dataset}")
+    print(f"language: {language}")
+    print(f"use_itn: {use_itn}")
+    print(f"model_path: {model_path}")
+
+    tokenizer = SentencepiecesTokenizer(bpemodel=bpemodel)
+    pipeline = SenseVoiceAx(model_path, language=language, use_itn=use_itn, tokenizer=tokenizer)  # keywords: the 2nd positional parameter is now max_len
+
+    # Load dataset: each line of ground_truth.txt is "<wav name> <transcript>"
+    wav_names = []
+    references = []
+    with open(os.path.join(dataset, "ground_truth.txt"), "r") as f:
+        for line in f:
+            line = line.strip()
+            w, r = line.split(" ")
+            wav_names.append(w)
+            references.append(r)
+
+    # Iterate over dataset
+    hyp = []
+    wer_file = open("wer.txt", "w")
+    for wav_name, reference in zip(wav_names, references):
+        wav_path = os.path.join(dataset, "aishell_S0764", wav_name + ".wav")
+
+        asr_res = pipeline.infer(wav_path, print_rtf=False)
+        hypothesis = rich_print_asr_res(asr_res, will_print=False, remove_punc=True)
+        hyp.append(hypothesis)
+
+        wer = jiwer.cer(  # character error rate, the usual "WER" for Chinese
+            reference,
+            hypothesis
+        )
+
+        line_content = f"{wav_name} reference: {reference} hypothesis: {hypothesis} WER: {wer}"
+        wer_file.write(line_content + "\n")
+        print(line_content)
+
+    total_wer = jiwer.cer(
+        references,
+        hyp
+    )
+    print(f"Total WER: {total_wer}")
+    wer_file.write(f"Total WER: {total_wer}")
+    wer_file.close()
+
+if __name__ == "__main__":
+    main()
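Despite the "WER" labels, the script scores with `jiwer.cer`, the character error rate; for Chinese, where tokens are single characters, that is the conventional choice. A quick sanity check with made-up strings:

```python
import jiwer

# CER = (substitutions + deletions + insertions) / reference length.
# One wrong character out of four -> 0.25.
print(jiwer.cer("今天天气", "今天天期"))  # 0.25

# Passing lists scores the whole corpus at once, as test_wer.py does
# for the "Total WER" line.
print(jiwer.cer(["今天天气", "真不错"], ["今天天气", "真不错"]))  # 0.0
```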