yujuanqin committed
Commit a6fea9f · 1 Parent(s): ba11a76

add script for ov

scripts/funasr_ct/__init__.py ADDED
File without changes
scripts/funasr_ct/ct_transformer.py ADDED
@@ -0,0 +1,320 @@
+ # -*- encoding: utf-8 -*-
+ # Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+ # MIT License (https://opensource.org/licenses/MIT)
+
+ import os.path
+ from pathlib import Path
+ from typing import List, Union, Tuple
+ import numpy as np
+ import json
+ from funasr_ct.utils import ONNXRuntimeError, OrtInferSession, get_logger, read_yaml
+ from funasr_ct.utils import (
+     TokenIDConverter,
+     split_to_mini_sentence,
+     code_mix_split_words,
+     code_mix_split_words_jieba,
+ )
+
+ logging = get_logger()
+
+
+ class CT_Transformer:
+     """
+     Author: Speech Lab of DAMO Academy, Alibaba Group
+     CT-Transformer: Controllable time-delay transformer for real-time punctuation prediction and disfluency detection
+     https://arxiv.org/pdf/2003.01309.pdf
+     """
+
+     def __init__(
+         self,
+         model_dir: Union[str, Path] = None,
+         batch_size: int = 1,
+         device_id: Union[str, int] = "-1",
+         quantize: bool = False,
+         intra_op_num_threads: int = 4,
+         cache_dir: str = None,
+         **kwargs
+     ):
+         if not Path(model_dir).exists():
+             try:
+                 from modelscope.hub.snapshot_download import snapshot_download
+             except ImportError:
+                 raise ImportError(
+                     "You are exporting the model from modelscope; please install modelscope and try again:\n"
+                     "\npip3 install -U modelscope\n"
+                     "For users in China, you can install with:\n"
+                     "\npip3 install -U modelscope -i https://mirror.sjtu.edu.cn/pypi/web/simple"
+                 )
+             try:
+                 model_dir = snapshot_download(model_dir, cache_dir=cache_dir)
+             except Exception:
+                 raise ValueError(
+                     "model_dir must be a model name on modelscope or a local path downloaded "
+                     "from modelscope, but is {}".format(model_dir)
+                 )
+
+         model_file = os.path.join(model_dir, "model.onnx")
+         if quantize:
+             model_file = os.path.join(model_dir, "model_quant.onnx")
+         if not os.path.exists(model_file):
+             print(".onnx model not found, exporting to onnx")
+             try:
+                 from funasr import AutoModel
+             except ImportError:
+                 raise ImportError(
+                     "You are exporting to onnx; please install funasr and try again:\n"
+                     "\npip3 install -U funasr\n"
+                     "For users in China, you can install with:\n"
+                     "\npip3 install -U funasr -i https://mirror.sjtu.edu.cn/pypi/web/simple"
+                 )
+
+             model = AutoModel(model=model_dir)
+             model_dir = model.export(type="onnx", quantize=quantize, **kwargs)
+
+         config_file = os.path.join(model_dir, "config.yaml")
+         config = read_yaml(config_file)
+         token_list = os.path.join(model_dir, "tokens.json")
+         with open(token_list, "r", encoding="utf-8") as f:
+             token_list = json.load(f)
+
+         self.converter = TokenIDConverter(token_list)
+         self.ort_infer = OrtInferSession(
+             model_file, device_id, intra_op_num_threads=intra_op_num_threads
+         )
+         self.batch_size = 1
+         self.punc_list = config["model_conf"]["punc_list"]
+         self.period = 0
+         # Map ASCII punctuation to full-width equivalents and record the period index.
+         for i in range(len(self.punc_list)):
+             if self.punc_list[i] == ",":
+                 self.punc_list[i] = "，"
+             elif self.punc_list[i] == "?":
+                 self.punc_list[i] = "？"
+             elif self.punc_list[i] == "。":
+                 self.period = i
+         self.jieba_usr_dict_path = os.path.join(model_dir, "jieba_usr_dict")
+         if os.path.exists(self.jieba_usr_dict_path):
+             self.seg_jieba = True
+             self.code_mix_split_words_jieba = code_mix_split_words_jieba(self.jieba_usr_dict_path)
+         else:
+             self.seg_jieba = False
+
+     def __call__(self, text: Union[list, str], split_size=20):
+         if self.seg_jieba:
+             split_text = self.code_mix_split_words_jieba(text)
+         else:
+             split_text = code_mix_split_words(text)
+         split_text_id = self.converter.tokens2ids(split_text)
+         mini_sentences = split_to_mini_sentence(split_text, split_size)
+         mini_sentences_id = split_to_mini_sentence(split_text_id, split_size)
+         assert len(mini_sentences) == len(mini_sentences_id)
+         cache_sent = []
+         cache_sent_id = []
+         new_mini_sentence = ""
+         new_mini_sentence_punc = []
+         cache_pop_trigger_limit = 200
+         for mini_sentence_i in range(len(mini_sentences)):
+             mini_sentence = mini_sentences[mini_sentence_i]
+             mini_sentence_id = mini_sentences_id[mini_sentence_i]
+             mini_sentence = cache_sent + mini_sentence
+             mini_sentence_id = np.array(cache_sent_id + mini_sentence_id, dtype="int32")
+             data = {
+                 "text": mini_sentence_id[None, :],
+                 "text_lengths": np.array([len(mini_sentence_id)], dtype="int32"),
+             }
+             try:
+                 outputs = self.infer(data["text"], data["text_lengths"])
+                 y = outputs[0]
+                 punctuations = np.argmax(y, axis=-1)[0]
+                 assert punctuations.size == len(mini_sentence)
+             except ONNXRuntimeError as e:
+                 # `punctuations` would be undefined below, so propagate instead of continuing.
+                 logging.warning("ONNXRuntime inference failed: %s", e)
+                 raise
+
+             # Search for the last Period/QuestionMark as cache
+             if mini_sentence_i < len(mini_sentences) - 1:
+                 sentenceEnd = -1
+                 last_comma_index = -1
+                 for i in range(len(punctuations) - 2, 1, -1):
+                     if (
+                         self.punc_list[punctuations[i]] == "。"
+                         or self.punc_list[punctuations[i]] == "？"
+                     ):
+                         sentenceEnd = i
+                         break
+                     if last_comma_index < 0 and self.punc_list[punctuations[i]] == "，":
+                         last_comma_index = i
+
+                 if (
+                     sentenceEnd < 0
+                     and len(mini_sentence) > cache_pop_trigger_limit
+                     and last_comma_index >= 0
+                 ):
+                     # The sentence is too long; cut it off at a comma.
+                     sentenceEnd = last_comma_index
+                     punctuations[sentenceEnd] = self.period
+                 cache_sent = mini_sentence[sentenceEnd + 1 :]
+                 cache_sent_id = mini_sentence_id[sentenceEnd + 1 :].tolist()
+                 mini_sentence = mini_sentence[0 : sentenceEnd + 1]
+                 punctuations = punctuations[0 : sentenceEnd + 1]
+
+             new_mini_sentence_punc += [int(x) for x in punctuations]
+             words_with_punc = []
+             for i in range(len(mini_sentence)):
+                 if i > 0:
+                     if (
+                         len(mini_sentence[i][0].encode()) == 1
+                         and len(mini_sentence[i - 1][0].encode()) == 1
+                     ):
+                         mini_sentence[i] = " " + mini_sentence[i]
+                 words_with_punc.append(mini_sentence[i])
+                 if self.punc_list[punctuations[i]] != "_":
+                     words_with_punc.append(self.punc_list[punctuations[i]])
+             new_mini_sentence += "".join(words_with_punc)
+             # Add a period at the end of the sentence
+             new_mini_sentence_out = new_mini_sentence
+             new_mini_sentence_punc_out = new_mini_sentence_punc
+             if mini_sentence_i == len(mini_sentences) - 1:
+                 if new_mini_sentence[-1] == "，" or new_mini_sentence[-1] == "、":
+                     new_mini_sentence_out = new_mini_sentence[:-1] + "。"
+                     new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.period]
+                 elif new_mini_sentence[-1] != "。" and new_mini_sentence[-1] != "？":
+                     new_mini_sentence_out = new_mini_sentence + "。"
+                     new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.period]
+         return new_mini_sentence_out, new_mini_sentence_punc_out
+
+     def infer(self, feats: np.ndarray, feats_len: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
+         outputs = self.ort_infer([feats, feats_len])
+         return outputs
+
+
+ class CT_Transformer_VadRealtime(CT_Transformer):
+     """
+     Author: Speech Lab of DAMO Academy, Alibaba Group
+     CT-Transformer: Controllable time-delay transformer for real-time punctuation prediction and disfluency detection
+     https://arxiv.org/pdf/2003.01309.pdf
+     """
+
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+
+     def __call__(self, text: str, param_dict: dict, split_size=20):
+         cache_key = "cache"
+         assert cache_key in param_dict
+         cache = param_dict[cache_key]
+         if cache is not None and len(cache) > 0:
+             precache = "".join(cache)
+         else:
+             precache = ""
+             cache = []
+         full_text = precache + " " + text
+         split_text = code_mix_split_words(full_text)
+         split_text_id = self.converter.tokens2ids(split_text)
+         mini_sentences = split_to_mini_sentence(split_text, split_size)
+         mini_sentences_id = split_to_mini_sentence(split_text_id, split_size)
+         new_mini_sentence_punc = []
+         assert len(mini_sentences) == len(mini_sentences_id)
+
+         cache_sent = []
+         cache_sent_id = np.array([], dtype="int32")
+         sentence_punc_list = []
+         sentence_words_list = []
+         cache_pop_trigger_limit = 200
+         skip_num = 0
+         for mini_sentence_i in range(len(mini_sentences)):
+             mini_sentence = mini_sentences[mini_sentence_i]
+             mini_sentence_id = mini_sentences_id[mini_sentence_i]
+             mini_sentence = cache_sent + mini_sentence
+             mini_sentence_id = np.concatenate(
+                 (cache_sent_id, mini_sentence_id), axis=0, dtype="int32"
+             )
+             text_length = len(mini_sentence_id)
+             vad_mask = self.vad_mask(text_length, len(cache))[None, None, :, :].astype(np.float32)
+             data = {
+                 "input": mini_sentence_id[None, :],
+                 "text_lengths": np.array([text_length], dtype="int32"),
+                 "vad_mask": vad_mask,
+                 "sub_masks": vad_mask,
+             }
+             try:
+                 outputs = self.infer(
+                     data["input"], data["text_lengths"], data["vad_mask"], data["sub_masks"]
+                 )
+                 y = outputs[0]
+                 punctuations = np.argmax(y, axis=-1)[0]
+                 assert punctuations.size == len(mini_sentence)
+             except ONNXRuntimeError as e:
+                 # `punctuations` would be undefined below, so propagate instead of continuing.
+                 logging.warning("ONNXRuntime inference failed: %s", e)
+                 raise
+
+             # Search for the last Period/QuestionMark as cache
+             if mini_sentence_i < len(mini_sentences) - 1:
+                 sentenceEnd = -1
+                 last_comma_index = -1
+                 for i in range(len(punctuations) - 2, 1, -1):
+                     if (
+                         self.punc_list[punctuations[i]] == "。"
+                         or self.punc_list[punctuations[i]] == "？"
+                     ):
+                         sentenceEnd = i
+                         break
+                     if last_comma_index < 0 and self.punc_list[punctuations[i]] == "，":
+                         last_comma_index = i
+
+                 if (
+                     sentenceEnd < 0
+                     and len(mini_sentence) > cache_pop_trigger_limit
+                     and last_comma_index >= 0
+                 ):
+                     # The sentence is too long; cut it off at a comma.
+                     sentenceEnd = last_comma_index
+                     punctuations[sentenceEnd] = self.period
+                 cache_sent = mini_sentence[sentenceEnd + 1 :]
+                 cache_sent_id = mini_sentence_id[sentenceEnd + 1 :]
+                 mini_sentence = mini_sentence[0 : sentenceEnd + 1]
+                 punctuations = punctuations[0 : sentenceEnd + 1]
+
+             punctuations_np = [int(x) for x in punctuations]
+             new_mini_sentence_punc += punctuations_np
+             sentence_punc_list += [self.punc_list[int(x)] for x in punctuations_np]
+             sentence_words_list += mini_sentence
+
+         assert len(sentence_punc_list) == len(sentence_words_list)
+         words_with_punc = []
+         sentence_punc_list_out = []
+         for i in range(0, len(sentence_words_list)):
+             if i > 0:
+                 if (
+                     len(sentence_words_list[i][0].encode()) == 1
+                     and len(sentence_words_list[i - 1][-1].encode()) == 1
+                 ):
+                     sentence_words_list[i] = " " + sentence_words_list[i]
+             if skip_num < len(cache):
+                 # Skip words that belong to the previous segment's cache.
+                 skip_num += 1
+             else:
+                 words_with_punc.append(sentence_words_list[i])
+             if skip_num >= len(cache):
+                 sentence_punc_list_out.append(sentence_punc_list[i])
+                 if sentence_punc_list[i] != "_":
+                     words_with_punc.append(sentence_punc_list[i])
+         sentence_out = "".join(words_with_punc)
+
+         sentenceEnd = -1
+         for i in range(len(sentence_punc_list) - 2, 1, -1):
+             if sentence_punc_list[i] == "。" or sentence_punc_list[i] == "？":
+                 sentenceEnd = i
+                 break
+         cache_out = sentence_words_list[sentenceEnd + 1 :]
+         if sentence_out[-1] in self.punc_list:
+             sentence_out = sentence_out[:-1]
+             sentence_punc_list_out[-1] = "_"
+         param_dict[cache_key] = cache_out
+         return sentence_out, sentence_punc_list_out, cache_out
+
+     def vad_mask(self, size, vad_pos, dtype=bool):
+         """Create a mask for decoder self-attention.
+
+         :param int size: size of the mask
+         :param int vad_pos: index of the vad position
+         :param dtype: result dtype
+         :rtype: np.ndarray (size, size)
+         """
+         ret = np.ones((size, size), dtype=dtype)
+         if vad_pos <= 0 or vad_pos >= size:
+             return ret
+         sub_corner = np.zeros((vad_pos - 1, size - vad_pos), dtype=dtype)
+         ret[0 : vad_pos - 1, vad_pos:] = sub_corner
+         return ret
+
+     def infer(
+         self, feats: np.ndarray, feats_len: np.ndarray, vad_mask: np.ndarray, sub_masks: np.ndarray
+     ) -> Tuple[np.ndarray, np.ndarray]:
+         outputs = self.ort_infer([feats, feats_len, vad_mask, sub_masks])
+         return outputs
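
For reference, a minimal usage sketch of the two classes above (the model directory path is illustrative and assumed to already contain the exported model.onnx, config.yaml, and tokens.json):

    from funasr_ct.ct_transformer import CT_Transformer, CT_Transformer_VadRealtime

    # Offline: punctuate a full ASR hypothesis in one call.
    punc = CT_Transformer("models/ct-punc")  # hypothetical local path
    text, punc_ids = punc("跨境河流是养育沿岸人民的生命之源")

    # Streaming: param_dict["cache"] carries the unfinished sentence tail
    # between VAD segments.
    punc_rt = CT_Transformer_VadRealtime("models/ct-punc")
    param_dict = {"cache": []}
    for segment in ["跨境河流是养育", "沿岸人民的生命之源"]:
        out, punc_list, cache = punc_rt(segment, param_dict)

    # vad_mask(5, 2) zeroes the top-right (vad_pos-1) x (size-vad_pos) block,
    # so tokens before the VAD position cannot attend past it.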
scripts/funasr_ct/utils.py ADDED
@@ -0,0 +1,395 @@
+ # -*- encoding: utf-8 -*-
+
+ import functools
+ import logging
+ from pathlib import Path
+ from typing import Any, Dict, Iterable, List, NamedTuple, Set, Tuple, Union
+
+ import re
+ import numpy as np
+ import yaml
+
+ try:
+     from onnxruntime import (
+         GraphOptimizationLevel,
+         InferenceSession,
+         SessionOptions,
+         get_available_providers,
+         get_device,
+     )
+ except ImportError:
+     print("please pip3 install onnxruntime")
+ import jieba
+ import warnings
+
+ root_dir = Path(__file__).resolve().parent
+
+ logger_initialized = {}
+
+
+ def pad_list(xs, pad_value, max_len=None):
+     n_batch = len(xs)
+     if max_len is None:
+         max_len = max(x.shape[0] for x in xs)
+     # numpy equivalent of: xs[0].new(n_batch, max_len, *xs[0].size()[1:]).fill_(pad_value)
+     pad = (np.zeros((n_batch, max_len)) + pad_value).astype(np.int32)
+     for i in range(n_batch):
+         pad[i, : xs[i].shape[0]] = xs[i]
+
+     return pad
+
+
+ """
+ def make_pad_mask(lengths, xs=None, length_dim=-1, maxlen=None):
+     if length_dim == 0:
+         raise ValueError("length_dim cannot be 0: {}".format(length_dim))
+
+     if not isinstance(lengths, list):
+         lengths = lengths.tolist()
+     bs = int(len(lengths))
+     if maxlen is None:
+         if xs is None:
+             maxlen = int(max(lengths))
+         else:
+             maxlen = xs.size(length_dim)
+     else:
+         assert xs is None
+         assert maxlen >= int(max(lengths))
+
+     seq_range = torch.arange(0, maxlen, dtype=torch.int64)
+     seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)
+     seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)
+     mask = seq_range_expand >= seq_length_expand
+
+     if xs is not None:
+         assert xs.size(0) == bs, (xs.size(0), bs)
+
+         if length_dim < 0:
+             length_dim = xs.dim() + length_dim
+         # ind = (:, None, ..., None, :, , None, ..., None)
+         ind = tuple(
+             slice(None) if i in (0, length_dim) else None for i in range(xs.dim())
+         )
+         mask = mask[ind].expand_as(xs).to(xs.device)
+     return mask
+ """
+
+
+ class TokenIDConverter:
+     def __init__(
+         self,
+         token_list: Union[List, str],
+     ):
+         self.token_list = token_list
+         self.unk_symbol = token_list[-1]
+         self.token2id = {v: i for i, v in enumerate(self.token_list)}
+         self.unk_id = self.token2id[self.unk_symbol]
+
+     def get_num_vocabulary_size(self) -> int:
+         return len(self.token_list)
+
+     def ids2tokens(self, integers: Union[np.ndarray, Iterable[int]]) -> List[str]:
+         if isinstance(integers, np.ndarray) and integers.ndim != 1:
+             raise TokenIDConverterError(f"Must be 1 dim ndarray, but got {integers.ndim}")
+         return [self.token_list[i] for i in integers]
+
+     def tokens2ids(self, tokens: Iterable[str]) -> List[int]:
+         return [self.token2id.get(i, self.unk_id) for i in tokens]
+
+
+ class CharTokenizer:
+     def __init__(
+         self,
+         symbol_value: Union[Path, str, Iterable[str]] = None,
+         space_symbol: str = "<space>",
+         remove_non_linguistic_symbols: bool = False,
+     ):
+         self.space_symbol = space_symbol
+         self.non_linguistic_symbols = self.load_symbols(symbol_value)
+         self.remove_non_linguistic_symbols = remove_non_linguistic_symbols
+
+     @staticmethod
+     def load_symbols(value: Union[Path, str, Iterable[str]] = None) -> Set:
+         if value is None:
+             return set()
+
+         # isinstance against a subscripted generic (Iterable[str]) raises TypeError,
+         # so test for a non-path iterable instead.
+         if not isinstance(value, (Path, str)) and isinstance(value, Iterable):
+             return set(value)
+
+         file_path = Path(value)
+         if not file_path.exists():
+             logging.warning("%s doesn't exist.", file_path)
+             return set()
+
+         with file_path.open("r", encoding="utf-8") as f:
+             return set(line.rstrip() for line in f)
+
+     def text2tokens(self, line: Union[str, list]) -> List[str]:
+         tokens = []
+         while len(line) != 0:
+             for w in self.non_linguistic_symbols:
+                 if line.startswith(w):
+                     if not self.remove_non_linguistic_symbols:
+                         tokens.append(line[: len(w)])
+                     line = line[len(w) :]
+                     break
+             else:
+                 t = line[0]
+                 if t == " ":
+                     t = "<space>"
+                 tokens.append(t)
+                 line = line[1:]
+         return tokens
+
+     def tokens2text(self, tokens: Iterable[str]) -> str:
+         tokens = [t if t != self.space_symbol else " " for t in tokens]
+         return "".join(tokens)
+
+     def __repr__(self):
+         return (
+             f"{self.__class__.__name__}("
+             f'space_symbol="{self.space_symbol}"'
+             f'non_linguistic_symbols="{self.non_linguistic_symbols}"'
+             f")"
+         )
+
+
+ class Hypothesis(NamedTuple):
+     """Hypothesis data type."""
+
+     yseq: np.ndarray
+     score: Union[float, np.ndarray] = 0
+     scores: Dict[str, Union[float, np.ndarray]] = dict()
+     states: Dict[str, Any] = dict()
+
+     def asdict(self) -> dict:
+         """Convert data to JSON-friendly dict."""
+         return self._replace(
+             yseq=self.yseq.tolist(),
+             score=float(self.score),
+             scores={k: float(v) for k, v in self.scores.items()},
+         )._asdict()
+
+
+ class TokenIDConverterError(Exception):
+     pass
+
+
+ class ONNXRuntimeError(Exception):
+     pass
+
+
+ class OrtInferSession:
+     def __init__(self, model_file, device_id=-1, intra_op_num_threads=4):
+         device_id = str(device_id)
+         sess_opt = SessionOptions()
+         sess_opt.intra_op_num_threads = intra_op_num_threads
+         sess_opt.log_severity_level = 4
+         sess_opt.enable_cpu_mem_arena = False
+         sess_opt.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL
+
+         cuda_ep = "CUDAExecutionProvider"
+         cuda_provider_options = {
+             "device_id": device_id,
+             "arena_extend_strategy": "kNextPowerOfTwo",
+             "cudnn_conv_algo_search": "EXHAUSTIVE",
+             "do_copy_in_default_stream": "true",
+         }
+         cpu_ep = "CPUExecutionProvider"
+         cpu_provider_options = {
+             "arena_extend_strategy": "kSameAsRequested",
+         }
+
+         EP_list = []
+         if device_id != "-1" and get_device() == "GPU" and cuda_ep in get_available_providers():
+             EP_list = [(cuda_ep, cuda_provider_options)]
+         EP_list.append((cpu_ep, cpu_provider_options))
+
+         self._verify_model(model_file)
+         self.session = InferenceSession(model_file, sess_options=sess_opt, providers=EP_list)
+
+         if device_id != "-1" and cuda_ep not in self.session.get_providers():
+             warnings.warn(
+                 f"{cuda_ep} is not available in the current env, so inference automatically falls back to {cpu_ep}.\n"
+                 "Please ensure the installed onnxruntime-gpu version matches your cuda and cudnn version; "
+                 "you can check their relations on the official web site: "
+                 "https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html",
+                 RuntimeWarning,
+             )
+
+     def __call__(self, input_content: List[np.ndarray]) -> np.ndarray:
+         input_dict = dict(zip(self.get_input_names(), input_content))
+         try:
+             return self.session.run(self.get_output_names(), input_dict)
+         except Exception as e:
+             raise ONNXRuntimeError("ONNXRuntime inference failed.") from e
+
+     def get_input_names(self):
+         return [v.name for v in self.session.get_inputs()]
+
+     def get_output_names(self):
+         return [v.name for v in self.session.get_outputs()]
+
+     def get_character_list(self, key: str = "character"):
+         return self.meta_dict[key].splitlines()
+
+     def have_key(self, key: str = "character") -> bool:
+         self.meta_dict = self.session.get_modelmeta().custom_metadata_map
+         if key in self.meta_dict.keys():
+             return True
+         return False
+
+     @staticmethod
+     def _verify_model(model_path):
+         model_path = Path(model_path)
+         if not model_path.exists():
+             raise FileNotFoundError(f"{model_path} does not exist.")
+         if not model_path.is_file():
+             raise FileExistsError(f"{model_path} is not a file.")
+
+
+ def split_to_mini_sentence(words: list, word_limit: int = 20):
+     assert word_limit > 1
+     if len(words) <= word_limit:
+         return [words]
+     sentences = []
+     length = len(words)
+     sentence_len = length // word_limit
+     for i in range(sentence_len):
+         sentences.append(words[i * word_limit : (i + 1) * word_limit])
+     if length % word_limit > 0:
+         sentences.append(words[sentence_len * word_limit :])
+     return sentences
+
+
+ def code_mix_split_words(text: str):
+     words = []
+     segs = text.split()
+     for seg in segs:
+         # There is no space in seg.
+         current_word = ""
+         for c in seg:
+             if len(c.encode()) == 1:
+                 # This is an ASCII char.
+                 current_word += c
+             else:
+                 # This is a Chinese char.
+                 if len(current_word) > 0:
+                     words.append(current_word)
+                     current_word = ""
+                 words.append(c)
+         if len(current_word) > 0:
+             words.append(current_word)
+     return words
+
+
+ def isEnglish(text: str):
+     if re.search("^[a-zA-Z']+$", text):
+         return True
+     else:
+         return False
+
+
+ def join_chinese_and_english(input_list):
+     line = ""
+     for token in input_list:
+         if isEnglish(token):
+             line = line + " " + token
+         else:
+             line = line + token
+
+     line = line.strip()
+     return line
+
+
+ def code_mix_split_words_jieba(seg_dict_file: str):
+     jieba.load_userdict(seg_dict_file)
+
+     def _fn(text: str):
+         input_list = text.split()
+         token_list_all = []
+         language_list = []
+         token_list_tmp = []
+         language_flag = None
+         for token in input_list:
+             if isEnglish(token) and language_flag == "Chinese":
+                 token_list_all.append(token_list_tmp)
+                 language_list.append("Chinese")
+                 token_list_tmp = []
+             elif not isEnglish(token) and language_flag == "English":
+                 token_list_all.append(token_list_tmp)
+                 language_list.append("English")
+                 token_list_tmp = []
+
+             token_list_tmp.append(token)
+
+             if isEnglish(token):
+                 language_flag = "English"
+             else:
+                 language_flag = "Chinese"
+
+         if token_list_tmp:
+             token_list_all.append(token_list_tmp)
+             language_list.append(language_flag)
+
+         result_list = []
+         for token_list_tmp, language_flag in zip(token_list_all, language_list):
+             if language_flag == "English":
+                 result_list.extend(token_list_tmp)
+             else:
+                 seg_list = jieba.cut(join_chinese_and_english(token_list_tmp), HMM=False)
+                 result_list.extend(seg_list)
+
+         return result_list
+
+     return _fn
+
+
+ def read_yaml(yaml_path: Union[str, Path]) -> Dict:
+     if not Path(yaml_path).exists():
+         raise FileNotFoundError(f"The {yaml_path} does not exist.")
+
+     with open(str(yaml_path), "rb") as f:
+         data = yaml.load(f, Loader=yaml.Loader)
+     return data
+
+
+ @functools.lru_cache()
+ def get_logger(name="funasr_onnx"):
+     """Initialize and get a logger by name.
+
+     If the logger has not been initialized, this method will initialize it by
+     adding one or two handlers; otherwise the initialized logger will be
+     returned directly. During initialization, a StreamHandler is always added.
+
+     Args:
+         name (str): Logger name.
+     Returns:
+         logging.Logger: The expected logger.
+     """
+     logger = logging.getLogger(name)
+     if name in logger_initialized:
+         return logger
+
+     for logger_name in logger_initialized:
+         if name.startswith(logger_name):
+             return logger
+
+     formatter = logging.Formatter(
+         "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%Y/%m/%d %H:%M:%S"
+     )
+
+     sh = logging.StreamHandler()
+     sh.setFormatter(formatter)
+     logger.addHandler(sh)
+     logger_initialized[name] = True
+     logger.propagate = False
+     logging.basicConfig(level=logging.ERROR)
+     return logger
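
The splitting helpers above do the heavy lifting for code-mixed input; a short sketch of their observable behavior (outputs traced from the implementations above):

    from funasr_ct.utils import code_mix_split_words, split_to_mini_sentence

    code_mix_split_words("hello世界 abc")
    # ASCII runs stay whole words, each Chinese character becomes its own token:
    # ['hello', '世', '界', 'abc']

    split_to_mini_sentence(list(range(5)), word_limit=2)
    # Fixed-size windows plus the remainder: [[0, 1], [2, 3], [4]]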
scripts/run_whisper_finetuned_with_punc_ov.py ADDED
@@ -0,0 +1,99 @@
+ import time
+ from pathlib import Path
+ import csv
+
+ import librosa
+ import openvino_genai
+ from scripts.funasr_ct.ct_transformer import CT_Transformer
+ from scripts.asr_utils import get_origin_text_dict, get_text_distance
+
+
+ def save_csv(file_path, rows):
+     with open(file_path, "w", encoding="utf-8", newline="") as f:
+         writer = csv.writer(f)
+         writer.writerows(rows)
+     print(f"write csv to {file_path}")
+
+
+ def load_audio(audio_path: str, sr: int = 16000):
+     # Load the audio as 16 kHz mono float32 numpy.
+     audio, _ = librosa.load(audio_path, sr=sr, mono=True)
+     return audio
+
+
+ def read_wav(filepath):
+     raw_speech, samplerate = librosa.load(filepath, sr=16000)
+     return raw_speech.tolist()
+
+
+ def transcribe_file(
+     audio_path: str,
+     model,
+     lang="en",
+ ):
+     raw_speech = read_wav(audio_path)
+     res = model.generate(raw_speech, language=lang)
+     return str(res)
+
+
+ def load_model(device):
+     # model_path = r"D:\yujuan\yoyo-translator-win\models\whisper-large-v3-turbo-int8"
+     model_path = r"D:\yujuan\models\whisper-turbo-25000-int8p\whisper-turbo-25000-int8p"
+     punc_model = r"D:\yujuan\models\funasr_ct\ct-punc"
+
+     t0 = time.time()
+     asr = openvino_genai.WhisperPipeline(model_path, device)
+     # The punctuation model runs through ONNX Runtime; `device` only selects
+     # the Whisper device. CT_Transformer takes device_id to use CUDA instead.
+     punc = CT_Transformer(punc_model)
+     print("load model time: ", time.time() - t0)
+     return asr, punc
+
+
+ def inference(audio: Path, asr, punc, lang):
+     try:
+         t0 = time.time()
+         asr_text = transcribe_file(str(audio), asr, lang)
+         t1 = time.time()
+         if lang == "<|zh|>":
+             punc_text = punc(asr_text)[0]
+         else:
+             punc_text = asr_text
+         t2 = time.time()
+         print(f"{audio.name} -> {asr_text} -> {punc_text}; \n asr cost: {t1 - t0}; punc cost: {t2 - t1}")
+         return punc_text, t2 - t0
+     except Exception as e:
+         print(f"{audio.name} -> failed: {e}")
+         return "", 0.0
+
+
+ def run_test_audios():
+     device = "GPU"  # CPU can be used as well
+     lang = "<|en|>"
+     asr, punc = load_model(device)
+
+     audios = Path(r"D:\yujuan\TestTranslator\tests\test_data\test_audios")
+     rows = [["file_name", "time", "inference_result"]]
+     for audio in sorted(audios.glob("*en*/*.wav")):  # alternative pattern: "*s/randomforest*.wav"
+         text, t = inference(audio, asr, punc, lang)
+         rows.append([f"{audio.parent.name}/{audio.name}", t, text])
+     save_csv("csv/finetune_whisper_with_punc.csv", rows)
+
+
+ def run_recordings():
+     device = "GPU"  # CPU can be used as well
+     lang = "<|zh|>"
+     asr, punc = load_model(device)
+
+     audios = Path(r"D:\yujuan\TestTranslator\tests\test_data\recordings")
+     rows = [["file_name", "time", "inference_result", "distance", "norm_distance", "diff"]]
+     original = get_origin_text_dict()
+     for audio in sorted(audios.glob("*.wav"), key=lambda x: int(x.stem)):
+         text, t = inference(audio, asr, punc, lang)
+         d, nd, diff = get_text_distance(original[audio.stem], text)
+         rows.append([audio.name, round(t, 3), text, d, round(nd, 3), diff])
+     save_csv("csv/finetune_whisper_with_punc.csv", rows)
+
+
+ if __name__ == "__main__":
+     run_recordings()
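
The script hard-codes model paths, audio directories, and the language token. If a command-line entry point is wanted, a sketch along these lines could work; the flag names and defaults are hypothetical, not part of the commit:

    import argparse
    from pathlib import Path

    def main():
        parser = argparse.ArgumentParser(
            description="Whisper (OpenVINO GenAI) + CT-Transformer punctuation"
        )
        parser.add_argument("--audio", required=True, help="path to a 16 kHz wav file")
        parser.add_argument("--device", default="GPU", help="OpenVINO device (CPU/GPU)")
        parser.add_argument("--lang", default="<|zh|>", help="Whisper language token")
        args = parser.parse_args()

        asr, punc = load_model(args.device)
        text, cost = inference(Path(args.audio), asr, punc, args.lang)
        print(f"{text} ({cost:.3f}s)")

Calling main() from the __main__ guard instead of run_recordings() would then let the paths vary per run.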