H5N1AIDS committed on
Commit 68377a3 · verified · 1 Parent(s): f9fa8b0

Upload F5-TTS-ONNX-Inference.py

Files changed (1)
  1. F5-TTS-ONNX-Inference.py +298 -0
F5-TTS-ONNX-Inference.py ADDED
@@ -0,0 +1,298 @@
+ import re
+ import site
+ import time
+ import jieba
+ import torch
+ import onnxruntime
+ import soundfile as sf
+ import numpy as np
+ from pydub import AudioSegment
+ from pypinyin import lazy_pinyin, Style
+
+ python_package_path = site.getsitepackages()[-1]
+
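+ # Pipeline overview (as implemented below): model A (F5_Preprocess) converts the
+ # reference audio and text ids into features and initial noise, model B
+ # (F5_Transformer) is run NFE_STEP times to denoise, and model C (F5_Decode)
+ # turns the result into a waveform.
+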
+ vocab_path = "/home/DakeQQ/Downloads/F5TTS_v1_Base/vocab.txt"             # Path to the downloaded F5-TTS vocab file. URL: https://huggingface.co/SWivid/F5-TTS/tree/main/F5TTS_v1_Base
+ onnx_model_A = "/home/DakeQQ/Downloads/F5_Optimized/F5_Preprocess.onnx"   # The exported ONNX preprocessing model.
+ onnx_model_B = "/home/DakeQQ/Downloads/F5_Optimized/F5_Transformer.onnx"  # The exported ONNX transformer model.
+ onnx_model_C = "/home/DakeQQ/Downloads/F5_Optimized/F5_Decode.onnx"       # The exported ONNX decoding model.
+ generated_audio = "./generated_audio.wav"
+ test_in_english = False
+
+ if test_in_english:
+     reference_audio = python_package_path + "/f5_tts/infer/examples/basic/basic_ref_en.wav"
+     ref_text = "Some call me nature, others call me mother nature."
+     gen_text = "Some call me Dake, others call me QQ."
+ else:
+     reference_audio = python_package_path + "/f5_tts/infer/examples/basic/basic_ref_zh.wav"  # The reference audio path.
+     ref_text = "对,这就是我,万人敬仰的太乙真人。"   # The transcript (ASR result) of the reference audio.
+     gen_text = "对,这就是我,万人敬仰的大可奇奇。"   # The target text to synthesize.
+
+
+ ORT_Accelerate_Providers = ['CPUExecutionProvider']  # If you have an accelerator, choose from: ['CUDAExecutionProvider', 'TensorrtExecutionProvider', 'CoreMLExecutionProvider', 'DmlExecutionProvider', 'OpenVINOExecutionProvider', 'ROCMExecutionProvider', 'MIGraphXExecutionProvider', 'AzureExecutionProvider'];
+                                                      # otherwise keep the default.
+ RANDOM_SEED = 9527           # Set a seed to reproduce the generated audio.
+ NFE_STEP = 32                # Number of denoising steps (NFE) for the F5-TTS sampler.
+ FUSE_NFE = 1                 # Must match the value used when exporting the model.
+ SPEED = 1.0                  # Speaking speed. Only works with dynamic_axes=True.
+ MAX_THREADS = 8              # Max CPU parallel threads.
+ DEVICE_ID = 0                # The GPU id, defaults to 0.
+ MODEL_SAMPLE_RATE = 24000    # Do not modify.
+ HOP_LENGTH = 256             # Affects the generated audio length and speech speed.
+
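+ # For reference, each mel frame spans HOP_LENGTH / MODEL_SAMPLE_RATE = 256 / 24000
+ # ≈ 10.7 ms of audio, which is how the frame counts computed below translate into
+ # output duration.
+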
+ if "OpenVINOExecutionProvider" in ORT_Accelerate_Providers:
+     provider_options = [
+         {
+             'device_type': 'CPU',              # One of [CPU, NPU, GPU, GPU.0, GPU.1]
+             'precision': 'ACCURACY',           # One of [FP32, FP16, ACCURACY]
+             'num_of_threads': MAX_THREADS,
+             'num_streams': 1,
+             'enable_opencl_throttling': True,
+             'enable_qdq_optimizer': False      # Enable with caution.
+         }
+     ]
+ elif "CUDAExecutionProvider" in ORT_Accelerate_Providers:
+     provider_options = [
+         {
+             'device_id': DEVICE_ID,
+             'gpu_mem_limit': 8 * 1024 * 1024 * 1024,  # 8 GB
+             'arena_extend_strategy': 'kNextPowerOfTwo',
+             'cudnn_conv_algo_search': 'EXHAUSTIVE',
+             'cudnn_conv_use_max_workspace': '1',
+             'do_copy_in_default_stream': '1',
+             'cudnn_conv1d_pad_to_nc1d': '1',
+             'enable_cuda_graph': '0',          # Keep '0' to avoid potential errors when enabled.
+             'use_tf32': '0'
+         }
+     ]
+ else:
+     # Configure the options yourself for other providers.
+     provider_options = None
+
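+ # To confirm which providers your onnxruntime build actually offers before
+ # editing ORT_Accelerate_Providers above, you can run:
+ #   print(onnxruntime.get_available_providers())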
+
+ with open(vocab_path, "r", encoding="utf-8") as f:
+     vocab_char_map = {}
+     for i, char in enumerate(f):
+         vocab_char_map[char[:-1]] = i  # Strip the trailing newline from each entry.
+ vocab_size = len(vocab_char_map)
+
+
+ # From the official code
+ def convert_char_to_pinyin(text_list, polyphone=True):
+     if jieba.dt.initialized is False:
+         jieba.default_logger.setLevel(50)  # CRITICAL
+         jieba.initialize()
+
+     final_text_list = []
+     custom_trans = str.maketrans(
+         {";": ",", "“": '"', "”": '"', "‘": "'", "’": "'"}
+     )  # add custom trans here, to address oov
+
+     def is_chinese(c):
+         return (
+             "\u3100" <= c <= "\u9fff"  # common chinese characters
+         )
+
+     for text in text_list:
+         char_list = []
+         text = text.translate(custom_trans)
+         for seg in jieba.cut(text):
+             seg_byte_len = len(bytes(seg, "UTF-8"))
+             if seg_byte_len == len(seg):  # if pure alphabets and symbols
+                 if char_list and seg_byte_len > 1 and char_list[-1] not in " :'\"":
+                     char_list.append(" ")
+                 char_list.extend(seg)
+             elif polyphone and seg_byte_len == 3 * len(seg):  # if pure east asian characters
+                 seg_ = lazy_pinyin(seg, style=Style.TONE3, tone_sandhi=True)
+                 for i, c in enumerate(seg):
+                     if is_chinese(c):
+                         char_list.append(" ")
+                     char_list.append(seg_[i])
+             else:  # if mixed characters, alphabets and symbols
+                 for c in seg:
+                     if ord(c) < 256:
+                         char_list.extend(c)
+                     elif is_chinese(c):
+                         char_list.append(" ")
+                         char_list.extend(lazy_pinyin(c, style=Style.TONE3, tone_sandhi=True))
+                     else:
+                         char_list.append(c)
+         final_text_list.append(char_list)
+     return final_text_list
+
+
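+ # Illustrative example for convert_char_to_pinyin above (exact segmentation
+ # depends on jieba): "你好" becomes [" ni3", " hao3"], i.e. each Chinese character
+ # maps to a space-prefixed tone3 pinyin token, while pure ASCII text passes
+ # through character by character.
+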
+ # From the official code
+ def list_str_to_idx(
+     text: list[str] | list[list[str]],
+     vocab_char_map: dict[str, int],  # {char: idx}
+     padding_value=-1
+ ):
+     get_idx = vocab_char_map.get
+     list_idx_tensors = [torch.tensor([get_idx(c, 0) for c in t], dtype=torch.int32) for t in text]
+     text = torch.nn.utils.rnn.pad_sequence(list_idx_tensors, padding_value=padding_value, batch_first=True)
+     return text
+
+
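+ # Note on list_str_to_idx above: characters missing from vocab_char_map fall
+ # back to index 0 via get_idx(c, 0), and shorter sequences in a batch are
+ # padded with -1.
+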
+ def normalize_to_int16(audio):
+     max_val = np.max(np.abs(audio))
+     scaling_factor = 32767.0 / max_val if max_val > 0 else 1.0
+     return (audio * float(scaling_factor)).astype(np.int16)
+
+
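+ # Sanity check for normalize_to_int16 above (assumed values): an input peaking
+ # at 0.5 is scaled by 32767 / 0.5, so normalize_to_int16(np.array([0.5, -0.25]))
+ # yields [32767, -16383] after the int16 cast.
+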
+ # ONNX Runtime settings
+ onnxruntime.set_seed(RANDOM_SEED)
+ session_opts = onnxruntime.SessionOptions()
+ session_opts.log_severity_level = 4              # Fatal level = 4; an adjustable value.
+ session_opts.log_verbosity_level = 4             # Fatal level = 4; an adjustable value.
+ session_opts.inter_op_num_threads = MAX_THREADS  # Run different nodes with num_threads. Set 0 for auto.
+ session_opts.intra_op_num_threads = MAX_THREADS  # Within a node, execute the operators with num_threads. Set 0 for auto.
+ session_opts.enable_cpu_mem_arena = True         # True for execution speed; False for less memory usage.
+ session_opts.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
+ session_opts.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
+ session_opts.add_session_config_entry("session.intra_op.allow_spinning", "1")
+ session_opts.add_session_config_entry("session.inter_op.allow_spinning", "1")
+ session_opts.add_session_config_entry("session.set_denormal_as_zero", "1")
+
+ # Session A: preprocessing model, runs on CPU.
+ ort_session_A = onnxruntime.InferenceSession(onnx_model_A, sess_options=session_opts, providers=['CPUExecutionProvider'], provider_options=None)
+ model_type = ort_session_A._inputs_meta[0].type
+ in_name_A = ort_session_A.get_inputs()
+ out_name_A = ort_session_A.get_outputs()
+ in_name_A0 = in_name_A[0].name
+ in_name_A1 = in_name_A[1].name
+ in_name_A2 = in_name_A[2].name
+ out_name_A0 = out_name_A[0].name
+ out_name_A1 = out_name_A[1].name
+ out_name_A2 = out_name_A[2].name
+ out_name_A3 = out_name_A[3].name
+ out_name_A4 = out_name_A[4].name
+ out_name_A5 = out_name_A[5].name
+ out_name_A6 = out_name_A[6].name
+ out_name_A7 = out_name_A[7].name
+
+ # Session B: transformer model, runs on the accelerator if one is configured.
+ if "CPUExecutionProvider" in ORT_Accelerate_Providers or not ORT_Accelerate_Providers:
+     session_opts.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
+ else:
+     session_opts.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_BASIC
+ ort_session_B = onnxruntime.InferenceSession(onnx_model_B, sess_options=session_opts, providers=ORT_Accelerate_Providers, provider_options=provider_options)
+ ORT_Accelerate_Providers = ort_session_B.get_providers()[0]  # The provider actually selected at runtime.
+ # For Windows DirectML + Intel/AMD/Nvidia GPU:
+ #   pip install onnxruntime-directml --upgrade
+ #   ort_session_B = onnxruntime.InferenceSession(onnx_model_B, sess_options=session_opts, providers=['DmlExecutionProvider'])
+ print(f"\nUsable Providers: {ORT_Accelerate_Providers}")
+ model_dtype = ort_session_B._inputs_meta[0].type
+ in_name_B = ort_session_B.get_inputs()
+ out_name_B = ort_session_B.get_outputs()
+ in_name_B0 = in_name_B[0].name
+ in_name_B1 = in_name_B[1].name
+ in_name_B2 = in_name_B[2].name
+ in_name_B3 = in_name_B[3].name
+ in_name_B4 = in_name_B[4].name
+ in_name_B5 = in_name_B[5].name
+ in_name_B6 = in_name_B[6].name
+ in_name_B7 = in_name_B[7].name
+ out_name_B0 = out_name_B[0].name
+ out_name_B1 = out_name_B[1].name
+
+ # Session C: decoding model, runs on CPU.
+ session_opts.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
+ ort_session_C = onnxruntime.InferenceSession(onnx_model_C, sess_options=session_opts, providers=['CPUExecutionProvider'], provider_options=None)
+ in_name_C = ort_session_C.get_inputs()
+ out_name_C = ort_session_C.get_outputs()
+ in_name_C0 = in_name_C[0].name
+ in_name_C1 = in_name_C[1].name
+ out_name_C0 = out_name_C[0].name
+
+ # Load the reference audio: mono, resampled to the model's 24 kHz sample rate.
+ print(f"\nReference Audio: {reference_audio}")
+ audio = np.array(AudioSegment.from_file(reference_audio).set_channels(1).set_frame_rate(MODEL_SAMPLE_RATE).get_array_of_samples(), dtype=np.float32)
+ audio = normalize_to_int16(audio)
+ audio_len = len(audio)
+ audio = audio.reshape(1, 1, -1)
+
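+ # Note: pydub decodes to integer PCM samples; the float32 cast followed by
+ # normalize_to_int16 rescales the waveform so its peak fills the int16 range.
+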
+ # Estimate the output length: scale the reference mel length by the text-length ratio.
+ zh_pause_punc = r"。,、;:?!"
+ ref_text_len = len(ref_text.encode('utf-8')) + 3 * len(re.findall(zh_pause_punc, ref_text))
+ gen_text_len = len(gen_text.encode('utf-8')) + 3 * len(re.findall(zh_pause_punc, gen_text))
+ ref_audio_len = audio_len // HOP_LENGTH + 1
+ max_duration = np.array([ref_audio_len + int(ref_audio_len / ref_text_len * gen_text_len / SPEED)], dtype=np.int64)
+ gen_text = convert_char_to_pinyin([ref_text + gen_text])
+ text_ids = list_str_to_idx(gen_text, vocab_char_map).numpy()
+ time_step = np.array([0], dtype=np.int32)
+
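+ # Worked example with assumed numbers: a 5 s reference at 24 kHz gives
+ # ref_audio_len = 120000 // 256 + 1 = 469 frames; if gen_text_len is twice
+ # ref_text_len and SPEED = 1.0, then max_duration = 469 + 938 = 1407 frames.
+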
+ # Map the selected provider to a device type for OrtValue I/O binding.
+ if "CPUExecutionProvider" in ORT_Accelerate_Providers or not ORT_Accelerate_Providers:
+     device_type = 'cpu'
+ elif "CUDAExecutionProvider" in ORT_Accelerate_Providers or "TensorrtExecutionProvider" in ORT_Accelerate_Providers:
+     device_type = 'cuda'
+ elif "DmlExecutionProvider" in ORT_Accelerate_Providers:
+     device_type = 'dml'
+ else:
+     device_type = None
+
+ print("\n\nRun F5-TTS by ONNX Runtime.")
+ start_count = time.time()
+ noise, rope_cos_q, rope_sin_q, rope_cos_k, rope_sin_k, cat_mel_text, cat_mel_text_drop, ref_signal_len = ort_session_A.run(
+     [out_name_A0, out_name_A1, out_name_A2, out_name_A3, out_name_A4, out_name_A5, out_name_A6, out_name_A7],
+     {
+         in_name_A0: audio,
+         in_name_A1: text_ids,
+         in_name_A2: max_duration
+     })
+
+ if device_type:
+     # Bind inputs and outputs as OrtValues so the iterative transformer runs
+     # without host/device copies between steps. The noise and time_step outputs
+     # alias their input buffers, so each run feeds the next one in place.
+     inputs = [
+         onnxruntime.OrtValue.ortvalue_from_numpy(noise, device_type, DEVICE_ID),
+         onnxruntime.OrtValue.ortvalue_from_numpy(rope_cos_q, device_type, DEVICE_ID),
+         onnxruntime.OrtValue.ortvalue_from_numpy(rope_sin_q, device_type, DEVICE_ID),
+         onnxruntime.OrtValue.ortvalue_from_numpy(rope_cos_k, device_type, DEVICE_ID),
+         onnxruntime.OrtValue.ortvalue_from_numpy(rope_sin_k, device_type, DEVICE_ID),
+         onnxruntime.OrtValue.ortvalue_from_numpy(cat_mel_text, device_type, DEVICE_ID),
+         onnxruntime.OrtValue.ortvalue_from_numpy(cat_mel_text_drop, device_type, DEVICE_ID),
+         onnxruntime.OrtValue.ortvalue_from_numpy(time_step, device_type, DEVICE_ID)
+     ]
+     outputs = [
+         inputs[0],   # noise, updated in place
+         inputs[-1]   # time_step, updated in place
+     ]
+
+     io_binding = ort_session_B.io_binding()
+     for i in range(len(inputs)):
+         io_binding.bind_ortvalue_input(
+             name=in_name_B[i].name,
+             ortvalue=inputs[i]
+         )
+     for i in range(len(outputs)):
+         io_binding.bind_ortvalue_output(
+             name=out_name_B[i].name,
+             ortvalue=outputs[i]
+         )
+
+     print("NFE_STEP: 0")
+     for i in range(0, NFE_STEP, FUSE_NFE):
+         ort_session_B.run_with_iobinding(io_binding)
+         print(f"NFE_STEP: {i + FUSE_NFE}")
+     noise = onnxruntime.OrtValue.numpy(io_binding.get_outputs()[0])
+ else:
+     print("NFE_STEP: 0")
+     for i in range(0, NFE_STEP, FUSE_NFE):  # Run the full NFE_STEP iterations, matching the io-binding branch.
+         noise, time_step = ort_session_B.run(
+             [out_name_B0, out_name_B1],
+             {
+                 in_name_B0: noise,
+                 in_name_B1: rope_cos_q,
+                 in_name_B2: rope_sin_q,
+                 in_name_B3: rope_cos_k,
+                 in_name_B4: rope_sin_k,
+                 in_name_B5: cat_mel_text,
+                 in_name_B6: cat_mel_text_drop,
+                 in_name_B7: time_step
+             })
+         print(f"NFE_STEP: {i + FUSE_NFE}")
+
+ # Decode the denoised features into the output waveform.
+ generated_signal = ort_session_C.run(
+     [out_name_C0],
+     {
+         in_name_C0: noise,
+         in_name_C1: ref_signal_len
+     })[0]
+ end_count = time.time()
+
+ # Save the generated audio
+ sf.write(generated_audio, generated_signal.reshape(-1), MODEL_SAMPLE_RATE, format='WAVEX')
+ print(f"\nAudio generation is complete.\n\nONNXRuntime Time Cost in Seconds:\n{end_count - start_count:.3f}")
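+
+ # To try the script end to end (assuming the three ONNX files have been
+ # exported to the paths configured above):
+ #   pip install onnxruntime pydub pypinyin jieba soundfile torch f5-tts
+ #   python F5-TTS-ONNX-Inference.py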