Sad44587 committed on
Commit
efc0997
·
verified ·
1 Parent(s): df33074

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +256 -14
app.py CHANGED
@@ -1,24 +1,257 @@
 
 
 
 
 
1
  import gradio as gr
 
2
  from huggingface_hub import InferenceClient
3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  client = InferenceClient("google/gemma-1.1-2b-it")
5
  client = InferenceClient("mistralai/Mistral-Nemo-Instruct-2407")
6
 
7
  def models(Query):
8
-
9
  messages = []
10
-
11
  messages.append({"role": "user", "content": f"[SYSTEM] You are ASSISTANT who answer question asked by user in short and concise manner. [USER] {Query}"})
12
-
13
  Response = ""
14
-
15
  for message in client.chat_completion(
16
  messages,
17
  max_tokens=2048,
18
  stream=True
19
  ):
20
  token = message.choices[0].delta.content
21
-
22
  Response += token
23
  yield Response
24
 
@@ -53,23 +286,32 @@ Example format:
53
  <answer> [Final Answer] </answer> (must give final answer in this format)
54
  <reflection> [Evaluation of the solution] </reflection>
55
  <reward> [Float between 0.0 and 1.0] </reward> [/INST] [INST] [QUERY] {query} [/INST] [ASSISTANT] """
56
-
57
  stream = client.text_generation(message, max_new_tokens=4096, stream=True, details=True, return_full_text=False)
58
  output = ""
59
-
60
  for response in stream:
61
  output += response.token.text
62
  return output
63
 
64
- description="# Light ChatBox\n### Enter a question and.. Tada this reponse generate in 0.5 second!"
65
 
66
  with gr.Blocks() as demo1:
67
- gr.Interface(description=description,fn=models, inputs=["text"], outputs="text")
 
68
  with gr.Blocks() as demo2:
69
- gr.Interface(description="Very low but critical thinker",fn=nemo, inputs=["text"], outputs="text", api_name="critical_thinker", concurrency_limit=10)
 
 
 
 
 
 
 
 
 
 
 
70
 
71
- with gr.Blocks() as demo:
72
- gr.TabbedInterface([demo1, demo2] , ["Fast", "Critical"])
73
 
74
- demo.queue(max_size=300000)
75
- demo.launch()
 
 
1
+ import os
2
+ import time
3
+ import uuid
4
+ from datetime import datetime
5
+
6
  import gradio as gr
7
+ import soundfile as sf
8
  from huggingface_hub import InferenceClient
9
 
10
+ from model import get_pretrained_model, language_to_models
11
+
12
+
13
def MyPrint(s):
    """Print *s* to stdout prefixed with a microsecond-resolution timestamp."""
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    print(f"{stamp}: {s}")
17
+
18
+
19
# Markdown heading rendered at the top of the TTS tab.
title = "# Next-gen Kaldi: Text-to-speech (TTS)"

# Introductory markdown rendered below the TTS controls.
description = """
This space shows how to convert text to speech with Next-gen Kaldi.
It is running on CPU within a docker container provided by Hugging Face.
See more information by visiting the following links:
- <https://github.com/k2-fsa/sherpa-onnx>
If you want to deploy it locally, please see
<https://k2-fsa.github.io/sherpa/>
If you want to use Android APKs, please see
<https://k2-fsa.github.io/sherpa/onnx/tts/apk.html>
If you want to use Android text-to-speech engine APKs, please see
<https://k2-fsa.github.io/sherpa/onnx/tts/apk-engine.html>
If you want to download an all-in-one exe for Windows, please see
<https://github.com/k2-fsa/sherpa-onnx/releases/tag/tts-models>
"""

# css style is copied from
# https://huggingface.co/spaces/alphacep/asr/blob/main/app.py#L113
# The result_item_success / result_item_error classes are selected by
# build_html_output below.
css = """
.result {display:flex;flex-direction:column}
.result_item {padding:15px;margin-bottom:8px;border-radius:15px;width:100%}
.result_item_success {background-color:mediumaquamarine;color:white;align-self:start}
.result_item_error {background-color:#ff7070;color:white;align-self:start}
"""

# Pre-filled rows for gr.Examples; each row matches the `process` inputs:
# [language, model repo id, text, speaker id, speed]
examples = [
    [
        "Chinese (Mandarin, 普通话)",
        "csukuangfj/matcha-icefall-zh-baker|1 speaker",
        "某某银行的副行长和一些行政领导表示,他们去过长江和长白山; 经济不断增长。2024年12月31号,拨打110或者18920240511。123456块钱。",
        0,
        1.0,
    ],
    [
        "Chinese (Mandarin, 普通话)",
        "csukuangfj/vits-zh-hf-fanchen-wnj|1 speaker",
        "在一个阳光明媚的夏天,小马、小羊和小狗它们一块儿在广阔的草地上,嬉戏玩耍,这时小猴来了,还带着它心爱的足球活蹦乱跳地跑前、跑后教小马、小羊、小狗踢足球。",
        0,
        1.0,
    ],
    [
        "Chinese (Mandarin, 普通话)",
        "csukuangfj/vits-zh-hf-fanchen-C|187 speakers",
        '小米的使命是,始终坚持做"感动人心、价格厚道"的好产品,让全球每个人都能享受科技带来的美好生活。',
        0,
        1.0,
    ],
    ["Min-nan (闽南话)", "csukuangfj/vits-mms-nan", "ài piaǸ chiah ē iaN̂", 0, 1.0],
    ["Thai", "csukuangfj/vits-mms-tha", "ฉันรักคุณ", 0, 1.0],
    [
        "Chinese (Mandarin, 普通话)",
        "csukuangfj/sherpa-onnx-vits-zh-ll|5 speakers",
        "当夜幕降临,星光点点,伴随着微风拂面,我在静谧中感受着时光的流转,思念如涟漪荡漾,梦境如画卷展开,我与自然融为一体,沉静在这片宁静的美丽之中,感受着生命的奇迹与温柔。",
        2,
        1.0,
    ],
]
77
+
78
+
79
def update_model_dropdown(language: str):
    """Return a refreshed model dropdown for *language*.

    Raises:
        ValueError: if *language* has no entry in ``language_to_models``.
    """
    # Guard clause: reject unknown languages up front.
    if language not in language_to_models:
        raise ValueError(f"Unsupported language: {language}")

    choices = language_to_models[language]
    return gr.Dropdown(
        choices=choices,
        value=choices[0],
        interactive=True,
    )
89
+
90
+
91
def build_html_output(s: str, style: str = "result_item_success"):
    """Wrap *s* in the result-card divs styled by the `css` block above.

    *style* selects the CSS class: "result_item_success" (green) or
    "result_item_error" (red).
    """
    return f"""
    <div class='result'>
        <div class='result_item {style}'>
        {s}
        </div>
    </div>
    """
99
+
100
+
101
def process(language: str, repo_id: str, text: str, sid: str, speed: float):
    """Synthesize *text* with the selected TTS model and write it to a wav file.

    Args:
        language: Display language (not used directly; the model is chosen
            via *repo_id*).
        repo_id: Model identifier passed to ``get_pretrained_model``.
        sid: Speaker ID as a string; only meaningful for multi-speaker models.
        speed: Speech speed factor (larger -> faster).

    Returns:
        A ``(filename, info_html)`` tuple: the path of the generated wav file
        and an HTML summary of duration / processing time / RTF.

    Raises:
        ValueError: if the model produced no audio samples.
    """
    MyPrint(f"Input text: {text}. sid: {sid}, speed: {speed}")
    sid = int(sid)
    tts = get_pretrained_model(repo_id, speed)

    start = time.time()
    audio = tts.generate(text, sid=sid)
    end = time.time()

    if len(audio.samples) == 0:
        raise ValueError(
            "Error in generating audios. Please read previous error messages."
        )

    duration = len(audio.samples) / audio.sample_rate

    elapsed_seconds = end - start
    # Real-time factor: processing time divided by audio duration.
    rtf = elapsed_seconds / duration

    info = f"""
    Wave duration : {duration:.3f} s <br/>
    Processing time: {elapsed_seconds:.3f} s <br/>
    RTF: {elapsed_seconds:.3f}/{duration:.3f} = {rtf:.3f} <br/>
    """

    MyPrint(info)
    MyPrint(f"\nrepo_id: {repo_id}\ntext: {text}\nsid: {sid}\nspeed: {speed}")

    # Bug fix: the generated UUID used to be immediately overwritten by the
    # literal "(unknown).wav", so every request wrote to the same file and
    # concurrent requests clobbered one another's output. Use the unique
    # per-request name instead.
    filename = f"{uuid.uuid4()}.wav"
    sf.write(
        filename,
        audio.samples,
        samplerate=audio.sample_rate,
        subtype="PCM_16",
    )

    return filename, build_html_output(info)
139
+
140
+
141
# Next-gen Kaldi TTS demo UI.
with gr.Blocks(css=css) as demo_tts:
    gr.Markdown(title)
    language_choices = list(language_to_models.keys())

    language_radio = gr.Radio(
        label="Language",
        choices=language_choices,
        value=language_choices[0],
    )

    model_dropdown = gr.Dropdown(
        choices=language_to_models[language_choices[0]],
        label="Select a model",
        value=language_to_models[language_choices[0]][0],
    )

    # Refresh the model list whenever the selected language changes.
    language_radio.change(
        update_model_dropdown,
        inputs=language_radio,
        outputs=model_dropdown,
    )

    with gr.Tabs():
        with gr.TabItem("Please input your text"):
            input_text = gr.Textbox(
                label="Input text",
                info="Your text",
                lines=3,
                placeholder="Please input your text here",
            )

            input_sid = gr.Textbox(
                label="Speaker ID",
                info="Speaker ID",
                lines=1,
                max_lines=1,
                value="0",
                placeholder="Speaker ID. Valid only for mult-speaker model",
            )

            input_speed = gr.Slider(
                minimum=0.1,
                maximum=10,
                value=1,
                step=0.1,
                label="Speed (larger->faster; smaller->slower)",
            )

            input_button = gr.Button("Submit")

            output_audio = gr.Audio(label="Output")

            output_info = gr.HTML(label="Info")

            # Clicking an example row runs `process` with the row's values.
            gr.Examples(
                examples=examples,
                fn=process,
                inputs=[
                    language_radio,
                    model_dropdown,
                    input_text,
                    input_sid,
                    input_speed,
                ],
                outputs=[
                    output_audio,
                    output_info,
                ],
            )

            input_button.click(
                process,
                inputs=[
                    language_radio,
                    model_dropdown,
                    input_text,
                    input_sid,
                    input_speed,
                ],
                outputs=[
                    output_audio,
                    output_info,
                ],
            )

    gr.Markdown(description)
229
+
230
def download_espeak_ng_data():
    """Fetch and unpack the espeak-ng data archive into /tmp.

    Some TTS models need this data at runtime; it is downloaded once at
    startup via a shell one-liner.
    """
    script = """
    cd /tmp
    wget -qq https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/espeak-ng-data.tar.bz2
    tar xf espeak-ng-data.tar.bz2
    """
    os.system(script)
238
+
239
+
240
# --- Second app: chat via InferenceClient ---

# NOTE(review): the original code bound `client` twice; the
# "google/gemma-1.1-2b-it" client was immediately shadowed by the Mistral
# one, so only Mistral was ever used. Keep the client that is actually used.
client = InferenceClient("mistralai/Mistral-Nemo-Instruct-2407")


def models(Query):
    """Stream a short, concise answer to *Query* from the chat model.

    Yields the response accumulated so far after each streamed token so the
    Gradio UI can render it incrementally.
    """
    messages = [
        {
            "role": "user",
            "content": f"[SYSTEM] You are ASSISTANT who answer question asked by user in short and concise manner. [USER] {Query}",
        }
    ]
    Response = ""
    for message in client.chat_completion(
        messages,
        max_tokens=2048,
        stream=True,
    ):
        token = message.choices[0].delta.content
        # Bug fix: a streamed delta may carry content=None (e.g. the final
        # chunk), which made `Response += token` raise TypeError. Skip
        # empty deltas instead.
        if token:
            Response += token
        yield Response
257
 
 
286
  <answer> [Final Answer] </answer> (must give final answer in this format)
287
  <reflection> [Evaluation of the solution] </reflection>
288
  <reward> [Float between 0.0 and 1.0] </reward> [/INST] [INST] [QUERY] {query} [/INST] [ASSISTANT] """
 
289
  stream = client.text_generation(message, max_new_tokens=4096, stream=True, details=True, return_full_text=False)
290
  output = ""
 
291
  for response in stream:
292
  output += response.token.text
293
  return output
294
 
295
# Bug fix: user-facing typo "reponse" -> "response".
description_chat = "# Light ChatBox\n### Enter a question and.. Tada this response generate in 0.5 second!"

# Fast tab: short concise answers streamed from `models`.
with gr.Blocks() as demo1:
    gr.Interface(description=description_chat, fn=models, inputs=["text"], outputs="text")

# Critical tab: slower, reasoning-style answers from `nemo`
# (defined earlier in this file).
with gr.Blocks() as demo2:
    gr.Interface(description="Very low but critical thinker", fn=nemo, inputs=["text"], outputs="text", api_name="critical_thinker", concurrency_limit=10)

with gr.Blocks() as demo_chat:
    gr.TabbedInterface([demo1, demo2], ["Fast", "Critical"])


# --- Combined top-level interface: TTS demo + chat demo ---
with gr.Blocks() as demo_combined:
    gr.Markdown("# Application combinée")
    gr.TabbedInterface([demo_tts, demo_chat], ["TTS", "Chat"])

demo_combined.queue(max_size=300000)


if __name__ == "__main__":
    download_espeak_ng_data()
    demo_combined.launch()