|
|
import os |
|
|
import time |
|
|
import uuid |
|
|
from datetime import datetime |
|
|
|
|
|
import gradio as gr |
|
|
import soundfile as sf |
|
|
from huggingface_hub import InferenceClient |
|
|
|
|
|
from model import get_pretrained_model, language_to_models |
|
|
|
|
|
|
|
|
def MyPrint(s):
    """Print *s* to stdout prefixed with a microsecond-resolution timestamp."""
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    print(f"{stamp}: {s}")
|
|
|
|
|
|
|
|
# Markdown heading rendered at the top of the TTS tab.
title = "# Next-gen Kaldi: Text-to-speech (TTS)"

# Markdown rendered below the TTS controls: project links and download
# pointers for local / Android / Windows deployments.
description = """
This space shows how to convert text to speech with Next-gen Kaldi.

It is running on CPU within a docker container provided by Hugging Face.

See more information by visiting the following links:

- <https://github.com/k2-fsa/sherpa-onnx>

If you want to deploy it locally, please see
<https://k2-fsa.github.io/sherpa/>

If you want to use Android APKs, please see
<https://k2-fsa.github.io/sherpa/onnx/tts/apk.html>

If you want to use Android text-to-speech engine APKs, please see
<https://k2-fsa.github.io/sherpa/onnx/tts/apk-engine.html>

If you want to download an all-in-one exe for Windows, please see
<https://github.com/k2-fsa/sherpa-onnx/releases/tag/tts-models>
"""

# CSS used by build_html_output(): green box for success, red box for error.
css = """
.result {display:flex;flex-direction:column}
.result_item {padding:15px;margin-bottom:8px;border-radius:15px;width:100%}
.result_item_success {background-color:mediumaquamarine;color:white;align-self:start}
.result_item_error {background-color:#ff7070;color:white;align-self:start}
"""
|
|
|
|
|
# Prefilled rows for gr.Examples.
# Each row matches the inputs of process():
#   [language, "repo_id|speaker info", text, speaker id, speed]
examples = [
    [
        "Chinese (Mandarin, 普通话)",
        "csukuangfj/matcha-icefall-zh-baker|1 speaker",
        "某某银行的副行长和一些行政领导表示,他们去过长江和长白山; 经济不断增长。2024年12月31号,拨打110或者18920240511。123456块钱。",
        0,
        1.0,
    ],
    [
        "Chinese (Mandarin, 普通话)",
        "csukuangfj/vits-zh-hf-fanchen-wnj|1 speaker",
        "在一个阳光明媚的夏天,小马、小羊和小狗它们一块儿在广阔的草地上,嬉戏玩耍,这时小猴来了,还带着它心爱的足球活蹦乱跳地跑前、跑后教小马、小羊、小狗踢足球。",
        0,
        1.0,
    ],
    [
        "Chinese (Mandarin, 普通话)",
        "csukuangfj/vits-zh-hf-fanchen-C|187 speakers",
        '小米的使命是,始终坚持做"感动人心、价格厚道"的好产品,让全球每个人都能享受科技带来的美好生活。',
        0,
        1.0,
    ],
    ["Min-nan (闽南话)", "csukuangfj/vits-mms-nan", "ài piaǸ chiah ē iaN̂", 0, 1.0],
    ["Thai", "csukuangfj/vits-mms-tha", "ฉันรักคุณ", 0, 1.0],
    [
        "Chinese (Mandarin, 普通话)",
        "csukuangfj/sherpa-onnx-vits-zh-ll|5 speakers",
        "当夜幕降临,星光点点,伴随着微风拂面,我在静谧中感受着时光的流转,思念如涟漪荡漾,梦境如画卷展开,我与自然融为一体,沉静在这片宁静的美丽之中,感受着生命的奇迹与温柔。",
        2,
        1.0,
    ],
]
|
|
|
|
|
|
|
|
def update_model_dropdown(language: str):
    """Return a refreshed model dropdown listing the models for *language*.

    The first model of the language becomes the selected value.

    Raises:
        ValueError: if *language* has no entry in ``language_to_models``.
    """
    if language not in language_to_models:
        raise ValueError(f"Unsupported language: {language}")

    models = language_to_models[language]
    return gr.Dropdown(
        choices=models,
        value=models[0],
        interactive=True,
    )
|
|
|
|
|
|
|
|
def build_html_output(s: str, style: str = "result_item_success"):
    """Wrap *s* in the result-box markup used by the Info HTML component.

    *style* selects the CSS class: "result_item_success" (green) or
    "result_item_error" (red).
    """
    return f"""
    <div class='result'>
        <div class='result_item {style}'>
          {s}
        </div>
    </div>
    """
|
|
|
|
|
|
|
|
def process(language: str, repo_id: str, text: str, sid: str, speed: float):
    """Synthesize *text* with the selected model and return the result.

    Args:
        language: Display language name (logged only; model choice is repo_id).
        repo_id: Model identifier, e.g. "csukuangfj/...|N speakers".
        sid: Speaker ID as a string; parsed to int (multi-speaker models only).
        speed: Speech speed factor passed to the model loader.

    Returns:
        (wav_filename, info_html): path of the generated wave file and an
        HTML snippet with duration / processing time / RTF statistics.

    Raises:
        ValueError: if the model produced no audio samples.
    """
    MyPrint(f"Input text: {text}. sid: {sid}, speed: {speed}")
    sid = int(sid)
    tts = get_pretrained_model(repo_id, speed)

    start = time.time()
    audio = tts.generate(text, sid=sid)
    end = time.time()

    if len(audio.samples) == 0:
        raise ValueError(
            "Error in generating audios. Please read previous error messages."
        )

    duration = len(audio.samples) / audio.sample_rate

    elapsed_seconds = end - start
    # Real-time factor: < 1 means faster than real time.
    rtf = elapsed_seconds / duration

    info = f"""
    Wave duration : {duration:.3f} s <br/>
    Processing time: {elapsed_seconds:.3f} s <br/>
    RTF: {elapsed_seconds:.3f}/{duration:.3f} = {rtf:.3f} <br/>
    """

    MyPrint(info)
    MyPrint(f"\nrepo_id: {repo_id}\ntext: {text}\nsid: {sid}\nspeed: {speed}")

    # Bug fix: the uuid was previously generated and then immediately
    # overwritten with the constant name "(unknown).wav", so every request
    # wrote to the same file and concurrent requests clobbered each other.
    # Use the uuid so each request gets a unique output file.
    filename = f"{uuid.uuid4()}.wav"
    sf.write(
        filename,
        audio.samples,
        samplerate=audio.sample_rate,
        subtype="PCM_16",
    )

    return filename, build_html_output(info)
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# TTS tab UI: language selector -> model dropdown -> text/sid/speed inputs,
# with example rows and an info panel showing synthesis statistics.
# ---------------------------------------------------------------------------
with gr.Blocks(css=css) as demo_tts:
    gr.Markdown(title)
    language_choices = list(language_to_models.keys())

    # Changing the language refreshes the model dropdown below.
    language_radio = gr.Radio(
        label="Language",
        choices=language_choices,
        value=language_choices[0],
    )

    model_dropdown = gr.Dropdown(
        choices=language_to_models[language_choices[0]],
        label="Select a model",
        value=language_to_models[language_choices[0]][0],
    )

    language_radio.change(
        update_model_dropdown,
        inputs=language_radio,
        outputs=model_dropdown,
    )

    with gr.Tabs():
        with gr.TabItem("Please input your text"):
            input_text = gr.Textbox(
                label="Input text",
                info="Your text",
                lines=3,
                placeholder="Please input your text here",
            )

            # Kept as a text field; process() parses it to int.
            input_sid = gr.Textbox(
                label="Speaker ID",
                info="Speaker ID",
                lines=1,
                max_lines=1,
                value="0",
                placeholder="Speaker ID. Valid only for mult-speaker model",
            )

            input_speed = gr.Slider(
                minimum=0.1,
                maximum=10,
                value=1,
                step=0.1,
                label="Speed (larger->faster; smaller->slower)",
            )

            input_button = gr.Button("Submit")

            output_audio = gr.Audio(label="Output")

            output_info = gr.HTML(label="Info")

            # Clicking an example row runs process() with the prefilled inputs.
            gr.Examples(
                examples=examples,
                fn=process,
                inputs=[
                    language_radio,
                    model_dropdown,
                    input_text,
                    input_sid,
                    input_speed,
                ],
                outputs=[
                    output_audio,
                    output_info,
                ],
            )

    input_button.click(
        process,
        inputs=[
            language_radio,
            model_dropdown,
            input_text,
            input_sid,
            input_speed,
        ],
        outputs=[
            output_audio,
            output_info,
        ],
    )

    gr.Markdown(description)
|
|
|
|
|
|
|
|
def download_espeak_ng_data():
    """Download and unpack espeak-ng-data into /tmp.

    Some TTS models need this data at inference time. Best-effort: a failure
    is logged but does not abort startup, matching the original behavior.
    """
    status = os.system(
        """
        cd /tmp
        wget -qq https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/espeak-ng-data.tar.bz2
        tar xf espeak-ng-data.tar.bz2
        """
    )
    # Bug fix: the shell exit status used to be silently discarded, hiding
    # download/extraction failures until synthesis broke later.
    if status != 0:
        MyPrint(f"Failed to download espeak-ng-data (exit status {status})")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Shared inference client used by both chat functions below.
# Bug fix: a "google/gemma-1.1-2b-it" client was previously constructed here
# and immediately overwritten — the dead assignment has been removed; only
# the Mistral client was ever used.
client = InferenceClient("mistralai/Mistral-Nemo-Instruct-2407")
|
|
|
|
|
def models(Query):
    """Stream a short, concise answer to *Query* from the chat model.

    Generator: yields the accumulated response text after each streamed
    chunk, so the Gradio textbox updates incrementally.
    """
    messages = []
    messages.append({"role": "user", "content": f"[SYSTEM] You are ASSISTANT who answer question asked by user in short and concise manner. [USER] {Query}"})
    Response = ""
    for message in client.chat_completion(
        messages,
        max_tokens=2048,
        stream=True
    ):
        token = message.choices[0].delta.content
        # Bug fix: streamed deltas may carry content=None (e.g. the final
        # chunk); concatenating None to a str raises TypeError.
        if token:
            Response += token
        yield Response
|
|
|
|
|
def nemo(query):
    """Answer *query* using a step-budgeted, self-reflective reasoning prompt.

    Builds a large instruction prompt that asks the model to reason in at
    most *budget* <step> blocks, self-evaluate, and emit a final <answer>.
    Streams the generation internally but returns the full text at once.
    """
    # Maximum number of reasoning steps the prompt allows the model to take.
    budget = 3
    # NOTE: this prompt text is part of runtime behavior — do not edit casually.
    message = f"""[INST] [SYSTEM] You are a helpful french assistant in normal conversation.
When given a problem to solve, you are an expert problem-solving assistant.
Your task is to provide a detailed, step-by-step solution to a given question.
Follow these instructions carefully:
1. Read the given question carefully and reset counter between <count> and </count> to {budget} (maximum 3 steps).
2. Think critically like a human researcher or scientist. Break down the problem using first principles to conceptually understand and answer the question.
3. Generate a detailed, logical step-by-step solution.
4. Enclose each step of your solution within <step> and </step> tags.
5. You are allowed to use at most {budget} steps (starting budget), keep track of it by counting down within tags <count> </count>, STOP GENERATING MORE STEPS when hitting 0, you don't have to use all of them.
6. Do a self-reflection when you are unsure about how to proceed, based on the self-reflection and reward, decide whether you need to return to the previous steps.
7. After completing the solution steps, reorganize and synthesize the steps into the final answer within <answer> and </answer> tags.
8. Provide a critical, honest, and subjective self-evaluation of your reasoning process within <reflection> and </reflection> tags.
9. Assign a quality score to your solution as a float between 0.0 (lowest quality) and 1.0 (highest quality), enclosed in <reward> and </reward> tags.
Example format:
<count> [starting budget] </count>
<step> [Content of step 1] </step>
<count> [remaining budget] </count>
<step> [Content of step 2] </step>
<reflection> [Evaluation of the steps so far] </reflection>
<reward> [Float between 0.0 and 1.0] </reward>
<count> [remaining budget] </count>
<step> [Content of step 3 or Content of some previous step] </step>
<count> [remaining budget] </count>
...
<step> [Content of final step] </step>
<count> [remaining budget] </count>
<answer> [Final Answer] </answer> (must give final answer in this format)
<reflection> [Evaluation of the solution] </reflection>
<reward> [Float between 0.0 and 1.0] </reward> [/INST] [INST] [QUERY] {query} [/INST] [ASSISTANT] """
    # details=True yields token detail objects; accumulate their text.
    stream = client.text_generation(message, max_new_tokens=4096, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
    return output
|
|
|
|
|
# Heading/tagline for the fast chat tab.
# NOTE(review): the user-facing text contains typos ("reponse", missing verb
# agreement); left byte-identical to preserve runtime behavior.
description_chat = "# Light ChatBox\n### Enter a question and.. Tada this reponse generate in 0.5 second!"

# "Fast" tab: short streamed answers from models().
with gr.Blocks() as demo1:
    gr.Interface(description=description_chat, fn=models, inputs=["text"], outputs="text")

# "Critical" tab: slower step-by-step reasoning via nemo().
with gr.Blocks() as demo2:
    gr.Interface(description="Very low but critical thinker", fn=nemo, inputs=["text"], outputs="text", api_name="critical_thinker", concurrency_limit=10)

# Chat demo combining both chat modes in tabs.
with gr.Blocks() as demo_chat:
    gr.TabbedInterface([demo1, demo2], ["Fast", "Critical"])
|
|
|
|
|
|
|
|
|
|
|
# Top-level app: TTS demo and chat demo combined into one tabbed interface.
with gr.Blocks() as demo_combined:
    gr.Markdown("# Application combinée")
    gr.TabbedInterface([demo_tts, demo_chat], ["TTS", "Chat"])

# Queue incoming requests so concurrent users are served in order.
demo_combined.queue(max_size=300000)
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # espeak-ng data must be in /tmp before some TTS models can synthesize.
    download_espeak_ng_data()
    demo_combined.launch()
|
|
|