Shiro2 commited on
Commit
6883b7d
·
verified ·
1 Parent(s): 0eadf65

Upload app (Copy).py

Browse files
Files changed (1) hide show
  1. app (Copy).py +298 -0
app (Copy).py ADDED
@@ -0,0 +1,298 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ import os
3
+ import re
4
+ import argparse
5
+ import utils
6
+ import commons
7
+ import json
8
+ import torch
9
+ import gradio as gr
10
+ from models import SynthesizerTrn
11
+ from text import text_to_sequence, _clean_text
12
+ from torch import no_grad, LongTensor
13
+ from gradio_client import utils as client_utils
14
+ import logging
15
+ from transformers import pipeline, AutoTokenizer, Gemma3ForCausalLM
16
+ logging.getLogger('numba').setLevel(logging.WARNING)
17
+ limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces
18
+
19
+ # Set an environment variable export HF_HOME='/media/shiroclancy/Local Disk/tma'
20
+ os.environ['HF_HOME'] = '/media/shiroclancy/Local Disk/tma'
21
+
22
+ hps_ms = utils.get_hparams_from_file(r'config/config.json')
23
+
24
+ audio_postprocess_ori = gr.Audio.postprocess
25
+
26
+ def audio_postprocess(self, y):
27
+ data = audio_postprocess_ori(self, y)
28
+ if data is None:
29
+ return None
30
+ return client_utils.encode_url_or_file_to_base64(data["name"])
31
+
32
+
33
+ gr.Audio.postprocess = audio_postprocess
34
+
35
+ def get_text(text, hps, is_symbol):
36
+ text_norm, clean_text = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners)
37
+ if hps.data.add_blank:
38
+ text_norm = commons.intersperse(text_norm, 0)
39
+ text_norm = LongTensor(text_norm)
40
+ return text_norm, clean_text
41
+
42
def create_tts_fn(net_g_ms, speaker_id):
    """Build a Gradio callback that chats with the LLM and speaks the reply.

    Args:
        net_g_ms: a loaded SynthesizerTrn VITS model (already on `device`).
        speaker_id: speaker index passed to multi-speaker models.

    Returns:
        tts_fn(text, language, noise_scale, noise_scale_w, length_scale,
        is_symbol) -> (message, (sample_rate, audio)) — the English half of
        the chat reply and the synthesized Japanese audio, or an error
        message and None when the text is too long.
    """
    def tts_fn(text, language, noise_scale, noise_scale_w, length_scale, is_symbol):
        # Ask the chat model for a bilingual reply; synthesize the Japanese
        # half and surface the English half as the output message.
        english, japanese = generate_response(pipe, text)
        text = japanese.replace('\n', ' ').replace('\r', '').replace(" ", "")
        if limitation:
            # Count length without the [XX] language tags.  Raw string avoids
            # the invalid "\[" escape (SyntaxWarning on Python 3.12+).
            text_len = len(re.sub(r"\[([A-Z]{2})\]", "", text))
            max_len = 100
            if is_symbol:
                max_len *= 3
            if text_len > max_len:
                return "Error: Text is too long", None
        if not is_symbol:
            if language == 0:
                text = f"[ZH]{text}[ZH]"
            elif language == 1:
                text = f"[JA]{text}[JA]"
            else:
                text = f"{text}"
        stn_tst, clean_text = get_text(text, hps_ms, is_symbol)
        # NOTE(review): `device` is bound in the __main__ block — this closure
        # assumes it exists at call time; confirm when importing this module.
        with no_grad():
            x_tst = stn_tst.unsqueeze(0).to(device)
            x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device)
            sid = LongTensor([speaker_id]).to(device)
            audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=noise_scale, noise_scale_w=noise_scale_w,
                                   length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()

        # Sample rate is fixed by the pretrained VITS checkpoints (22050 Hz).
        return english, (22050, audio)
    return tts_fn
71
+
72
def create_to_symbol_fn(hps):
    """Build a callback that converts the textbox content to its symbol form.

    The returned function wraps the text in [ZH]/[JA] tags according to the
    selected language, runs the configured text cleaners, and returns the
    cleaned symbol string — or an empty string when symbol input is off.
    """
    def to_symbol_fn(is_symbol_input, input_text, temp_lang):
        if not is_symbol_input:
            return ''
        if temp_lang == 0:
            wrapped = f'[ZH]{input_text}[ZH]'
        elif temp_lang == 1:
            wrapped = f'[JA]{input_text}[JA]'
        else:
            wrapped = input_text
        return _clean_text(wrapped, hps.data.text_cleaners)

    return to_symbol_fn
83
def change_lang(language):
    """Return default (noise_scale, noise_scale_w, length_scale) per language.

    Chinese (index 0) speaks slower (length_scale 1.2); Japanese and Mix
    use 1.  The noise parameters are the same for every language.
    """
    length_scale = 1.2 if language == 0 else 1
    return 0.6, 0.668, length_scale
90
+
91
# Persona prompt filled in by init_character(); the model is asked to answer
# every turn as "English version <split> Japanese version".
SYSTEM_PROMPT_TEMPLATE = """
You are an AI chatbot roleplaying as {character_name}.
Your personality traits are:
{personality}
Stay in character at all times.
Do not break character or mention that you are an AI.
Always respond in a way consistent with {character_name}'s personality, tone, and background.
Here are examples of how {character_name} responds in 2 versions format:
English version <split> Japanese version
{example}
Follow this style and tone in every response.
Answer in 'English version <split> Japanese version' format also.
"""


def init_character(character_name, personality, example):
    """Build the initial chat history: one system turn describing the persona.

    Args:
        character_name: display name of the roleplay character.
        personality: free-text description of the character's traits.
        example: sample replies in 'English <split> Japanese' format.

    Returns:
        A one-element message list in the transformers chat format.
    """
    prompt = SYSTEM_PROMPT_TEMPLATE.format(
        character_name=character_name,
        personality=personality,
        example=example,
    )
    system_turn = {
        "role": "system",
        "content": [{"type": "text", "text": prompt}],
    }
    return [system_turn]
117
+
118
+ messages = init_character(
119
+ character_name='Misono Mika from Blue Archive',
120
+ personality =
121
+ """
122
+ She is a very talkative person, rarely particularly paying much mind to the current mood or flow of the conversation. She likes to interject her own, unfiltered thoughts into the current conversation.
123
+ She is not particularly bright and can be viewed as a happy-go-lucky type of person. Even in serious situations, she often acts in a carefree manner, though it sometimes devolves into a mockery.
124
+ """,
125
+ example =
126
+ """
127
+ Don't worry, I, Misono Mika, have finally arrived! Oh, we're already well acquainted, so let's skip the formalities, okay? I'm looking forward to working with you, Sensei. <split> 聖園ミカ、ついに登場~☆ って感じかな? あっ、私と先生の仲だしアイスブレイクとかは いらないよね?これからよろしくね、先生。
128
+ Hmm, it's a bit tight...but I think it'll be okay anyway! <split> ふーん。 ちょっと狭いけど… これはこれで 良いんじゃない?
129
+ Hahaha! What's this? So silly! <split> あははっ! 何これ、 おもしろーい☆
130
+ You know, I used to have something like this before... <split> 私も昔、 これと似たようなの 持ってたなぁ…。
131
+ Well, I don't think I'll be bored around here. <split> ここは 退屈しなさそう。
132
+ Hm, I guess Sensei isn't around... <split> 先生は 居ないのかぁ…。
133
+ Oh, Sensei! You're back! You kept me waiting, you know! <split> 先生、おかえり! 待ってたよ!
134
+ Welcome! Don't worry, I was perfectly well-behaved while you were gone. <split> おかえり、先生! ちゃーんといい子で お留守番してたよ。
135
+ It's a beautiful day, isn't it? <split> ん~! 今日も良い天気だね!
136
+ It seems like a shame to spend it cooped up inside. <split> こんな日に仕事ばかりなんて、 勿体なくない?
137
+ ...If it's all right with you, let's go for a walk after work? <split> …良かったら、仕事終わりに お散歩とかどうかな?
138
+ Is this how student duty is supposed to be? <split> あのさ…当番って、 こういうのなの?
139
+ I mean, I didn't really know what to expect, but... <split> べ、別に何か 期待してるわけじゃ…。
140
+ """
141
+ )
142
+
143
def generate_response(pipe, human_prompt, tokenizer=None):
    """Run one chat turn through the text-generation pipeline.

    Appends the user turn to the module-level `messages` history, generates
    a reply, records the English half as the assistant turn, and returns
    both halves of the reply.

    Args:
        pipe: a transformers "text-generation" pipeline in chat format.
        human_prompt: the user's message text.
        tokenizer: unused; kept for backward compatibility with callers.

    Returns:
        (english, japanese) halves of the model's reply.  When the reply
        does not contain the " <split> " separator, the full reply is
        returned for both halves instead of raising ValueError (which the
        previous `str.split` implementation did, and also raised when the
        separator appeared more than once).
    """
    messages.append({
        "role": "user",
        "content": [{"type": "text", "text": human_prompt}],
    })

    # Greedy-ish decode; the pipeline returns the full chat, so take the
    # content of the last (assistant) turn.
    response = pipe(messages, max_new_tokens=2048, temperature=0.2)[0]['generated_text'][-1]['content']

    # partition() tolerates zero or many separators: a malformed reply
    # degrades gracefully instead of crashing the Gradio callback.
    english, sep, japanese = response.partition(" <split> ")
    if not sep:
        japanese = english

    # Only the English half is kept in the running history.
    messages.append({
        "role": "assistant",
        "content": [{"type": "text", "text": english}],
    })
    return english, japanese
164
+
165
+ download_audio_js = """
166
+ () =>{{
167
+ let root = document.querySelector("body > gradio-app");
168
+ if (root.shadowRoot != null)
169
+ root = root.shadowRoot;
170
+ let audio = root.querySelector("#tts-audio-{audio_id}").querySelector("audio");
171
+ let text = root.querySelector("#input-text-{audio_id}").querySelector("textarea");
172
+ if (audio == undefined)
173
+ return;
174
+ text = text.value;
175
+ if (text == undefined)
176
+ text = Math.floor(Math.random()*100000000);
177
+ audio = audio.src;
178
+ let oA = document.createElement("a");
179
+ oA.download = text.substr(0, 20)+'.wav';
180
+ oA.href = audio;
181
+ document.body.appendChild(oA);
182
+ oA.click();
183
+ oA.remove();
184
+ }}
185
+ """
186
+ model_id = "google/gemma-3-4b-it"
187
+ pipe = pipeline("text-generation",
188
+ model=model_id,
189
+ #tokenizer=tokenizer,
190
+ #device=device,
191
+ device_map ='auto',
192
+ torch_dtype=torch.bfloat16)
193
+ if __name__ == '__main__':
194
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
195
+ parser = argparse.ArgumentParser()
196
+ parser.add_argument('--api', action="store_true", default=False)
197
+ parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
198
+ args = parser.parse_args()
199
+ if False:
200
+ english, japanese = generate_response(pipe, "How are you?")
201
+ print(english)
202
+ print(japanese)
203
+ exit()
204
+
205
+
206
+ models = []
207
+ with open("pretrained_models/info.json", "r", encoding="utf-8") as f:
208
+ models_info = json.load(f)
209
+ for i, info in models_info.items():
210
+ if not info['enable']:
211
+ continue
212
+ sid = info['sid']
213
+ name_en = info['name_en']
214
+ name_zh = info['name_zh']
215
+ title = info['title']
216
+ cover = f"pretrained_models/{i}/{info['cover']}"
217
+ example = info['example']
218
+ language = info['language']
219
+ net_g_ms = SynthesizerTrn(
220
+ len(hps_ms.symbols),
221
+ hps_ms.data.filter_length // 2 + 1,
222
+ hps_ms.train.segment_size // hps_ms.data.hop_length,
223
+ n_speakers=hps_ms.data.n_speakers if info['type'] == "multi" else 0,
224
+ **hps_ms.model)
225
+ utils.load_checkpoint(f'pretrained_models/{i}/{i}.pth', net_g_ms, None)
226
+ _ = net_g_ms.eval().to(device)
227
+ models.append((sid, name_en, name_zh, title, cover, example, language, net_g_ms, create_tts_fn(net_g_ms, sid), create_to_symbol_fn(hps_ms)))
228
+ with gr.Blocks() as app:
229
+ gr.Markdown(
230
+ "# <center> vits-models\n"
231
+ "## <center> Please do not generate content that could infringe upon the rights or cause harm to individuals or organizations.\n"
232
+ "## <center> ·请不要生成会对个人以及组织造成侵害的内容\n"
233
+ "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=sayashi.vits-models)\n\n"
234
+ "[Open In Colab]"
235
+ "(https://colab.research.google.com/drive/10QOk9NPgoKZUXkIhhuVaZ7SYra1MPMKH?usp=share_link)"
236
+ " without queue and length limitation.(无需等待队列,并且没有长度限制)\n\n"
237
+ "[Finetune your own model](https://github.com/SayaSS/vits-finetuning)"
238
+ )
239
+
240
+ with gr.Tabs():
241
+ with gr.TabItem("EN"):
242
+ for (sid, name_en, name_zh, title, cover, example, language, net_g_ms, tts_fn, to_symbol_fn) in models:
243
+ with gr.TabItem(name_en):
244
+ with gr.Row():
245
+ gr.Markdown(
246
+ '<div align="center">'
247
+ f'<a><strong>{title}</strong></a>'
248
+ f'<img style="width:auto;height:300px;" src="file/{cover}">' if cover else ""
249
+ '</div>'
250
+ )
251
+ with gr.Row():
252
+ with gr.Column():
253
+ input_text = gr.Textbox(label="Text (100 words limitation)" if limitation else "Text", lines=5, value=example, elem_id=f"input-text-en-{name_en.replace(' ','')}")
254
+ lang = gr.Dropdown(label="Language", choices=["Chinese", "Japanese", "Mix(wrap the Chinese text with [ZH][ZH], wrap the Japanese text with [JA][JA])"],
255
+ type="index", value=language)
256
+ with gr.Accordion(label="Advanced Options", open=False):
257
+ symbol_input = gr.Checkbox(value=False, label="Symbol input")
258
+ symbol_list = gr.Dataset(label="Symbol list", components=[input_text],
259
+ samples=[[x] for x in hps_ms.symbols])
260
+ symbol_list_json = gr.Json(value=hps_ms.symbols, visible=False)
261
+ btn = gr.Button(value="Generate", variant="primary")
262
+ with gr.Row():
263
+ ns = gr.Slider(label="noise_scale", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True)
264
+ nsw = gr.Slider(label="noise_scale_w", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True)
265
+ ls = gr.Slider(label="length_scale", minimum=0.1, maximum=2.0, step=0.1, value=1.2 if language=="Chinese" else 1, interactive=True)
266
+ with gr.Column():
267
+ o1 = gr.Textbox(label="Output Message")
268
+ o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio-en-{name_en.replace(' ','')}")
269
+ download = gr.Button("Download Audio")
270
+ btn.click(tts_fn, inputs=[input_text, lang, ns, nsw, ls, symbol_input], outputs=[o1, o2], api_name=f"tts-{name_en}")
271
+ download.click(None, [], [], _js=download_audio_js.format(audio_id=f"en-{name_en.replace(' ', '')}"))
272
+ lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls])
273
+ symbol_input.change(
274
+ to_symbol_fn,
275
+ [symbol_input, input_text, lang],
276
+ [input_text]
277
+ )
278
+ symbol_list.click(None, [symbol_list, symbol_list_json], [input_text],
279
+ _js=f"""
280
+ (i,symbols) => {{
281
+ let root = document.querySelector("body > gradio-app");
282
+ if (root.shadowRoot != null)
283
+ root = root.shadowRoot;
284
+ let text_input = root.querySelector("#input-text-en-{name_en.replace(' ', '')}").querySelector("textarea");
285
+ let startPos = text_input.selectionStart;
286
+ let endPos = text_input.selectionEnd;
287
+ let oldTxt = text_input.value;
288
+ let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos);
289
+ text_input.value = result;
290
+ let x = window.scrollX, y = window.scrollY;
291
+ text_input.focus();
292
+ text_input.selectionStart = startPos + symbols[i].length;
293
+ text_input.selectionEnd = startPos + symbols[i].length;
294
+ text_input.blur();
295
+ window.scrollTo(x, y);
296
+ return text_input.value;
297
+ }}""")
298
+ app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share)