Upload app (Copy).py

app (Copy).py  CHANGED  (+17 -24)
@@ -16,9 +16,6 @@ from transformers import pipeline, AutoTokenizer, Gemma3ForCausalLM
 logging.getLogger('numba').setLevel(logging.WARNING)
 limitation = os.getenv("SYSTEM") == "spaces"  # limit text and audio length in huggingface spaces
 
-# Set an environment variable export HF_HOME='/media/shiroclancy/Local Disk/tma'
-os.environ['HF_HOME'] = '/media/shiroclancy/Local Disk/tma'
-
 hps_ms = utils.get_hparams_from_file(r'config/config.json')
 
 audio_postprocess_ori = gr.Audio.postprocess
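This hunk removes a hard-coded HF_HOME override pointing at a path that only exists on the author's machine, which would fail on Spaces or Kaggle. If a local cache override is still wanted, a more portable sketch (reusing the path from the removed lines, purely as an illustration) is to apply it only when the directory exists:

import os
from pathlib import Path

# Redirect the Hugging Face cache only if the target directory exists;
# this must run before huggingface_hub / transformers are imported.
cache_dir = Path("/media/shiroclancy/Local Disk/tma")
if cache_dir.is_dir():
    os.environ.setdefault("HF_HOME", str(cache_dir))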
@@ -66,7 +63,7 @@ def create_tts_fn(net_g_ms, speaker_id):
                                length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()
 
         #return "Success", (22050, audio)
-        return english, (22050, audio)
+        return english, japanese, (22050, audio)
     return tts_fn
 
 def create_to_symbol_fn(hps):
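tts_fn now returns three values: the English text, the Japanese text, and the (sample_rate, waveform) tuple that gr.Audio accepts, matching the extra Textbox wired up in the last hunk. A minimal sketch of the return contract (values illustrative):

import numpy as np

# gr.Audio consumes a (sample_rate, float_array) tuple; the synthesizer
# here runs at 22050 Hz.
audio = np.zeros(22050, dtype=np.float32)  # one second of silence
result = ("Hello.", "こんにちは。", (22050, audio))  # shape of tts_fn's new return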
@@ -140,26 +137,19 @@ messages = init_character(
 """
 )
 
-def generate_response(
+def generate_response(model, human_prompt, tokenizer = None):
     # Add user message
-    #device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
     messages.append({
         "role": "user",
         "content": [{"type": "text", "text": human_prompt}],
     })
-
-    # Decode
-    response = pipe(messages, max_new_tokens = 2048, temperature = 0.2)[0]['generated_text'][-1]['content']#, do_sample = False)
+    response = model.create_chat_completion(messages = messages)['choices'][0]['message']['content']
 
     # Save assistant reply to history
     #print(response)
     english, japanese = response.split(" <split> ")
-
-    messages.append({
-        "role": "assistant",
-        "content": [{"type": "text", "text": english}],
-    })
-    #messages.pop()
+
+    messages.pop()
     return english, japanese
 
 download_audio_js = """
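generate_response is rewritten around llama-cpp-python's OpenAI-style chat API: model.create_chat_completion takes the running messages list and returns a completion dict, replacing the transformers pipe call. The new trailing messages.pop() removes the user turn that was just appended, and the old block that stored the assistant reply is gone, so every call now sees only the fixed character prompt plus the newest user message (the surviving "# Save assistant reply to history" comment is stale, and the new tokenizer = None parameter is unused). A minimal sketch of the call pattern, assuming llm is the Llama instance created below and the model follows the "english <split> japanese" reply format:

messages.append({
    "role": "user",
    "content": [{"type": "text", "text": "How are you?"}],  # illustrative prompt
})
out = llm.create_chat_completion(messages=messages)
reply = out["choices"][0]["message"]["content"]
messages.pop()  # drop the user turn; only the character prompt persists
english, japanese = reply.split(" <split> ")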
@@ -183,13 +173,15 @@ download_audio_js = """
     oA.remove();
 }}
 """
-
-
-
-
-
-
-
+from llama_cpp import Llama
+
+llm = Llama.from_pretrained(
+    repo_id="google/gemma-3-4b-it-qat-q4_0-gguf",
+    filename='gemma-3-4b-it-q4_0.gguf',
+    local_dir='/kaggle/working/model',
+    n_ctx = 2048
+)
+
 if __name__ == '__main__':
     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
     parser = argparse.ArgumentParser()
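The deleted blank lines make way for the new module-level model setup: Llama.from_pretrained fetches the GGUF file from the Hub and forwards extra keyword arguments such as n_ctx to the Llama constructor; note it runs at import time, before the __main__ guard. A sketch of the equivalent two-step form (same repo and filename as above; the /kaggle path is specific to that environment):

from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Download the quantized GGUF weights, then point the constructor at them.
model_path = hf_hub_download(
    repo_id="google/gemma-3-4b-it-qat-q4_0-gguf",
    filename="gemma-3-4b-it-q4_0.gguf",
    local_dir="/kaggle/working/model",
)
llm = Llama(model_path=model_path, n_ctx=2048)  # 2048-token context window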
@@ -264,10 +256,11 @@ if __name__ == '__main__':
                     nsw = gr.Slider(label="noise_scale_w", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True)
                     ls = gr.Slider(label="length_scale", minimum=0.1, maximum=2.0, step=0.1, value=1.2 if language=="Chinese" else 1, interactive=True)
                 with gr.Column():
-                    o1 = gr.Textbox(label="Output Message")
+                    o1 = gr.Textbox(label="Output Message(English)")
+                    o3 = gr.Textbox(label="Output Message(Japanese)")
                     o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio-en-{name_en.replace(' ','')}")
                     download = gr.Button("Download Audio")
-            btn.click(tts_fn, inputs=[input_text, lang, ns, nsw, ls, symbol_input], outputs=[o1, o2], api_name=f"tts-{name_en}")
+            btn.click(tts_fn, inputs=[input_text, lang, ns, nsw, ls, symbol_input], outputs=[o1, o3, o2], api_name=f"tts-{name_en}")
             download.click(None, [], [], _js=download_audio_js.format(audio_id=f"en-{name_en.replace(' ', '')}"))
             lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls])
             symbol_input.change(
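The UI gains a second Textbox (o3) for the Japanese line, and the click handler's outputs grow to match: Gradio maps the handler's return tuple onto the outputs list positionally, so [o1, o3, o2] must mirror the (english, japanese, audio) order returned by tts_fn. A stripped-down sketch of the wiring (stub handler in place of the real tts_fn):

import gradio as gr

def tts_stub(text):
    # stands in for tts_fn: returns (english, japanese, (sr, waveform))
    return text, text, None

with gr.Blocks() as demo:
    input_text = gr.Textbox(label="Input")
    btn = gr.Button("Generate")
    o1 = gr.Textbox(label="Output Message(English)")
    o3 = gr.Textbox(label="Output Message(Japanese)")
    o2 = gr.Audio(label="Output Audio")
    btn.click(tts_stub, inputs=[input_text], outputs=[o1, o3, o2])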