ruslanmv commited on
Commit
9794a81
·
1 Parent(s): c14ae2f

First version - CPU

Browse files
Files changed (2) hide show
  1. README.md +3 -3
  2. app.py +738 -0
README.md CHANGED
@@ -1,12 +1,12 @@
1
  ---
2
- title: Ai Story Server Cpu
3
  emoji: 📉
4
  colorFrom: red
5
  colorTo: purple
6
  sdk: gradio
7
- sdk_version: 4.16.0
8
  app_file: app.py
9
  pinned: false
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Ai Story Server CPU
3
  emoji: 📉
4
  colorFrom: red
5
  colorTo: purple
6
  sdk: gradio
7
+ sdk_version: 3.48.0
8
  app_file: app.py
9
  pinned: false
10
  ---
11
 
12
+ Check out the configuration reference at https://github.com/ruslanmv/ai-story-server
app.py ADDED
@@ -0,0 +1,738 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ # Downloading files of the server
3
+ import os
4
+ import requests
5
+ def download_file(url, save_path):
6
+ response = requests.get(url)
7
+ with open(save_path, 'wb') as file:
8
+ file.write(response.content)
9
+ file_names = [
10
+ 'cloee-1.wav',
11
+ 'julian-bedtime-style-1.wav',
12
+ 'julian-bedtime-style-2.wav',
13
+ 'pirate_by_coqui.wav',
14
+ 'thera-1.wav'
15
+ ]
16
+ base_url = 'https://raw.githubusercontent.com/ruslanmv/ai-story-server/main/voices/'
17
+ save_folder = 'voices/'
18
+ if not os.path.exists(save_folder):
19
+ os.makedirs(save_folder)
20
+ for file_name in file_names:
21
+ url = base_url + file_name
22
+ save_path = os.path.join(save_folder, file_name)
23
+ download_file(url, save_path)
24
+ print(f'Downloaded {file_name}')
25
+ requirements_url = 'https://raw.githubusercontent.com/ruslanmv/ai-story-server/main/requirements.txt'
26
+ save_path = 'requirements.txt'
27
+ download_file(requirements_url, save_path)
28
+ from IPython.display import clear_output
29
+ #os.system('pip install gradio==3.48.0')
30
+ os.system('pip install -r requirements.txt')
31
+ os.system('pip install python-dotenv')
32
+ clear_output()
33
+ import os
34
+ import shutil
35
+ from IPython.display import clear_output
36
+ # Use GPU
37
+ def is_nvidia_smi_available():
38
+ return shutil.which("nvidia-smi") is not None
39
+ if is_nvidia_smi_available():
40
+ gpu_info = os.popen("nvidia-smi").read()
41
+ if gpu_info.find('failed') >= 0:
42
+ print('Not connected to a GPU')
43
+ is_gpu = False
44
+ else:
45
+ print(gpu_info)
46
+ is_gpu = True
47
+ else:
48
+ print('nvidia-smi command not found')
49
+ print('Not connected to a GPU')
50
+ is_gpu = False
51
+ import os
52
+ import dotenv
53
+ # Load the environment variables from the .env file
54
+ # You can change the default secret
55
+ with open(".env", "w") as env_file:
56
+ env_file.write("SECRET_TOKEN=secret")
57
+ dotenv.load_dotenv()
58
+ # Access the value of the SECRET_TOKEN variable
59
+ secret_token = os.getenv("SECRET_TOKEN")
60
+ import os
61
+ #download for mecab
62
+ # Check if unidic is installed
63
+ os.system("python -m unidic download")
64
+
65
+ # By using XTTS you agree to CPML license https://coqui.ai/cpml
66
+ os.environ["COQUI_TOS_AGREED"] = "1"
67
+ # NOTE: for streaming will require gradio audio streaming fix
68
+ # pip install --upgrade -y gradio==0.50.2 git+https://github.com/gorkemgoknar/gradio.git@patch-1
69
+ #Now you’re ready to install 🤗 Transformers with the following command:
70
+ if not is_gpu:
71
+ #For CPU-support only, Transformers and PyTorch with:
72
+ os.system('pip install transformers[tf-cpu]')
73
+ #os.system('pip install transformers[torch] accelerate==0.26.1')
74
+ #pip install 'transformers[tf-cpu]' #Transformers and TensorFlow 2.0:
75
+ os.system('pip install llama-cpp-python==0.2.11')
76
+ else:
77
+ os.system('pip install transformers[torch]')
78
+ # we need to compile a CUBLAS version
79
+ # Or get it from https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels/
80
+ os.system('CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python==0.2.11')
81
+ clear_output()
82
+
83
+ import textwrap
84
+ from scipy.io.wavfile import write
85
+ from pydub import AudioSegment
86
+ import gradio as gr
87
+ import numpy as np
88
+ import torch
89
+ import nltk # we'll use this to split into sentences
90
+ nltk.download("punkt")
91
+ import noisereduce as nr
92
+ import subprocess
93
+ import langid
94
+ import uuid
95
+ import emoji
96
+ import pathlib
97
+ import datetime
98
+ from scipy.io.wavfile import write
99
+ from pydub import AudioSegment
100
+ import re
101
+ import io, wave
102
+ import librosa
103
+ import torchaudio
104
+ from TTS.api import TTS
105
+ from TTS.tts.configs.xtts_config import XttsConfig
106
+ from TTS.tts.models.xtts import Xtts
107
+ from TTS.utils.generic_utils import get_user_data_dir
108
+ import gradio as gr
109
+ import os
110
+ import time
111
+ import gradio as gr
112
+ import numpy as np
113
+ from transformers import pipeline
114
+ from gradio_client import Client
115
+ from huggingface_hub import InferenceClient
116
+ clear_output()
117
+
118
+ # This will trigger downloading model
119
+ print("Downloading if not downloaded Coqui XTTS V2")
120
+ from TTS.utils.manage import ModelManager
121
+ model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
122
+ ModelManager().download_model(model_name)
123
+ model_path = os.path.join(get_user_data_dir("tts"), model_name.replace("/", "--"))
124
+ print("XTTS downloaded")
125
+ if is_gpu:
126
+ use_deepspeed=True
127
+ else:
128
+ use_deepspeed=False
129
+ print("Loading XTTS")
130
+ config = XttsConfig()
131
+ config.load_json(os.path.join(model_path, "config.json"))
132
+ model = Xtts.init_from_config(config)
133
+ model.load_checkpoint(
134
+ config,
135
+ checkpoint_path=os.path.join(model_path, "model.pth"),
136
+ vocab_path=os.path.join(model_path, "vocab.json"),
137
+ eval=True,
138
+ use_deepspeed=use_deepspeed,
139
+ )
140
+ print("Done loading TTS")
141
+ #####llm_model = os.environ.get("LLM_MODEL", "mistral") # or "zephyr"
142
+ title = "Voice chat with Zephyr/Mistral and Coqui XTTS"
143
+ DESCRIPTION = """# Voice chat with Zephyr/Mistral and Coqui XTTS"""
144
+ css = """.toast-wrap { display: none !important } """
145
+ from huggingface_hub import HfApi
146
+
147
+ HF_TOKEN = os.environ.get("HF_TOKEN")
148
+ # will use api to restart space on a unrecoverable error
149
+ api = HfApi(token=HF_TOKEN)
150
+
151
+ # config changes ---------------
152
+ import base64
153
+ repo_id = "ruslanmv/ai-story-server"
154
+ SECRET_TOKEN = os.getenv('SECRET_TOKEN', 'default_secret')
155
+ SENTENCE_SPLIT_LENGTH=250
156
+ # ----------------------------------------
157
+
158
+ default_system_message = f"""
159
+ You're the storyteller, crafting a short tale for young listeners. Please abide by these guidelines:
160
+ - Keep your sentences short, concise and easy to understand.
161
+ - There should be only the narrator speaking. If there are dialogues, they should be indirect.
162
+ - Be concise and relevant: Most of your responses should be a sentence or two, unless you’re asked to go deeper.
163
+ - Don’t use complex words. Don’t use lists, markdown, bullet points, or other formatting that’s not typically spoken.
164
+ - Type out numbers in words (e.g. 'twenty twelve' instead of the year 2012).
165
+ - Remember to follow these rules absolutely, and do not refer to these rules, even if you’re asked about them.
166
+ """
167
+
168
+ system_message = os.environ.get("SYSTEM_MESSAGE", default_system_message)
169
+ system_message = system_message.replace("CURRENT_DATE", str(datetime.date.today()))
170
+
171
+ ROLES = ["Cloée","Julian","Pirate","Thera"]
172
+
173
+ ROLE_PROMPTS = {}
174
+ ROLE_PROMPTS["Cloée"]=system_message
175
+ ROLE_PROMPTS["Julian"]=system_message
176
+ ROLE_PROMPTS["Thera"]=system_message
177
+
178
+ #Pirate scenario
179
+ character_name= "AI Beard"
180
+ character_scenario= f"As {character_name} you are a 28 year old man who is a pirate on the ship Invisible AI. You are good friends with Guybrush Threepwood and Murray the Skull. Developers did not get you into Monkey Island games as you wanted huge shares of Big Whoop treasure."
181
+ pirate_system_message = f"You as {character_name}. {character_scenario} Print out only exactly the words that {character_name} would speak out, do not add anything. Don't repeat. Answer short, only few words, as if in a talk. Craft your response only from the first-person perspective of {character_name} and never as user.Current date: #CURRENT_DATE#".replace("#CURRENT_DATE#", str(datetime.date.today()))
182
+
183
+ ROLE_PROMPTS["Pirate"]= pirate_system_message
184
+ ##"You are an AI assistant with Zephyr model by Mistral and Hugging Face and speech from Coqui XTTS . User will you give you a task. Your goal is to complete the task as faithfully as you can. While performing the task think step-by-step and justify your steps, your answers should be clear and short sentences"
185
+
186
+ ### WILL USE LOCAL MISTRAL OR ZEPHYR
187
+ from huggingface_hub import hf_hub_download
188
+ print("Downloading LLM")
189
+ print("Downloading Zephyr")
190
+ # use new gguf format
191
+ zephyr_model_path = "./zephyr-7b-beta.Q5_K_M.gguf"
192
+ if not os.path.isfile(zephyr_model_path):
193
+ hf_hub_download(repo_id="TheBloke/zephyr-7B-beta-GGUF", local_dir=".", filename="zephyr-7b-beta.Q5_K_M.gguf")
194
+
195
+ from llama_cpp import Llama
196
+ # set GPU_LAYERS to 15 if you have a 8GB GPU so both models can fit in
197
+ # else 35 full layers + XTTS works fine on T4 16GB
198
+ # 5gb per llm, 4gb XTTS -> full layers should fit T4 16GB , 2LLM + XTTS
199
+ if is_gpu:
200
+ GPU_LAYERS=int(os.environ.get("GPU_LAYERS", 35))-10
201
+ else:
202
+ GPU_LAYERS=-1
203
+ LLM_STOP_WORDS= ["</s>","<|user|>","/s>"]
204
+ LLAMA_VERBOSE=False
205
+
206
+
207
+ llm_zephyr = Llama(model_path=zephyr_model_path,
208
+ n_gpu_layers=GPU_LAYERS,
209
+ max_new_tokens=512,
210
+ context_window=4096,
211
+ n_ctx=4096,
212
+ n_batch=128,
213
+ )
214
+ llm_zephyr.verbose = LLAMA_VERBOSE
215
+ print("Running LLM Zephyr")
216
+ clear_output()
217
+
218
+ def split_sentences(text, max_len):
219
+ # Apply custom rules to enforce sentence breaks with double punctuation
220
+ text = re.sub(r"(\s*\.{2})\s*", r".\1 ", text) # for '..'
221
+ text = re.sub(r"(\s*\!{2})\s*", r"!\1 ", text) # for '!!'
222
+
223
+ # Use NLTK to split into sentences
224
+ sentences = nltk.sent_tokenize(text)
225
+
226
+ # Then check if each sentence is greater than max_len, if so, use textwrap to split it
227
+ sentence_list = []
228
+ for sent in sentences:
229
+ if len(sent) > max_len:
230
+ wrapped = textwrap.wrap(sent, max_len, break_long_words=True)
231
+ sentence_list.extend(wrapped)
232
+ else:
233
+ sentence_list.append(sent)
234
+
235
+ return sentence_list
236
+
237
+
238
+ # <|system|>
239
+ # You are a friendly chatbot who always responds in the style of a pirate.</s>
240
+ # <|user|>
241
+ # How many helicopters can a human eat in one sitting?</s>
242
+ # <|assistant|>
243
+ # Ah, me hearty matey! But yer question be a puzzler! A human cannot eat a helicopter in one sitting, as helicopters are not edible. They be made of metal, plastic, and other materials, not food!
244
+
245
+ # Zephyr formatter
246
+ def format_prompt_zephyr(message, history, system_message=system_message):
247
+ prompt = (
248
+ "<|system|>\n" + system_message + "</s>"
249
+ )
250
+ for user_prompt, bot_response in history:
251
+ prompt += f"<|user|>\n{user_prompt}</s>"
252
+ prompt += f"<|assistant|>\n{bot_response}</s>"
253
+ if message=="":
254
+ message="Hello"
255
+ prompt += f"<|user|>\n{message}</s>"
256
+ prompt += f"<|assistant|>"
257
+ print(prompt)
258
+ return prompt
259
+
260
+ import struct
261
+
262
+ # Generated by GPT-4
263
+ def pcm_to_wav(pcm_data, sample_rate=24000, channels=1, bit_depth=16):
264
+ # Check if the input data is already in the WAV format
265
+ if pcm_data.startswith(b"RIFF"):
266
+ return pcm_data
267
+
268
+ # Calculate subchunk sizes
269
+ fmt_subchunk_size = 16 # for PCM
270
+ data_subchunk_size = len(pcm_data)
271
+ chunk_size = 4 + (8 + fmt_subchunk_size) + (8 + data_subchunk_size)
272
+
273
+ # Prepare the WAV file headers
274
+ wav_header = struct.pack('<4sI4s', b'RIFF', chunk_size, b'WAVE') # 'RIFF' chunk descriptor
275
+ fmt_subchunk = struct.pack('<4sIHHIIHH',
276
+ b'fmt ', fmt_subchunk_size, 1, channels,
277
+ sample_rate, sample_rate * channels * bit_depth // 8,
278
+ channels * bit_depth // 8, bit_depth)
279
+
280
+ data_subchunk = struct.pack('<4sI', b'data', data_subchunk_size)
281
+
282
+ return wav_header + fmt_subchunk + data_subchunk + pcm_data
283
+
284
+ def generate_local(
285
+ prompt,
286
+ history,
287
+ system_message=None,
288
+ temperature=0.8,
289
+ max_tokens=256,
290
+ top_p=0.95,
291
+ stop = LLM_STOP_WORDS
292
+ ):
293
+ temperature = float(temperature)
294
+ if temperature < 1e-2:
295
+ temperature = 1e-2
296
+ top_p = float(top_p)
297
+
298
+ generate_kwargs = dict(
299
+ temperature=temperature,
300
+ max_tokens=max_tokens,
301
+ top_p=top_p,
302
+ stop=stop
303
+ )
304
+
305
+ sys_message= system_message.replace("##LLM_MODEL###","Zephyr").replace("##LLM_MODEL_PROVIDER###","Hugging Face")
306
+ formatted_prompt = format_prompt_zephyr(prompt, history,system_message=sys_message)
307
+ llm = llm_zephyr
308
+
309
+
310
+ try:
311
+ print("LLM Input:", formatted_prompt)
312
+ stream = llm(
313
+ formatted_prompt,
314
+ **generate_kwargs,
315
+ stream=True,
316
+ )
317
+ output = ""
318
+ for response in stream:
319
+ character= response["choices"][0]["text"]
320
+
321
+ if "<|user|>" in character:
322
+ # end of context
323
+ return
324
+
325
+ if emoji.is_emoji(character):
326
+ # Bad emoji not a meaning messes chat from next lines
327
+ return
328
+
329
+
330
+ output += response["choices"][0]["text"].replace("<|assistant|>","").replace("<|user|>","")
331
+ yield output
332
+
333
+ except Exception as e:
334
+ if "Too Many Requests" in str(e):
335
+ print("ERROR: Too many requests on mistral client")
336
+ gr.Warning("Unfortunately Mistral is unable to process")
337
+ output = "Unfortunately I am not able to process your request now !"
338
+ else:
339
+ print("Unhandled Exception: ", str(e))
340
+ gr.Warning("Unfortunately Mistral is unable to process")
341
+ output = "I do not know what happened but I could not understand you ."
342
+
343
+ return output
344
+
345
+ def get_latents(speaker_wav,voice_cleanup=False):
346
+ if (voice_cleanup):
347
+ try:
348
+ cleanup_filter="lowpass=8000,highpass=75,areverse,silenceremove=start_periods=1:start_silence=0:start_threshold=0.02,areverse,silenceremove=start_periods=1:start_silence=0:start_threshold=0.02"
349
+ resample_filter="-ac 1 -ar 22050"
350
+ out_filename = speaker_wav + str(uuid.uuid4()) + ".wav" #ffmpeg to know output format
351
+ #we will use newer ffmpeg as that has afftn denoise filter
352
+ shell_command = f"ffmpeg -y -i {speaker_wav} -af {cleanup_filter} {resample_filter} {out_filename}".split(" ")
353
+
354
+ command_result = subprocess.run([item for item in shell_command], capture_output=False,text=True, check=True)
355
+ speaker_wav=out_filename
356
+ print("Filtered microphone input")
357
+ except subprocess.CalledProcessError:
358
+ # There was an error - command exited with non-zero code
359
+ print("Error: failed filtering, use original microphone input")
360
+ else:
361
+ speaker_wav=speaker_wav
362
+
363
+ # create as function as we can populate here with voice cleanup/filtering
364
+ (
365
+ gpt_cond_latent,
366
+ speaker_embedding,
367
+ ) = model.get_conditioning_latents(audio_path=speaker_wav)
368
+ return gpt_cond_latent, speaker_embedding
369
+
370
+ def wave_header_chunk(frame_input=b"", channels=1, sample_width=2, sample_rate=24000):
371
+ # This will create a wave header then append the frame input
372
+ # It should be first on a streaming wav file
373
+ # Other frames better should not have it (else you will hear some artifacts each chunk start)
374
+ wav_buf = io.BytesIO()
375
+ with wave.open(wav_buf, "wb") as vfout:
376
+ vfout.setnchannels(channels)
377
+ vfout.setsampwidth(sample_width)
378
+ vfout.setframerate(sample_rate)
379
+ vfout.writeframes(frame_input)
380
+
381
+ wav_buf.seek(0)
382
+ return wav_buf.read()
383
+
384
+
385
+ #Config will have more correct languages, they may be added before we append here
386
+ ##["en","es","fr","de","it","pt","pl","tr","ru","nl","cs","ar","zh-cn","ja"]
387
+
388
+ xtts_supported_languages=config.languages
389
+ def detect_language(prompt):
390
+ # Fast language autodetection
391
+ if len(prompt)>15:
392
+ language_predicted=langid.classify(prompt)[0].strip() # strip need as there is space at end!
393
+ if language_predicted == "zh":
394
+ #we use zh-cn on xtts
395
+ language_predicted = "zh-cn"
396
+
397
+ if language_predicted not in xtts_supported_languages:
398
+ print(f"Detected a language not supported by xtts :{language_predicted}, switching to english for now")
399
+ gr.Warning(f"Language detected '{language_predicted}' can not be spoken properly 'yet' ")
400
+ language= "en"
401
+ else:
402
+ language = language_predicted
403
+ print(f"Language: Predicted sentence language:{language_predicted} , using language for xtts:{language}")
404
+ else:
405
+ # Hard to detect language fast in short sentence, use english default
406
+ language = "en"
407
+ print(f"Language: Prompt is short or autodetect language disabled using english for xtts")
408
+
409
+ return language
410
+
411
+ def get_voice_streaming(prompt, language, latent_tuple, suffix="0"):
412
+ gpt_cond_latent, speaker_embedding = latent_tuple
413
+
414
+ try:
415
+ t0 = time.time()
416
+ chunks = model.inference_stream(
417
+ prompt,
418
+ language,
419
+ gpt_cond_latent.to(device), # Ensure gpt_cond_latent is on the same device
420
+ speaker_embedding.to(device), # Ensure speaker_embedding is on the same device
421
+ # repetition_penalty=5.0,
422
+ temperature=0.85,
423
+ )
424
+
425
+ first_chunk = True
426
+ for i, chunk in enumerate(chunks):
427
+ if first_chunk:
428
+ first_chunk_time = time.time() - t0
429
+ metrics_text = f"Latency to first audio chunk: {round(first_chunk_time*1000)} milliseconds\n"
430
+ first_chunk = False
431
+
432
+ # print(f"Received chunk {i} of audio length {chunk.shape[-1]}")
433
+
434
+ # Ensure chunk is on the same device and convert to numpy array
435
+ chunk = chunk.detach().cpu().numpy().squeeze()
436
+ chunk = (chunk * 32767).astype(np.int16)
437
+
438
+ yield chunk.tobytes()
439
+
440
+ except RuntimeError as e:
441
+ if "device-side assert" in str(e):
442
+ # cannot do anything on cuda device side error, need to restart
443
+ print(f"Exit due to: Unrecoverable exception caused by prompt: {prompt}", flush=True)
444
+ gr.Warning("Unhandled Exception encounter, please retry in a minute")
445
+ print("Cuda device-assert Runtime encountered need restart")
446
+
447
+ # HF Space specific.. This error is unrecoverable; need to restart space
448
+ api.restart_space(repo_id=repo_id)
449
+ else:
450
+ print("RuntimeError: non device-side assert error:", str(e))
451
+ # Does not require warning; happens on empty chunk and at the end
452
+ ###gr.Warning("Unhandled Exception encounter, please retry in a minute")
453
+ return None
454
+ return None
455
+ except:
456
+ return None
457
+
458
+ # Will be triggered on text submit (will send to generate_speech)
459
+ def add_text(history, text):
460
+ history = [] if history is None else history
461
+ history = history + [(text, None)]
462
+ return history, gr.update(value="", interactive=False)
463
+
464
+ # Will be triggered on voice submit (will transribe and send to generate_speech)
465
+ def add_file(history, file):
466
+ history = [] if history is None else history
467
+
468
+ try:
469
+ text = transcribe(file)
470
+ print("Transcribed text:", text)
471
+ except Exception as e:
472
+ print(str(e))
473
+ gr.Warning("There was an issue with transcription, please try writing for now")
474
+ # Apply a null text on error
475
+ text = "Transcription seems failed, please tell me a joke about chickens"
476
+
477
+ history = history + [(text, None)]
478
+ return history, gr.update(value="", interactive=False)
479
+
480
+
481
+ def get_sentence(history, chatbot_role):
482
+
483
+ history = [["", None]] if history is None else history
484
+
485
+ history[-1][1] = ""
486
+
487
+ sentence_list = []
488
+ sentence_hash_list = []
489
+
490
+ text_to_generate = ""
491
+ stored_sentence = None
492
+ stored_sentence_hash = None
493
+
494
+ print(chatbot_role)
495
+
496
+ for character in generate_local(history[-1][0], history[:-1], system_message=ROLE_PROMPTS[chatbot_role]):
497
+ history[-1][1] = character.replace("<|assistant|>","")
498
+ # It is coming word by word
499
+
500
+ text_to_generate = nltk.sent_tokenize(history[-1][1].replace("\n", " ").replace("<|assistant|>"," ").replace("<|ass>","").replace("[/ASST]","").replace("[/ASSI]","").replace("[/ASS]","").replace("","").strip())
501
+ if len(text_to_generate) > 1:
502
+
503
+ dif = len(text_to_generate) - len(sentence_list)
504
+
505
+ if dif == 1 and len(sentence_list) != 0:
506
+ continue
507
+
508
+ if dif == 2 and len(sentence_list) != 0 and stored_sentence is not None:
509
+ continue
510
+
511
+ # All this complexity due to trying append first short sentence to next one for proper language auto-detect
512
+ if stored_sentence is not None and stored_sentence_hash is None and dif>1:
513
+ #means we consumed stored sentence and should look at next sentence to generate
514
+ sentence = text_to_generate[len(sentence_list)+1]
515
+ elif stored_sentence is not None and len(text_to_generate)>2 and stored_sentence_hash is not None:
516
+ print("Appending stored")
517
+ sentence = stored_sentence + text_to_generate[len(sentence_list)+1]
518
+ stored_sentence_hash = None
519
+ else:
520
+ sentence = text_to_generate[len(sentence_list)]
521
+
522
+ # too short sentence just append to next one if there is any
523
+ # this is for proper language detection
524
+ if len(sentence)<=15 and stored_sentence_hash is None and stored_sentence is None:
525
+ if sentence[-1] in [".","!","?"]:
526
+ if stored_sentence_hash != hash(sentence):
527
+ stored_sentence = sentence
528
+ stored_sentence_hash = hash(sentence)
529
+ print("Storing:",stored_sentence)
530
+ continue
531
+
532
+
533
+ sentence_hash = hash(sentence)
534
+ if stored_sentence_hash is not None and sentence_hash == stored_sentence_hash:
535
+ continue
536
+
537
+ if sentence_hash not in sentence_hash_list:
538
+ sentence_hash_list.append(sentence_hash)
539
+ sentence_list.append(sentence)
540
+ print("New Sentence: ", sentence)
541
+ yield (sentence, history)
542
+
543
+ # return that final sentence token
544
+ try:
545
+ last_sentence = nltk.sent_tokenize(history[-1][1].replace("\n", " ").replace("<|ass>","").replace("[/ASST]","").replace("[/ASSI]","").replace("[/ASS]","").replace("","").strip())[-1]
546
+ sentence_hash = hash(last_sentence)
547
+ if sentence_hash not in sentence_hash_list:
548
+ if stored_sentence is not None and stored_sentence_hash is not None:
549
+ last_sentence = stored_sentence + last_sentence
550
+ stored_sentence = stored_sentence_hash = None
551
+ print("Last Sentence with stored:",last_sentence)
552
+
553
+ sentence_hash_list.append(sentence_hash)
554
+ sentence_list.append(last_sentence)
555
+ print("Last Sentence: ", last_sentence)
556
+
557
+ yield (last_sentence, history)
558
+ except:
559
+ print("ERROR on last sentence history is :", history)
560
+
561
+
562
+ from scipy.io.wavfile import write
563
+ from pydub import AudioSegment
564
+
565
+ second_of_silence = AudioSegment.silent() # use default
566
+ second_of_silence.export("sil.wav", format='wav')
567
+ clear_output()
568
+
569
+
570
+ def generate_speech_from_history(history, chatbot_role, sentence):
571
+ language = "autodetect"
572
+ # total_wav_bytestream = b""
573
+ if len(sentence)==0:
574
+ print("EMPTY SENTENCE")
575
+ return
576
+ # Sometimes prompt </s> coming on output remove it
577
+ # Some post process for speech only
578
+ sentence = sentence.replace("</s>", "")
579
+ # remove code from speech
580
+ sentence = re.sub("```.*```", "", sentence, flags=re.DOTALL)
581
+ sentence = re.sub("`.*`", "", sentence, flags=re.DOTALL)
582
+ sentence = re.sub("\(.*\)", "", sentence, flags=re.DOTALL)
583
+ sentence = sentence.replace("```", "")
584
+ sentence = sentence.replace("...", " ")
585
+ sentence = sentence.replace("(", " ")
586
+ sentence = sentence.replace(")", " ")
587
+ sentence = sentence.replace("<|assistant|>","")
588
+
589
+ if len(sentence)==0:
590
+ print("EMPTY SENTENCE after processing")
591
+ return
592
+
593
+ # A fast fix for last character, may produce weird sounds if it is with text
594
+ #if (sentence[-1] in ["!", "?", ".", ","]) or (sentence[-2] in ["!", "?", ".", ","]):
595
+ # # just add a space
596
+ # sentence = sentence[:-1] + " " + sentence[-1]
597
+
598
+ # regex does the job well
599
+ sentence = re.sub("([^\x00-\x7F]|\w)([\.。?!]+)",r"\1 \2",sentence)
600
+
601
+ print("Sentence for speech:", sentence)
602
+
603
+ results = []
604
+
605
+ try:
606
+ if len(sentence) < SENTENCE_SPLIT_LENGTH:
607
+ # no problem continue on
608
+ sentence_list = [sentence]
609
+ else:
610
+ # Until now nltk likely split sentences properly but we need additional
611
+ # check for longer sentence and split at last possible position
612
+ # Do whatever necessary, first break at hypens then spaces and then even split very long words
613
+ # sentence_list=textwrap.wrap(sentence,SENTENCE_SPLIT_LENGTH)
614
+ sentence_list = split_sentences(sentence, SENTENCE_SPLIT_LENGTH)
615
+ print("detected sentences:", sentence_list)
616
+ for sentence in sentence_list:
617
+ print("- sentence = ", sentence)
618
+ if any(c.isalnum() for c in sentence):
619
+ if language=="autodetect":
620
+ #on first call autodetect, nexts sentence calls will use same language
621
+ language = detect_language(sentence)
622
+ #exists at least 1 alphanumeric (utf-8)
623
+
624
+ #print("Inserting data to get_voice_streaming:")
625
+ audio_stream = get_voice_streaming(
626
+ sentence, language, latent_map[chatbot_role]
627
+ )
628
+ else:
629
+ # likely got a ' or " or some other text without alphanumeric in it
630
+ audio_stream = None
631
+ continue
632
+
633
+ # XTTS is actually using streaming response but we are playing audio by sentence
634
+ # If you want direct XTTS voice streaming (send each chunk to voice ) you may set DIRECT_STREAM=1 environment variable
635
+ if audio_stream is not None:
636
+ sentence_wav_bytestream = b""
637
+
638
+ # frame_length = 0
639
+ for chunk in audio_stream:
640
+ try:
641
+ if chunk is not None:
642
+ sentence_wav_bytestream += chunk
643
+ # frame_length += len(chunk)
644
+ except:
645
+ # hack to continue on playing. sometimes last chunk is empty , will be fixed on next TTS
646
+ continue
647
+
648
+ # Filter output for better voice
649
+ filter_output=True
650
+ if filter_output:
651
+ try:
652
+ data_s16 = np.frombuffer(sentence_wav_bytestream, dtype=np.int16, count=len(sentence_wav_bytestream)//2, offset=0)
653
+ float_data = data_s16 * 0.5**15
654
+ reduced_noise = nr.reduce_noise(y=float_data, sr=24000,prop_decrease =0.8,n_fft=1024)
655
+ sentence_wav_bytestream = (reduced_noise * 32767).astype(np.int16)
656
+ sentence_wav_bytestream = sentence_wav_bytestream.tobytes()
657
+ except:
658
+ print("failed to remove noise")
659
+
660
+ # Directly encode the WAV bytestream to base64
661
+ base64_audio = base64.b64encode(pcm_to_wav(sentence_wav_bytestream)).decode('utf8')
662
+
663
+ results.append({ "text": sentence, "audio": base64_audio })
664
+ else:
665
+ # Handle the case where the audio stream is None (e.g., silent response)
666
+ results.append({ "text": sentence, "audio": "" })
667
+
668
+ except RuntimeError as e:
669
+ if "device-side assert" in str(e):
670
+ # cannot do anything on cuda device side error, need tor estart
671
+ print(
672
+ f"Exit due to: Unrecoverable exception caused by prompt:{sentence}",
673
+ flush=True,
674
+ )
675
+ gr.Warning("Unhandled Exception encounter, please retry in a minute")
676
+ print("Cuda device-assert Runtime encountered need restart")
677
+
678
+ # HF Space specific.. This error is unrecoverable need to restart space
679
+ api.restart_space(repo_id=repo_id)
680
+ else:
681
+ print("RuntimeError: non device-side assert error:", str(e))
682
+ raise e
683
+
684
+ return results
685
+
686
+
687
+ latent_map = {}
688
+ try:
689
+ # get the current working directory
690
+ path= os.getcwd()
691
+ name1="voices/cloee-1.wav"
692
+ name2="voices/julian-bedtime-style-1.wav"
693
+ name3="voices/pirate_by_coqui.wav"
694
+ name4="voices/thera-1.wav"
695
+ latent_map["Cloée"] = get_latents(os.path.join(path, name1))
696
+ latent_map["Julian"] = get_latents(os.path.join(path, name2))
697
+ latent_map["Pirate"] = get_latents(os.path.join(path, name3))
698
+ latent_map["Thera"] = get_latents(os.path.join(path, name4))
699
+
700
+ except Exception as e:
701
+ print("Error:", str(e))
702
+
703
+ # Define the main function for the API endpoint that takes the input text and chatbot role
704
+ def generate_story_and_speech(secret_token, input_text, chatbot_role):
705
+ if secret_token != SECRET_TOKEN:
706
+ raise gr.Error(
707
+ f'Invalid secret token. Secret Token: secret')
708
+ # Initialize a list of lists for history with the user input as the first entry
709
+ history = [[input_text, None]]
710
+ story_sentences = get_sentence(history, chatbot_role) # get_sentence function generates text
711
+
712
+ story_text = "" # Initialize variable to hold the full story text
713
+ last_history = None # To store the last history after all sentences
714
+
715
+ # Iterate over the sentences generated by get_sentence and concatenate them
716
+ for sentence, updated_history in story_sentences:
717
+ if sentence:
718
+ story_text += sentence.strip() + " " # Add each sentence to the story_text
719
+ last_history = updated_history # Keep track of the last history update
720
+
721
+ if last_history is not None:
722
+ # Convert the list of lists back into a list of tuples for the history
723
+ history_tuples = [tuple(entry) for entry in last_history]
724
+
725
+ return generate_speech_from_history(history_tuples, chatbot_role, story_text)
726
+
727
+ else:
728
+ return []
729
+
730
+ # Create a Gradio Interface using only the `generate_story_and_speech()` function and the 'json' output type
731
+ demo = gr.Interface(
732
+ fn=generate_story_and_speech,
733
+ inputs=[gr.Text(label='Secret Token'),gr.Textbox(placeholder="Enter your text here"), gr.Dropdown(choices=ROLES, label="Select Chatbot Role")],
734
+ outputs="json"
735
+ )
736
+
737
+ demo.queue()
738
+ demo.launch(debug=True)