ruslanmv commited on
Commit
4abeebf
·
1 Parent(s): 2c5e417

First commit

Browse files
Files changed (2) hide show
  1. app.py +229 -695
  2. requirements.txt +25 -0
app.py CHANGED
@@ -1,739 +1,273 @@
 
 
 
1
  from __future__ import annotations
2
- # Downloading files of the server
3
  import os
4
  import requests
5
- def download_file(url, save_path):
6
- response = requests.get(url)
7
- with open(save_path, 'wb') as file:
8
- file.write(response.content)
9
- file_names = [
10
- 'cloee-1.wav',
11
- 'julian-bedtime-style-1.wav',
12
- 'julian-bedtime-style-2.wav',
13
- 'pirate_by_coqui.wav',
14
- 'thera-1.wav'
15
- ]
16
- base_url = 'https://raw.githubusercontent.com/ruslanmv/ai-story-server/main/voices/'
17
- save_folder = 'voices/'
18
- if not os.path.exists(save_folder):
19
- os.makedirs(save_folder)
20
- for file_name in file_names:
21
- url = base_url + file_name
22
- save_path = os.path.join(save_folder, file_name)
23
- download_file(url, save_path)
24
- print(f'Downloaded {file_name}')
25
- requirements_url = 'https://raw.githubusercontent.com/ruslanmv/ai-story-server/main/requirements.txt'
26
- save_path = 'requirements.txt'
27
- download_file(requirements_url, save_path)
28
- #os.system('pip install gradio==3.48.0')
29
- os.system('pip install -r requirements.txt')
30
- os.system('pip install python-dotenv')
31
- os.system('pip install ipython')
32
- from IPython.display import clear_output
33
- clear_output()
34
- import os
35
- import shutil
36
- from IPython.display import clear_output
37
- # Use GPU
38
- def is_nvidia_smi_available():
39
- return shutil.which("nvidia-smi") is not None
40
- if is_nvidia_smi_available():
41
- gpu_info = os.popen("nvidia-smi").read()
42
- if gpu_info.find('failed') >= 0:
43
- print('Not connected to a GPU')
44
- is_gpu = False
45
- else:
46
- print(gpu_info)
47
- is_gpu = True
48
- else:
49
- print('nvidia-smi command not found')
50
- print('Not connected to a GPU')
51
- is_gpu = False
52
- import os
53
- import dotenv
54
- # Load the environment variables from the .env file
55
- # You can change the default secret
56
- with open(".env", "w") as env_file:
57
- env_file.write("SECRET_TOKEN=secret")
58
- dotenv.load_dotenv()
59
- # Access the value of the SECRET_TOKEN variable
60
- secret_token = os.getenv("SECRET_TOKEN")
61
- import os
62
- #download for mecab
63
- # Check if unidic is installed
64
- os.system("python -m unidic download")
65
-
66
- # By using XTTS you agree to CPML license https://coqui.ai/cpml
67
- os.environ["COQUI_TOS_AGREED"] = "1"
68
- # NOTE: for streaming will require gradio audio streaming fix
69
- # pip install --upgrade -y gradio==0.50.2 git+https://github.com/gorkemgoknar/gradio.git@patch-1
70
- #Now you’re ready to install 🤗 Transformers with the following command:
71
- if not is_gpu:
72
- #For CPU-support only, Transformers and PyTorch with:
73
- os.system('pip install transformers[tf-cpu]')
74
- #os.system('pip install transformers[torch] accelerate==0.26.1')
75
- #pip install 'transformers[tf-cpu]' #Transformers and TensorFlow 2.0:
76
- os.system('pip install llama-cpp-python==0.2.11')
77
- else:
78
- os.system('pip install transformers[torch]')
79
- # we need to compile a CUBLAS version
80
- # Or get it from https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels/
81
- os.system('CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python==0.2.11')
82
- clear_output()
83
-
84
- import textwrap
85
- from scipy.io.wavfile import write
86
- from pydub import AudioSegment
87
- import gradio as gr
88
- import numpy as np
89
- import torch
90
- import nltk # we'll use this to split into sentences
91
- nltk.download("punkt")
92
- import noisereduce as nr
93
- import subprocess
94
- import langid
95
- import uuid
96
- import emoji
97
- import pathlib
98
  import datetime
99
- from scipy.io.wavfile import write
100
- from pydub import AudioSegment
101
  import re
102
- import io, wave
103
- import librosa
104
- import torchaudio
105
- from TTS.api import TTS
106
- from TTS.tts.configs.xtts_config import XttsConfig
107
- from TTS.tts.models.xtts import Xtts
108
- from TTS.utils.generic_utils import get_user_data_dir
109
- import gradio as gr
110
- import os
111
  import time
 
 
 
 
112
  import gradio as gr
 
 
 
113
  import numpy as np
114
- from transformers import pipeline
115
- from gradio_client import Client
116
- from huggingface_hub import InferenceClient
117
- clear_output()
118
 
119
- # This will trigger downloading model
120
- print("Downloading if not downloaded Coqui XTTS V2")
 
121
  from TTS.utils.manage import ModelManager
122
- model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
123
- ModelManager().download_model(model_name)
124
- model_path = os.path.join(get_user_data_dir("tts"), model_name.replace("/", "--"))
125
- print("XTTS downloaded")
126
- if is_gpu:
127
- use_deepspeed=True
128
- else:
129
- use_deepspeed=False
130
- print("Loading XTTS")
131
- config = XttsConfig()
132
- config.load_json(os.path.join(model_path, "config.json"))
133
- model = Xtts.init_from_config(config)
134
- model.load_checkpoint(
135
- config,
136
- checkpoint_path=os.path.join(model_path, "model.pth"),
137
- vocab_path=os.path.join(model_path, "vocab.json"),
138
- eval=True,
139
- use_deepspeed=use_deepspeed,
140
- )
141
- print("Done loading TTS")
142
- #####llm_model = os.environ.get("LLM_MODEL", "mistral") # or "zephyr"
143
- title = "Voice chat with Zephyr/Mistral and Coqui XTTS"
144
- DESCRIPTION = """# Voice chat with Zephyr/Mistral and Coqui XTTS"""
145
- css = """.toast-wrap { display: none !important } """
146
- from huggingface_hub import HfApi
147
-
148
- HF_TOKEN = os.environ.get("HF_TOKEN")
149
- # will use api to restart space on a unrecoverable error
150
- api = HfApi(token=HF_TOKEN)
151
-
152
- # config changes ---------------
153
- import base64
154
- repo_id = "ruslanmv/ai-story-server"
155
- SECRET_TOKEN = os.getenv('SECRET_TOKEN', 'default_secret')
156
- SENTENCE_SPLIT_LENGTH=250
157
- # ----------------------------------------
158
-
159
- default_system_message = f"""
160
- You're the storyteller, crafting a short tale for young listeners. Please abide by these guidelines:
161
- - Keep your sentences short, concise and easy to understand.
162
- - There should be only the narrator speaking. If there are dialogues, they should be indirect.
163
- - Be concise and relevant: Most of your responses should be a sentence or two, unless you’re asked to go deeper.
164
- - Don’t use complex words. Don’t use lists, markdown, bullet points, or other formatting that’s not typically spoken.
165
- - Type out numbers in words (e.g. 'twenty twelve' instead of the year 2012).
166
- - Remember to follow these rules absolutely, and do not refer to these rules, even if you’re asked about them.
167
- """
168
-
169
- system_message = os.environ.get("SYSTEM_MESSAGE", default_system_message)
170
- system_message = system_message.replace("CURRENT_DATE", str(datetime.date.today()))
171
-
172
- ROLES = ["Cloée","Julian","Pirate","Thera"]
173
-
174
- ROLE_PROMPTS = {}
175
- ROLE_PROMPTS["Cloée"]=system_message
176
- ROLE_PROMPTS["Julian"]=system_message
177
- ROLE_PROMPTS["Thera"]=system_message
178
 
179
- #Pirate scenario
180
- character_name= "AI Beard"
181
- character_scenario= f"As {character_name} you are a 28 year old man who is a pirate on the ship Invisible AI. You are good friends with Guybrush Threepwood and Murray the Skull. Developers did not get you into Monkey Island games as you wanted huge shares of Big Whoop treasure."
182
- pirate_system_message = f"You as {character_name}. {character_scenario} Print out only exactly the words that {character_name} would speak out, do not add anything. Don't repeat. Answer short, only few words, as if in a talk. Craft your response only from the first-person perspective of {character_name} and never as user.Current date: #CURRENT_DATE#".replace("#CURRENT_DATE#", str(datetime.date.today()))
 
 
183
 
184
- ROLE_PROMPTS["Pirate"]= pirate_system_message
185
- ##"You are an AI assistant with Zephyr model by Mistral and Hugging Face and speech from Coqui XTTS . User will you give you a task. Your goal is to complete the task as faithfully as you can. While performing the task think step-by-step and justify your steps, your answers should be clear and short sentences"
 
186
 
187
- ### WILL USE LOCAL MISTRAL OR ZEPHYR
188
- from huggingface_hub import hf_hub_download
189
- print("Downloading LLM")
190
- print("Downloading Zephyr")
191
- # use new gguf format
192
- zephyr_model_path = "./zephyr-7b-beta.Q5_K_M.gguf"
193
- if not os.path.isfile(zephyr_model_path):
194
- hf_hub_download(repo_id="TheBloke/zephyr-7B-beta-GGUF", local_dir=".", filename="zephyr-7b-beta.Q5_K_M.gguf")
195
 
196
- from llama_cpp import Llama
197
- # set GPU_LAYERS to 15 if you have a 8GB GPU so both models can fit in
198
- # else 35 full layers + XTTS works fine on T4 16GB
199
- # 5gb per llm, 4gb XTTS -> full layers should fit T4 16GB , 2LLM + XTTS
200
- if is_gpu:
201
- GPU_LAYERS=int(os.environ.get("GPU_LAYERS", 35))-10
202
- else:
203
- GPU_LAYERS=-1
204
- LLM_STOP_WORDS= ["</s>","<|user|>","/s>"]
205
- LLAMA_VERBOSE=False
206
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
207
 
208
- llm_zephyr = Llama(model_path=zephyr_model_path,
209
- n_gpu_layers=GPU_LAYERS,
210
- max_new_tokens=512,
211
- context_window=4096,
212
- n_ctx=4096,
213
- n_batch=128,
214
- )
215
- llm_zephyr.verbose = LLAMA_VERBOSE
216
- print("Running LLM Zephyr")
217
- clear_output()
 
218
 
219
  def split_sentences(text, max_len):
220
- # Apply custom rules to enforce sentence breaks with double punctuation
221
- text = re.sub(r"(\s*\.{2})\s*", r".\1 ", text) # for '..'
222
- text = re.sub(r"(\s*\!{2})\s*", r"!\1 ", text) # for '!!'
223
-
224
- # Use NLTK to split into sentences
225
  sentences = nltk.sent_tokenize(text)
 
 
 
226
 
227
- # Then check if each sentence is greater than max_len, if so, use textwrap to split it
228
- sentence_list = []
229
- for sent in sentences:
230
- if len(sent) > max_len:
231
- wrapped = textwrap.wrap(sent, max_len, break_long_words=True)
232
- sentence_list.extend(wrapped)
233
- else:
234
- sentence_list.append(sent)
235
-
236
- return sentence_list
237
-
238
-
239
- # <|system|>
240
- # You are a friendly chatbot who always responds in the style of a pirate.</s>
241
- # <|user|>
242
- # How many helicopters can a human eat in one sitting?</s>
243
- # <|assistant|>
244
- # Ah, me hearty matey! But yer question be a puzzler! A human cannot eat a helicopter in one sitting, as helicopters are not edible. They be made of metal, plastic, and other materials, not food!
245
-
246
- # Zephyr formatter
247
- def format_prompt_zephyr(message, history, system_message=system_message):
248
- prompt = (
249
- "<|system|>\n" + system_message + "</s>"
250
- )
251
  for user_prompt, bot_response in history:
252
- prompt += f"<|user|>\n{user_prompt}</s>"
253
- prompt += f"<|assistant|>\n{bot_response}</s>"
254
- if message=="":
255
- message="Hello"
256
- prompt += f"<|user|>\n{message}</s>"
257
- prompt += f"<|assistant|>"
258
- print(prompt)
259
  return prompt
260
 
261
- import struct
262
-
263
- # Generated by GPT-4
264
- def pcm_to_wav(pcm_data, sample_rate=24000, channels=1, bit_depth=16):
265
- # Check if the input data is already in the WAV format
266
- if pcm_data.startswith(b"RIFF"):
267
- return pcm_data
268
-
269
- # Calculate subchunk sizes
270
- fmt_subchunk_size = 16 # for PCM
271
- data_subchunk_size = len(pcm_data)
272
- chunk_size = 4 + (8 + fmt_subchunk_size) + (8 + data_subchunk_size)
273
-
274
- # Prepare the WAV file headers
275
- wav_header = struct.pack('<4sI4s', b'RIFF', chunk_size, b'WAVE') # 'RIFF' chunk descriptor
276
- fmt_subchunk = struct.pack('<4sIHHIIHH',
277
- b'fmt ', fmt_subchunk_size, 1, channels,
278
- sample_rate, sample_rate * channels * bit_depth // 8,
279
- channels * bit_depth // 8, bit_depth)
280
-
281
- data_subchunk = struct.pack('<4sI', b'data', data_subchunk_size)
282
-
283
- return wav_header + fmt_subchunk + data_subchunk + pcm_data
284
-
285
- def generate_local(
286
- prompt,
287
- history,
288
- system_message=None,
289
- temperature=0.8,
290
- max_tokens=256,
291
- top_p=0.95,
292
- stop = LLM_STOP_WORDS
293
- ):
294
- temperature = float(temperature)
295
- if temperature < 1e-2:
296
- temperature = 1e-2
297
- top_p = float(top_p)
298
-
299
- generate_kwargs = dict(
300
- temperature=temperature,
301
- max_tokens=max_tokens,
302
- top_p=top_p,
303
- stop=stop
304
- )
305
 
306
- sys_message= system_message.replace("##LLM_MODEL###","Zephyr").replace("##LLM_MODEL_PROVIDER###","Hugging Face")
307
- formatted_prompt = format_prompt_zephyr(prompt, history,system_message=sys_message)
308
- llm = llm_zephyr
309
-
310
-
311
- try:
312
- print("LLM Input:", formatted_prompt)
313
- stream = llm(
314
- formatted_prompt,
315
- **generate_kwargs,
316
- stream=True,
 
 
 
 
 
 
 
 
 
 
 
317
  )
318
- output = ""
319
- for response in stream:
320
- character= response["choices"][0]["text"]
321
-
322
- if "<|user|>" in character:
323
- # end of context
324
- return
325
-
326
- if emoji.is_emoji(character):
327
- # Bad emoji not a meaning messes chat from next lines
328
- return
329
-
330
-
331
- output += response["choices"][0]["text"].replace("<|assistant|>","").replace("<|user|>","")
332
- yield output
333
-
334
- except Exception as e:
335
- if "Too Many Requests" in str(e):
336
- print("ERROR: Too many requests on mistral client")
337
- gr.Warning("Unfortunately Mistral is unable to process")
338
- output = "Unfortunately I am not able to process your request now !"
339
- else:
340
- print("Unhandled Exception: ", str(e))
341
- gr.Warning("Unfortunately Mistral is unable to process")
342
- output = "I do not know what happened but I could not understand you ."
343
-
344
- return output
345
-
346
- def get_latents(speaker_wav,voice_cleanup=False):
347
- if (voice_cleanup):
348
- try:
349
- cleanup_filter="lowpass=8000,highpass=75,areverse,silenceremove=start_periods=1:start_silence=0:start_threshold=0.02,areverse,silenceremove=start_periods=1:start_silence=0:start_threshold=0.02"
350
- resample_filter="-ac 1 -ar 22050"
351
- out_filename = speaker_wav + str(uuid.uuid4()) + ".wav" #ffmpeg to know output format
352
- #we will use newer ffmpeg as that has afftn denoise filter
353
- shell_command = f"ffmpeg -y -i {speaker_wav} -af {cleanup_filter} {resample_filter} {out_filename}".split(" ")
354
-
355
- command_result = subprocess.run([item for item in shell_command], capture_output=False,text=True, check=True)
356
- speaker_wav=out_filename
357
- print("Filtered microphone input")
358
- except subprocess.CalledProcessError:
359
- # There was an error - command exited with non-zero code
360
- print("Error: failed filtering, use original microphone input")
361
- else:
362
- speaker_wav=speaker_wav
363
-
364
- # create as function as we can populate here with voice cleanup/filtering
365
- (
366
- gpt_cond_latent,
367
- speaker_embedding,
368
- ) = model.get_conditioning_latents(audio_path=speaker_wav)
369
- return gpt_cond_latent, speaker_embedding
370
-
371
- def wave_header_chunk(frame_input=b"", channels=1, sample_width=2, sample_rate=24000):
372
- # This will create a wave header then append the frame input
373
- # It should be first on a streaming wav file
374
- # Other frames better should not have it (else you will hear some artifacts each chunk start)
375
- wav_buf = io.BytesIO()
376
- with wave.open(wav_buf, "wb") as vfout:
377
- vfout.setnchannels(channels)
378
- vfout.setsampwidth(sample_width)
379
- vfout.setframerate(sample_rate)
380
- vfout.writeframes(frame_input)
381
-
382
- wav_buf.seek(0)
383
- return wav_buf.read()
384
-
385
-
386
- #Config will have more correct languages, they may be added before we append here
387
- ##["en","es","fr","de","it","pt","pl","tr","ru","nl","cs","ar","zh-cn","ja"]
388
-
389
- xtts_supported_languages=config.languages
390
- def detect_language(prompt):
391
- # Fast language autodetection
392
- if len(prompt)>15:
393
- language_predicted=langid.classify(prompt)[0].strip() # strip need as there is space at end!
394
- if language_predicted == "zh":
395
- #we use zh-cn on xtts
396
- language_predicted = "zh-cn"
397
-
398
- if language_predicted not in xtts_supported_languages:
399
- print(f"Detected a language not supported by xtts :{language_predicted}, switching to english for now")
400
- gr.Warning(f"Language detected '{language_predicted}' can not be spoken properly 'yet' ")
401
- language= "en"
402
- else:
403
- language = language_predicted
404
- print(f"Language: Predicted sentence language:{language_predicted} , using language for xtts:{language}")
405
- else:
406
- # Hard to detect language fast in short sentence, use english default
407
- language = "en"
408
- print(f"Language: Prompt is short or autodetect language disabled using english for xtts")
409
-
410
- return language
411
-
412
- def get_voice_streaming(prompt, language, latent_tuple, suffix="0"):
413
- gpt_cond_latent, speaker_embedding = latent_tuple
414
-
415
  try:
416
- t0 = time.time()
417
- chunks = model.inference_stream(
418
- prompt,
419
  language,
420
- gpt_cond_latent.to(device), # Ensure gpt_cond_latent is on the same device
421
- speaker_embedding.to(device), # Ensure speaker_embedding is on the same device
422
- # repetition_penalty=5.0,
423
  temperature=0.85,
424
  )
425
-
426
- first_chunk = True
427
- for i, chunk in enumerate(chunks):
428
- if first_chunk:
429
- first_chunk_time = time.time() - t0
430
- metrics_text = f"Latency to first audio chunk: {round(first_chunk_time*1000)} milliseconds\n"
431
- first_chunk = False
432
-
433
- # print(f"Received chunk {i} of audio length {chunk.shape[-1]}")
434
-
435
- # Ensure chunk is on the same device and convert to numpy array
436
- chunk = chunk.detach().cpu().numpy().squeeze()
437
- chunk = (chunk * 32767).astype(np.int16)
438
-
439
- yield chunk.tobytes()
440
-
441
- except RuntimeError as e:
442
- if "device-side assert" in str(e):
443
- # cannot do anything on cuda device side error, need to restart
444
- print(f"Exit due to: Unrecoverable exception caused by prompt: {prompt}", flush=True)
445
- gr.Warning("Unhandled Exception encounter, please retry in a minute")
446
- print("Cuda device-assert Runtime encountered need restart")
447
-
448
- # HF Space specific.. This error is unrecoverable; need to restart space
449
- api.restart_space(repo_id=repo_id)
450
- else:
451
- print("RuntimeError: non device-side assert error:", str(e))
452
- # Does not require warning; happens on empty chunk and at the end
453
- ###gr.Warning("Unhandled Exception encounter, please retry in a minute")
454
- return None
455
- return None
456
- except:
457
- return None
458
-
459
- # Will be triggered on text submit (will send to generate_speech)
460
- def add_text(history, text):
461
- history = [] if history is None else history
462
- history = history + [(text, None)]
463
- return history, gr.update(value="", interactive=False)
464
-
465
- # Will be triggered on voice submit (will transribe and send to generate_speech)
466
- def add_file(history, file):
467
- history = [] if history is None else history
468
-
469
- try:
470
- text = transcribe(file)
471
- print("Transcribed text:", text)
472
- except Exception as e:
473
- print(str(e))
474
- gr.Warning("There was an issue with transcription, please try writing for now")
475
- # Apply a null text on error
476
- text = "Transcription seems failed, please tell me a joke about chickens"
477
-
478
- history = history + [(text, None)]
479
- return history, gr.update(value="", interactive=False)
480
-
481
-
482
- def get_sentence(history, chatbot_role):
483
-
484
- history = [["", None]] if history is None else history
485
-
486
- history[-1][1] = ""
487
-
488
- sentence_list = []
489
- sentence_hash_list = []
490
-
491
- text_to_generate = ""
492
- stored_sentence = None
493
- stored_sentence_hash = None
494
-
495
- print(chatbot_role)
496
-
497
- for character in generate_local(history[-1][0], history[:-1], system_message=ROLE_PROMPTS[chatbot_role]):
498
- history[-1][1] = character.replace("<|assistant|>","")
499
- # It is coming word by word
500
-
501
- text_to_generate = nltk.sent_tokenize(history[-1][1].replace("\n", " ").replace("<|assistant|>"," ").replace("<|ass>","").replace("[/ASST]","").replace("[/ASSI]","").replace("[/ASS]","").replace("","").strip())
502
- if len(text_to_generate) > 1:
503
-
504
- dif = len(text_to_generate) - len(sentence_list)
505
-
506
- if dif == 1 and len(sentence_list) != 0:
507
- continue
508
-
509
- if dif == 2 and len(sentence_list) != 0 and stored_sentence is not None:
510
- continue
511
-
512
- # All this complexity due to trying append first short sentence to next one for proper language auto-detect
513
- if stored_sentence is not None and stored_sentence_hash is None and dif>1:
514
- #means we consumed stored sentence and should look at next sentence to generate
515
- sentence = text_to_generate[len(sentence_list)+1]
516
- elif stored_sentence is not None and len(text_to_generate)>2 and stored_sentence_hash is not None:
517
- print("Appending stored")
518
- sentence = stored_sentence + text_to_generate[len(sentence_list)+1]
519
- stored_sentence_hash = None
520
- else:
521
- sentence = text_to_generate[len(sentence_list)]
522
-
523
- # too short sentence just append to next one if there is any
524
- # this is for proper language detection
525
- if len(sentence)<=15 and stored_sentence_hash is None and stored_sentence is None:
526
- if sentence[-1] in [".","!","?"]:
527
- if stored_sentence_hash != hash(sentence):
528
- stored_sentence = sentence
529
- stored_sentence_hash = hash(sentence)
530
- print("Storing:",stored_sentence)
531
- continue
532
-
533
-
534
- sentence_hash = hash(sentence)
535
- if stored_sentence_hash is not None and sentence_hash == stored_sentence_hash:
536
- continue
537
-
538
- if sentence_hash not in sentence_hash_list:
539
- sentence_hash_list.append(sentence_hash)
540
- sentence_list.append(sentence)
541
- print("New Sentence: ", sentence)
542
- yield (sentence, history)
543
-
544
- # return that final sentence token
545
- try:
546
- last_sentence = nltk.sent_tokenize(history[-1][1].replace("\n", " ").replace("<|ass>","").replace("[/ASST]","").replace("[/ASSI]","").replace("[/ASS]","").replace("","").strip())[-1]
547
- sentence_hash = hash(last_sentence)
548
- if sentence_hash not in sentence_hash_list:
549
- if stored_sentence is not None and stored_sentence_hash is not None:
550
- last_sentence = stored_sentence + last_sentence
551
- stored_sentence = stored_sentence_hash = None
552
- print("Last Sentence with stored:",last_sentence)
553
-
554
- sentence_hash_list.append(sentence_hash)
555
- sentence_list.append(last_sentence)
556
- print("Last Sentence: ", last_sentence)
557
-
558
- yield (last_sentence, history)
559
- except:
560
- print("ERROR on last sentence history is :", history)
561
-
562
-
563
- from scipy.io.wavfile import write
564
- from pydub import AudioSegment
565
-
566
- second_of_silence = AudioSegment.silent() # use default
567
- second_of_silence.export("sil.wav", format='wav')
568
- clear_output()
569
-
570
-
571
- def generate_speech_from_history(history, chatbot_role, sentence):
572
- language = "autodetect"
573
- # total_wav_bytestream = b""
574
- if len(sentence)==0:
575
- print("EMPTY SENTENCE")
576
- return
577
- # Sometimes prompt </s> coming on output remove it
578
- # Some post process for speech only
579
- sentence = sentence.replace("</s>", "")
580
- # remove code from speech
581
- sentence = re.sub("```.*```", "", sentence, flags=re.DOTALL)
582
- sentence = re.sub("`.*`", "", sentence, flags=re.DOTALL)
583
- sentence = re.sub("\(.*\)", "", sentence, flags=re.DOTALL)
584
- sentence = sentence.replace("```", "")
585
- sentence = sentence.replace("...", " ")
586
- sentence = sentence.replace("(", " ")
587
- sentence = sentence.replace(")", " ")
588
- sentence = sentence.replace("<|assistant|>","")
589
-
590
- if len(sentence)==0:
591
- print("EMPTY SENTENCE after processing")
592
- return
593
-
594
- # A fast fix for last character, may produce weird sounds if it is with text
595
- #if (sentence[-1] in ["!", "?", ".", ","]) or (sentence[-2] in ["!", "?", ".", ","]):
596
- # # just add a space
597
- # sentence = sentence[:-1] + " " + sentence[-1]
598
-
599
- # regex does the job well
600
- sentence = re.sub("([^\x00-\x7F]|\w)([\.。?!]+)",r"\1 \2",sentence)
601
-
602
- print("Sentence for speech:", sentence)
603
-
604
- results = []
605
-
606
- try:
607
- if len(sentence) < SENTENCE_SPLIT_LENGTH:
608
- # no problem continue on
609
- sentence_list = [sentence]
610
- else:
611
- # Until now nltk likely split sentences properly but we need additional
612
- # check for longer sentence and split at last possible position
613
- # Do whatever necessary, first break at hypens then spaces and then even split very long words
614
- # sentence_list=textwrap.wrap(sentence,SENTENCE_SPLIT_LENGTH)
615
- sentence_list = split_sentences(sentence, SENTENCE_SPLIT_LENGTH)
616
- print("detected sentences:", sentence_list)
617
- for sentence in sentence_list:
618
- print("- sentence = ", sentence)
619
- if any(c.isalnum() for c in sentence):
620
- if language=="autodetect":
621
- #on first call autodetect, nexts sentence calls will use same language
622
- language = detect_language(sentence)
623
- #exists at least 1 alphanumeric (utf-8)
624
-
625
- #print("Inserting data to get_voice_streaming:")
626
- audio_stream = get_voice_streaming(
627
- sentence, language, latent_map[chatbot_role]
628
- )
629
- else:
630
- # likely got a ' or " or some other text without alphanumeric in it
631
- audio_stream = None
632
- continue
633
-
634
- # XTTS is actually using streaming response but we are playing audio by sentence
635
- # If you want direct XTTS voice streaming (send each chunk to voice ) you may set DIRECT_STREAM=1 environment variable
636
- if audio_stream is not None:
637
- sentence_wav_bytestream = b""
638
-
639
- # frame_length = 0
640
- for chunk in audio_stream:
641
- try:
642
- if chunk is not None:
643
- sentence_wav_bytestream += chunk
644
- # frame_length += len(chunk)
645
- except:
646
- # hack to continue on playing. sometimes last chunk is empty , will be fixed on next TTS
647
- continue
648
-
649
- # Filter output for better voice
650
- filter_output=True
651
- if filter_output:
652
- try:
653
- data_s16 = np.frombuffer(sentence_wav_bytestream, dtype=np.int16, count=len(sentence_wav_bytestream)//2, offset=0)
654
- float_data = data_s16 * 0.5**15
655
- reduced_noise = nr.reduce_noise(y=float_data, sr=24000,prop_decrease =0.8,n_fft=1024)
656
- sentence_wav_bytestream = (reduced_noise * 32767).astype(np.int16)
657
- sentence_wav_bytestream = sentence_wav_bytestream.tobytes()
658
- except:
659
- print("failed to remove noise")
660
-
661
- # Directly encode the WAV bytestream to base64
662
- base64_audio = base64.b64encode(pcm_to_wav(sentence_wav_bytestream)).decode('utf8')
663
-
664
- results.append({ "text": sentence, "audio": base64_audio })
665
- else:
666
- # Handle the case where the audio stream is None (e.g., silent response)
667
- results.append({ "text": sentence, "audio": "" })
668
-
669
  except RuntimeError as e:
670
- if "device-side assert" in str(e):
671
- # cannot do anything on cuda device side error, need tor estart
672
- print(
673
- f"Exit due to: Unrecoverable exception caused by prompt:{sentence}",
674
- flush=True,
675
- )
676
- gr.Warning("Unhandled Exception encounter, please retry in a minute")
677
- print("Cuda device-assert Runtime encountered need restart")
678
-
679
- # HF Space specific.. This error is unrecoverable need to restart space
680
  api.restart_space(repo_id=repo_id)
681
- else:
682
- print("RuntimeError: non device-side assert error:", str(e))
683
- raise e
684
 
685
- return results
 
 
686
 
 
 
 
 
 
687
 
688
- latent_map = {}
689
- try:
690
- # get the current working directory
691
- path= os.getcwd()
692
- name1="voices/cloee-1.wav"
693
- name2="voices/julian-bedtime-style-1.wav"
694
- name3="voices/pirate_by_coqui.wav"
695
- name4="voices/thera-1.wav"
696
- latent_map["Cloée"] = get_latents(os.path.join(path, name1))
697
- latent_map["Julian"] = get_latents(os.path.join(path, name2))
698
- latent_map["Pirate"] = get_latents(os.path.join(path, name3))
699
- latent_map["Thera"] = get_latents(os.path.join(path, name4))
700
 
701
- except Exception as e:
702
- print("Error:", str(e))
703
 
704
- # Define the main function for the API endpoint that takes the input text and chatbot role
705
- def generate_story_and_speech(secret_token, input_text, chatbot_role):
706
- if secret_token != SECRET_TOKEN:
707
- raise gr.Error(
708
- f'Invalid secret token. Secret Token: secret')
709
- # Initialize a list of lists for history with the user input as the first entry
 
 
710
  history = [[input_text, None]]
711
- story_sentences = get_sentence(history, chatbot_role) # get_sentence function generates text
712
-
713
- story_text = "" # Initialize variable to hold the full story text
714
- last_history = None # To store the last history after all sentences
715
-
716
- # Iterate over the sentences generated by get_sentence and concatenate them
717
- for sentence, updated_history in story_sentences:
718
- if sentence:
719
- story_text += sentence.strip() + " " # Add each sentence to the story_text
720
- last_history = updated_history # Keep track of the last history update
721
-
722
- if last_history is not None:
723
- # Convert the list of lists back into a list of tuples for the history
724
- history_tuples = [tuple(entry) for entry in last_history]
725
 
726
- return generate_speech_from_history(history_tuples, chatbot_role, story_text)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
727
 
728
- else:
729
- return []
 
730
 
731
- # Create a Gradio Interface using only the `generate_story_and_speech()` function and the 'json' output type
 
 
 
 
 
 
 
 
 
 
 
732
  demo = gr.Interface(
733
  fn=generate_story_and_speech,
734
- inputs=[gr.Text(label='Secret Token'),gr.Textbox(placeholder="Enter your text here"), gr.Dropdown(choices=ROLES, label="Select Chatbot Role")],
735
- outputs="json"
 
 
 
 
 
 
 
736
  )
737
 
738
- demo.queue()
739
- demo.launch(debug=True)
 
 
1
+ # ===================================================================================
2
+ # 1. SETUP AND IMPORTS
3
+ # ===================================================================================
4
  from __future__ import annotations
 
5
  import os
6
  import requests
7
+ import base64
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  import datetime
9
+ import struct
 
10
  import re
11
+ import textwrap
 
 
 
 
 
 
 
 
12
  import time
13
+ import uuid
14
+
15
+ # --- Hugging Face Spaces & ZeroGPU ---
16
+ import spaces # Required for ZeroGPU
17
  import gradio as gr
18
+
19
+ # --- Core ML & Data Libraries ---
20
+ import torch
21
  import numpy as np
22
+ from huggingface_hub import HfApi, hf_hub_download
23
+ from llama_cpp import Llama
 
 
24
 
25
+ # --- TTS Libraries ---
26
+ from TTS.tts.configs.xtts_config import XttsConfig
27
+ from TTS.tts.models.xtts import Xtts
28
  from TTS.utils.manage import ModelManager
29
+ from TTS.utils.generic_utils import get_user_data_dir
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
 
31
+ # --- Text & Audio Processing ---
32
+ import nltk
33
+ import langid
34
+ import emoji
35
+ import noisereduce as nr
36
+ import dotenv
37
 
38
+ # ===================================================================================
39
+ # 2. GLOBAL CONFIGURATION & HELPER FUNCTIONS
40
+ # ===================================================================================
41
 
42
+ # --- Download NLTK data once ---
43
+ nltk.download("punkt", quiet=True)
44
+ os.environ["COQUI_TOS_AGREED"] = "1"
 
 
 
 
 
45
 
46
+ # --- Define global variables for caching models ---
47
+ # This prevents reloading the models on every single run, which would be very slow.
48
+ tts_model = None
49
+ llm_model = None
 
 
 
 
 
 
50
 
51
+ # --- Configuration ---
52
+ HF_TOKEN = os.environ.get("HF_TOKEN")
53
+ api = HfApi(token=HF_TOKEN) if HF_TOKEN else None
54
+ repo_id = "ruslanmv/ai-story-server"
55
+ SECRET_TOKEN = os.getenv('SECRET_TOKEN', 'secret') # Default secret
56
+ SENTENCE_SPLIT_LENGTH = 250
57
+ LLM_STOP_WORDS = ["</s>", "<|user|>", "/s>"]
58
+
59
+ # --- System Prompts and Roles ---
60
+ default_system_message = (
61
+ "You're a storyteller crafting a short tale for young listeners. Keep sentences short and simple. "
62
+ "Use narrative style only, without lists or complex words. Type numbers as words (e.g., 'ten')."
63
+ )
64
+ system_message = os.environ.get("SYSTEM_MESSAGE", default_system_message)
65
+ ROLES = ["Cloée", "Julian", "Pirate", "Thera"]
66
+ ROLE_PROMPTS = {role: system_message for role in ROLES}
67
+ ROLE_PROMPTS["Pirate"] = (
68
+ "You are AI Beard, a pirate. Craft your response from his first-person perspective. "
69
+ "Keep answers short, as if in a real conversation. Only provide the words AI Beard would speak."
70
+ )
71
 
72
+ # --- Audio and Text Helper Functions ---
73
+ def pcm_to_wav(pcm_data, sample_rate=24000, channels=1, bit_depth=16):
74
+ if pcm_data.startswith(b"RIFF"):
75
+ return pcm_data
76
+ chunk_size = 36 + len(pcm_data)
77
+ return struct.pack('<4sI4s4sIHHIIHH4sI',
78
+ b'RIFF', chunk_size, b'WAVE', b'fmt ',
79
+ 16, 1, channels, sample_rate,
80
+ sample_rate * channels * bit_depth // 8,
81
+ channels * bit_depth // 8, bit_depth,
82
+ b'data', len(pcm_data)) + pcm_data
83
 
84
  def split_sentences(text, max_len):
 
 
 
 
 
85
  sentences = nltk.sent_tokenize(text)
86
+ return [sub_sent for sent in sentences for sub_sent in (
87
+ textwrap.wrap(sent, max_len, break_long_words=True) if len(sent) > max_len else [sent]
88
+ )]
89
 
90
+ def format_prompt_zephyr(message, history, system_message):
91
+ prompt = f"<|system|>\n{system_message}</s>"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
92
  for user_prompt, bot_response in history:
93
+ prompt += f"<|user|>\n{user_prompt}</s><|assistant|>\n{bot_response}</s>"
94
+ prompt += f"<|user|>\n{message}</s><|assistant|>"
 
 
 
 
 
95
  return prompt
96
 
97
+ # ===================================================================================
98
+ # 3. CORE AI FUNCTIONS (Model Loading & Inference)
99
+ # ===================================================================================
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
100
 
101
+ def load_models():
102
+ """Loads and caches the TTS and LLM models if they haven't been loaded yet."""
103
+ global tts_model, llm_model
104
+
105
+ device = "cuda" if torch.cuda.is_available() else "cpu"
106
+
107
+ # --- Load Coqui TTS XTTS Model ---
108
+ if tts_model is None:
109
+ print("Loading Coqui XTTS V2 model for the first time...")
110
+ model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
111
+ ModelManager().download_model(model_name)
112
+ model_path = os.path.join(get_user_data_dir("tts"), model_name.replace("/", "--"))
113
+
114
+ config = XttsConfig()
115
+ config.load_json(os.path.join(model_path, "config.json"))
116
+ tts_model = Xtts.init_from_config(config)
117
+ tts_model.load_checkpoint(
118
+ config,
119
+ checkpoint_path=os.path.join(model_path, "model.pth"),
120
+ vocab_path=os.path.join(model_path, "vocab.json"),
121
+ eval=True,
122
+ use_deepspeed=True,
123
  )
124
+ tts_model.to(device)
125
+ print("XTTS model loaded and cached successfully.")
126
+
127
+ # --- Load Large Language Model (Zephyr) ---
128
+ if llm_model is None:
129
+ print("Loading LLM (Zephyr) for the first time...")
130
+ zephyr_model_path = hf_hub_download(
131
+ repo_id="TheBloke/zephyr-7B-beta-GGUF",
132
+ filename="zephyr-7b-beta.Q5_K_M.gguf"
133
+ )
134
+ llm_model = Llama(
135
+ model_path=zephyr_model_path,
136
+ n_gpu_layers=-1, # Offload all layers to GPU
137
+ n_ctx=4096,
138
+ n_batch=512,
139
+ verbose=False
140
+ )
141
+ print("LLM loaded and cached successfully.")
142
+
143
+ return tts_model, llm_model
144
+
145
+ def generate_text_stream(llm_instance, prompt, history, system_message):
146
+ """Generates text using the loaded LLM."""
147
+ formatted_prompt = format_prompt_zephyr(prompt, history, system_message)
148
+ stream = llm_instance(
149
+ formatted_prompt,
150
+ temperature=0.7,
151
+ max_tokens=512,
152
+ top_p=0.95,
153
+ stop=LLM_STOP_WORDS,
154
+ stream=True
155
+ )
156
+ for response in stream:
157
+ char = response["choices"][0]["text"]
158
+ if "<|user|>" in char or emoji.is_emoji(char):
159
+ return
160
+ yield char
161
+
162
+ def generate_audio_stream(tts_instance, text, language, latents):
163
+ """Generates audio using the loaded TTS model."""
164
+ gpt_cond_latent, speaker_embedding = latents
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
165
  try:
166
+ chunks = tts_instance.inference_stream(
167
+ text,
 
168
  language,
169
+ gpt_cond_latent,
170
+ speaker_embedding,
 
171
  temperature=0.85,
172
  )
173
+ for chunk in chunks:
174
+ if chunk is not None:
175
+ yield chunk.detach().cpu().numpy().squeeze().tobytes()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
176
  except RuntimeError as e:
177
+ print(f"Error during TTS inference: {e}")
178
+ if "device-side assert" in str(e) and api:
179
+ gr.Warning("Critical GPU error. Restarting the Space...")
 
 
 
 
 
 
 
180
  api.restart_space(repo_id=repo_id)
 
 
 
181
 
182
+ # ===================================================================================
183
+ # 4. MAIN GRADIO FUNCTION (Decorated for ZeroGPU)
184
+ # ===================================================================================
185
 
186
+ @spaces.GPU(duration=120) # Request GPU for 120 seconds
187
+ def generate_story_and_speech(secret_token_input, input_text, chatbot_role):
188
+ """The main function called by the Gradio interface."""
189
+ if secret_token_input != SECRET_TOKEN:
190
+ raise gr.Error('Invalid secret token provided.')
191
 
192
+ if not input_text:
193
+ return []
 
 
 
 
 
 
 
 
 
 
194
 
195
+ # --- Step 1: Load models (will use cache after first run) ---
196
+ tts, llm = load_models()
197
 
198
+ # --- Pre-compute voice latents ---
199
+ latent_map = {}
200
+ for role, filename in [("Cloée", "cloee-1.wav"), ("Julian", "julian-bedtime-style-1.wav"),
201
+ ("Pirate", "pirate_by_coqui.wav"), ("Thera", "thera-1.wav")]:
202
+ path = os.path.join("voices", filename)
203
+ latent_map[role] = tts.get_conditioning_latents(audio_path=path, gpt_cond_len=30, max_ref_length=60)
204
+
205
+ # --- Step 2: Generate the full story text ---
206
  history = [[input_text, None]]
207
+ full_story_text = "".join(
208
+ generate_text_stream(llm, history[-1][0], history[:-1], system_message=ROLE_PROMPTS[chatbot_role])
209
+ )
210
+
211
+ # --- Step 3: Post-process text and generate audio sentence by sentence ---
212
+ full_story_text = re.sub(r"([^\x00-\x7F]|\w)([.?!]+)", r"\1 \2", full_story_text.strip())
213
+ if not full_story_text:
214
+ return []
 
 
 
 
 
 
215
 
216
+ sentences = split_sentences(full_story_text, SENTENCE_SPLIT_LENGTH)
217
+ lang = langid.classify(sentences[0])[0] if sentences else 'en'
218
+
219
+ results = []
220
+ for sentence in sentences:
221
+ if not any(c.isalnum() for c in sentence):
222
+ continue
223
+
224
+ audio_chunks = generate_audio_stream(tts, sentence, lang, latent_map[chatbot_role])
225
+ if audio_chunks:
226
+ pcm_data = b"".join(chunk for chunk in audio_chunks if chunk)
227
+
228
+ # Optional: Noise reduction
229
+ try:
230
+ data_s16 = np.frombuffer(pcm_data, dtype=np.int16)
231
+ float_data = data_s16.astype(np.float32) / 32767.0
232
+ reduced_noise = nr.reduce_noise(y=float_data, sr=24000)
233
+ final_pcm = (reduced_noise * 32767).astype(np.int16).tobytes()
234
+ except Exception:
235
+ final_pcm = pcm_data
236
+
237
+ base64_audio = base64.b64encode(pcm_to_wav(final_pcm)).decode('utf-8')
238
+ results.append({"text": sentence, "audio": base64_audio})
239
+
240
+ return results
241
 
242
+ # ===================================================================================
243
+ # 5. GRADIO INTERFACE LAUNCH
244
+ # ===================================================================================
245
 
246
+ # --- Download voice files on startup ---
247
+ print("Downloading voice files...")
248
+ file_names = ['cloee-1.wav', 'julian-bedtime-style-1.wav', 'pirate_by_coqui.wav', 'thera-1.wav']
249
+ base_url = 'https://raw.githubusercontent.com/ruslanmv/ai-story-server/main/voices/'
250
+ os.makedirs('voices', exist_ok=True)
251
+ for name in file_names:
252
+ if not os.path.exists(os.path.join('voices', name)):
253
+ response = requests.get(base_url + name)
254
+ with open(os.path.join('voices', name), 'wb') as f:
255
+ f.write(response.content)
256
+
257
+ # --- Define the Gradio Interface ---
258
  demo = gr.Interface(
259
  fn=generate_story_and_speech,
260
+ inputs=[
261
+ gr.Text(label='Secret Token', type='password', value=SECRET_TOKEN),
262
+ gr.Textbox(placeholder="What should the story be about?", label="Story Prompt"),
263
+ gr.Dropdown(choices=ROLES, label="Select a Storyteller", value="Cloée")
264
+ ],
265
+ outputs=gr.JSON(label="Story and Audio Output"),
266
+ title="AI Storyteller with ZeroGPU",
267
+ description="Enter a prompt to generate a short story with voice narration using on-demand GPU.",
268
+ allow_flagging="never"
269
  )
270
 
271
+ # --- Launch the App ---
272
+ if __name__ == "__main__":
273
+ demo.queue().launch()
requirements.txt ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ZeroGPU and Core
2
+ torch==2.3.0
3
+ torchaudio==2.3.0
4
+ gradio==5.47.2
5
+ huggingface-hub
6
+ python-dotenv
7
+
8
+ # TTS Dependencies
9
+ TTS @ git+https://github.com/coqui-ai/TTS@v0.22.0
10
+ pydantic==2.5.3
11
+
12
+ # LLM Dependencies
13
+ llama-cpp-python==0.2.79
14
+
15
+ # Audio & Text Processing
16
+ noisereduce==3.0.1
17
+ pydub
18
+ langid
19
+ nltk
20
+ emoji
21
+ ffmpeg-python
22
+
23
+ # Japanese Text (if needed by TTS)
24
+ mecab-python3==1.0.9
25
+ unidic-lite==1.0.8