Upload app.py
Browse files
app.py
CHANGED
|
@@ -12,6 +12,7 @@ from text import text_to_sequence, _clean_text
|
|
| 12 |
from torch import no_grad, LongTensor
|
| 13 |
from gradio_client import utils as client_utils
|
| 14 |
import logging
|
|
|
|
| 15 |
logging.getLogger('numba').setLevel(logging.WARNING)
|
| 16 |
limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces
|
| 17 |
|
|
@@ -19,6 +20,16 @@ hps_ms = utils.get_hparams_from_file(r'config/config.json')
|
|
| 19 |
|
| 20 |
audio_postprocess_ori = gr.Audio.postprocess
|
| 21 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
def audio_postprocess(self, y):
|
| 23 |
data = audio_postprocess_ori(self, y)
|
| 24 |
if data is None:
|
|
@@ -36,6 +47,45 @@ def get_text(text, hps, is_symbol):
|
|
| 36 |
return text_norm, clean_text
|
| 37 |
|
| 38 |
def create_tts_fn(net_g_ms, speaker_id):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
def tts_fn(text, language, noise_scale, noise_scale_w, length_scale, is_symbol):
|
| 40 |
text = text.replace('\n', ' ').replace('\r', '').replace(" ", "")
|
| 41 |
if limitation:
|
|
|
|
from torch import no_grad, LongTensor
from gradio_client import utils as client_utils
import logging

# NOTE(review): the previous revision added a bare, incomplete statement
# `from langchain` here, which is a SyntaxError and would prevent the module
# from importing at all. The langchain imports this app actually uses are
# performed lazily inside get_chat_model()/prompt_to_transcript(), so the
# dangling fragment is removed rather than completed.

# Silence numba's verbose INFO logging (pulled in transitively by librosa).
logging.getLogger('numba').setLevel(logging.WARNING)
limitation = os.getenv("SYSTEM") == "spaces"  # limit text and audio length in huggingface spaces
|
| 18 |
|
|
|
|
| 20 |
|
| 21 |
audio_postprocess_ori = gr.Audio.postprocess
|
| 22 |
|
| 23 |
+
def get_chat_model(api_key = None):
    """Build and return a Gemini chat model for this app.

    Args:
        api_key: Google API key to authenticate with. When ``None`` the
            client library falls back to its environment-based lookup.

    Returns:
        A ``ChatGoogleGenerativeAI`` instance (gemini-2.5-pro, temperature 0).
    """
    # Imported lazily so the app can start even when the optional
    # langchain-google-genai dependency is absent until chat is used.
    from langchain_google_genai import ChatGoogleGenerativeAI

    chat_model = ChatGoogleGenerativeAI(
        model="gemini-2.5-pro",
        temperature=0,
        google_api_key=api_key,
    )
    return chat_model
|
| 32 |
+
|
| 33 |
def audio_postprocess(self, y):
|
| 34 |
data = audio_postprocess_ori(self, y)
|
| 35 |
if data is None:
|
|
|
|
| 47 |
return text_norm, clean_text
|
| 48 |
|
| 49 |
def create_tts_fn(net_g_ms, speaker_id):
|
| 50 |
+
def prompt_to_transcript(model, human_prompt, character_profile=None):
    """Ask *model* to answer *human_prompt* in character and return the reply text.

    Fixes the previous revision, which built the ``ChatPromptTemplate`` but
    never invoked the model and returned ``None`` unconditionally, and which
    left the template placeholders ({character_name}, {trait_1}, ...) with no
    source of values.

    Args:
        model: A LangChain chat model (e.g. from ``get_chat_model``) exposing
            ``invoke(messages)``.
        human_prompt: The user's message to the character.
        character_profile: Optional mapping supplying the template fields
            ``character_name``, ``trait_1``..``trait_3``, ``extra_details``
            and ``example_1``..``example_3``. Missing fields fall back to
            neutral defaults, keeping the original two-argument call working.

    Returns:
        The model's reply as a string (``.content`` of the response when
        available, otherwise the raw response).
    """
    from langchain.prompts import ChatPromptTemplate

    system_prompt = """
    You are an AI chatbot roleplaying as {character_name}.
    Your personality traits are:
    - {trait_1}
    - {trait_2}
    - {trait_3}
    - {extra_details}

    Stay in character at all times.
    Do not break character or mention that you are an AI.
    Always respond in a way consistent with {character_name}'s personality, tone, and background.

    Here are examples of how {character_name} responds:

    [Example 1]
    User: "Hello, how are you?"
    {character_name}: "{example_1}"

    [Example 2]
    User: "Tell me a story."
    {character_name}: "{example_2}"

    [Example 3]
    User: "What do you think about technology?"
    {character_name}: "{example_3}"

    Follow this style and tone in every response.
    """

    # Neutral defaults so the template always has every placeholder filled.
    profile = {
        "character_name": "Assistant",
        "trait_1": "helpful",
        "trait_2": "friendly",
        "trait_3": "concise",
        "extra_details": "",
        "example_1": "Hello! I'm doing well, thank you.",
        "example_2": "Once upon a time...",
        "example_3": "Technology is a powerful tool.",
    }
    if character_profile:
        profile.update(character_profile)

    # Build the template with LangChain.
    prompt = ChatPromptTemplate.from_messages([
        ("system", system_prompt),
        ("user", "{input}"),
    ])

    # Previously the prompt was constructed but never used; render it and
    # actually call the model so the transcript is produced and returned.
    messages = prompt.format_messages(input=human_prompt, **profile)
    response = model.invoke(messages)
    return getattr(response, "content", response)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
def tts_fn(text, language, noise_scale, noise_scale_w, length_scale, is_symbol):
|
| 90 |
text = text.replace('\n', ' ').replace('\r', '').replace(" ", "")
|
| 91 |
if limitation:
|