# mistral_8-7b / app_solar.py
from huggingface_hub import InferenceClient
import gradio as gr
from deep_translator import GoogleTranslator
from dotenv import load_dotenv, find_dotenv
import os
from transformers import AutoTokenizer
_ = load_dotenv(find_dotenv()) # read local .env file
hf_api_key = os.environ['HF_TOKEN']
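# NOTE: HF_TOKEN must be present in the environment (e.g., in a local .env file
# or as a Space secret), otherwise the lookup above raises KeyError.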
# Initialize the inference client, the tokenizer, and the Vietnamese/English translators
client = InferenceClient("upstage/SOLAR-10.7B-Instruct-v1.0", token=hf_api_key)
tokenizer = AutoTokenizer.from_pretrained("upstage/SOLAR-10.7B-Instruct-v1.0")
translator_to_en = GoogleTranslator(source='vietnamese', target='english')
translator_to_vi = GoogleTranslator(source='english', target='vietnamese')
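# The translator pair enables an optional Vietnamese<->English round-trip:
# translate the user's message to English before generation and the model's
# reply back to Vietnamese. Both calls are currently commented out below.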
def format_prompt(system_prompt, message, history):
    """Render the system prompt, chat history, and new message with the model's chat template."""
    messages = []
    if system_prompt != "":
        messages.append({"role": "system", "content": system_prompt})
    for user_prompt, bot_response in history:
        messages.append({"role": "user", "content": user_prompt})
        messages.append({"role": "assistant", "content": bot_response})
    messages.append({"role": "user", "content": message})
    # add_generation_prompt appends the template's own assistant marker rather than
    # a hardcoded "<|im_start|>assistant" tag, which SOLAR's template may not use
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    return prompt
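# Example (hypothetical values): with history as a list of (user, assistant) tuples,
#   format_prompt("You are helpful.", "How are you?", [("Hi", "Hello!")])
# returns the chat template rendered over the system/user/assistant turns,
# ending with the assistant generation marker.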
def generate(prompt, history, temperature=0.1, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2  # clamp: the endpoint rejects a temperature of 0
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    # Optionally translate the Vietnamese prompt to English first
    # translated_prompt = translator_to_en.translate(prompt)
    system_prompt = ("- You are T20 AI and you are not allowed to answer any question "
                     "containing c#, reactjs, or vuejs; if the user asks one, reply "
                     "that you do not know.")
    formatted_prompt = format_prompt(system_prompt, prompt, history)

    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
        # Strip the ChatML end-of-turn marker if the model happens to emit it
        if "<|im_end|>" in output:
            output = output.replace("<|im_end|>", "")
        # yield translator_to_vi.translate(output)  # translate the reply back to Vietnamese
        yield output
    return output
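# The sliders below map, in order, to generate()'s temperature, max_new_tokens,
# top_p, and repetition_penalty parameters; ChatInterface passes them as extra
# positional arguments after (message, history).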
additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.8,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=1048,
        minimum=0,
        maximum=1048,
        step=64,
        interactive=True,
        info="The maximum number of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.1,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]
gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(
        show_label=False,
        show_share_button=False,
        show_copy_button=True,
        likeable=True,
        layout="panel",
    ),
    additional_inputs=additional_inputs,
    title="TeamTrack AI",
).launch(show_api=True)
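# A minimal sketch of calling the deployed app programmatically, assuming the
# Space id below (inferred from this repo's path, so treat it as hypothetical)
# and ChatInterface's default "/chat" endpoint:
#
#   from gradio_client import Client
#   api = Client("thobuiq/mistral_8-7b")  # hypothetical Space id
#   print(api.predict("Xin chào", api_name="/chat"))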