|
|
import gradio as gr |
|
|
import torch |
|
|
from transformers import AutoTokenizer, AutoModelForCausalLM |
|
|
import spaces , os |
|
|
|
|
|
# Hugging Face Hub repository ID of the causal-LM chat model to load.
MODEL_ID = "bmiller22000/xyntrai-mistral-2.5-7b-chat-nsfw"
|
|
|
|
|
|
|
|
# Load tokenizer and model once at import time so they are shared by every
# request handled by this process.
# NOTE(review): trust_remote_code=True executes code shipped inside the model
# repo — presumably required by this repo's custom classes; verify the repo
# is trusted before deploying.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)

model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,

    # Half precision to reduce GPU memory footprint.
    torch_dtype=torch.float16,

    # Let accelerate decide device placement (GPU when available).
    device_map="auto",

    trust_remote_code=True
)
|
|
|
|
|
|
|
|
@spaces.GPU(duration=60)
def chat_with_model(prompt, system_prompt, chatbot_display, internal_history):
    """
    Generate one assistant reply and update both chat histories.

    Args:
        prompt: New user message; must contain the secret access key
            (the value of the ``hf_key`` environment variable), which is
            stripped before the text is sent to the model.
        system_prompt: System message prepended to the conversation.
        chatbot_display: Display history for ``gr.Chatbot``
            (list of ``[user, assistant]`` pairs), or ``None``.
        internal_history: Message history for ``gr.State``
            (list of ``{"role", "content"}`` dicts), or ``None``.

    Returns:
        A 3-tuple matching the declared Gradio outputs:
        (textbox value, updated display history, updated internal history).
    """
    # Simple access control: the prompt must embed the secret key.
    expected_key = os.environ.get("hf_key")
    # BUGFIX: the original did `expected_key not in prompt` (TypeError when
    # the env var is unset) and then `return None`, which does not match the
    # three Gradio outputs and crashed the UI. Reject cleanly instead,
    # leaving the textbox and both histories unchanged.
    if not expected_key or expected_key not in prompt:
        print("❌ Invalid key.")
        return prompt, chatbot_display, internal_history

    # Strip the key so it never reaches the model or the visible history.
    prompt = prompt.replace(expected_key, "")

    # First turn: Gradio passes None for empty components/state.
    if chatbot_display is None:
        chatbot_display = []
    if internal_history is None:
        internal_history = []

    # Build the full conversation: system prompt, prior turns, new user turn.
    messages_for_model = [{"role": "system", "content": system_prompt}]
    messages_for_model.extend(internal_history)
    messages_for_model.append({"role": "user", "content": prompt})

    inputs = tokenizer.apply_chat_template(
        messages_for_model,
        tokenize=True,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)

    output_tokens = model.generate(
        inputs,
        max_new_tokens=5120,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
    )

    # Decode only the newly generated tokens (everything after the prompt).
    response_text = tokenizer.decode(
        output_tokens[0][inputs.shape[-1]:], skip_special_tokens=True
    )

    # Record the turn in the model-facing history...
    internal_history.append({"role": "user", "content": prompt})
    internal_history.append({"role": "assistant", "content": response_text})

    # ...and in the UI-facing history.
    chatbot_display.append([prompt, response_text])

    # Clear the input textbox and push both updated histories back to the UI.
    return "", chatbot_display, internal_history
|
|
|
|
|
def clear_chat():
    """Reset both the visible chat display and the internal state to empty."""
    cleared = (None, None)
    return cleared
|
|
|
|
|
|
|
|
# --- UI layout and event wiring -------------------------------------------
# Two-column layout: chat on the left, editable system prompt on the right.
with gr.Blocks(theme=gr.themes.Monochrome()) as demo:

    # Model-facing conversation history (list of role/content dicts),
    # persisted per-session via gr.State.
    internal_history = gr.State()

    with gr.Row():

        with gr.Column(scale=3):

            # Visible chat transcript ([user, assistant] pairs).
            chatbot_display = gr.Chatbot(
                label="Chat History",
                bubble_full_width=False,
                height=500
            )

            # User input; submitting (Enter) triggers generation.
            prompt_box = gr.Textbox(
                label="Your Message",
                placeholder="Nhập tin nhắn của bạn và nhấn Enter...",
                lines=1
            )

            with gr.Row():
                clear_button = gr.Button("Clear Chat")

                # Hidden button kept as an alternative programmatic trigger.
                submit_button = gr.Button("Send", visible=False)

        with gr.Column(scale=1):

            # Free-form system prompt controlling the assistant's behavior.
            system_prompt_box = gr.Textbox(
                label="System Prompt (AI's Role & Rules)",
                value="",
                lines=30
            )

    # Enter in the textbox: generate a reply, clear the box, refresh
    # the transcript and the internal state.
    prompt_box.submit(
        fn=chat_with_model,
        inputs=[prompt_box, system_prompt_box, chatbot_display, internal_history],
        outputs=[prompt_box, chatbot_display, internal_history]
    )

    # Same handler for the (hidden) Send button.
    submit_button.click(
        fn=chat_with_model,
        inputs=[prompt_box, system_prompt_box, chatbot_display, internal_history],
        outputs=[prompt_box, chatbot_display, internal_history]
    )

    # Clear both histories (resets the Chatbot and the State to None).
    clear_button.click(
        fn=clear_chat,
        inputs=None,
        outputs=[chatbot_display, internal_history]
    )
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Start the Gradio server when executed as a script.
    demo.launch()