|
|
import gradio as gr |
|
|
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline |
|
|
|
|
|
|
|
|
# Fine-tuned multilingual Qwen 1.5B checkpoint hosted on the HuggingFace Hub;
# from_pretrained below downloads it on first run (network I/O).
model_name = "lewishamilton21/Qwen_1.5B_multilingual_Fine-Tuned_LLM"


# Tokenizer matching the checkpoint's vocabulary.
tokenizer = AutoTokenizer.from_pretrained(model_name)


# device_map="auto" lets accelerate place the weights on GPU when available,
# falling back to CPU otherwise.
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")


# Module-level text-generation pipeline reused by every chat() call, so the
# model is loaded once per process rather than once per request.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device_map="auto")
|
|
|
|
|
|
|
|
def chat(user_message, history):
    """Run one chat turn: build a prompt from the history, generate a reply.

    Parameters
    ----------
    user_message : str
        The latest message typed by the user.
    history : list[tuple[str, str]]
        Previous turns as (user_text, ai_text) pairs — the pair format
        gr.Chatbot renders as left/right message bubbles.

    Returns
    -------
    tuple[list, list]
        The updated history twice: once for the Chatbot display and once
        for the gr.State that carries it to the next submission.
    """
    # Rebuild the plain-text conversation prompt from prior turns.
    prompt = ""
    for user_text, ai_text in history:
        prompt += f"User: {user_text}\nAI: {ai_text}\n"
    prompt += f"User: {user_message}\nAI:"

    # max_new_tokens bounds only the continuation; the original max_length=512
    # counted the prompt too, so generation failed once the chat grew past it.
    output = generator(
        prompt,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        num_return_sequences=1,
    )

    # The pipeline echoes the prompt, so strip that exact prefix rather than
    # splitting on "AI:" (which breaks if the user's message contains "AI:").
    generated = output[0]["generated_text"]
    if generated.startswith(prompt):
        reply = generated[len(prompt):]
    else:
        reply = generated.split("AI:")[-1]
    # Drop any hallucinated follow-up "User:" turn the model invents.
    reply = reply.split("\nUser:")[0].strip()

    # Store the turn as (user, ai) so gr.Chatbot shows the actual messages —
    # the previous ("User", msg)/("AI", reply) tuples made the UI display the
    # literal labels "User" and "AI" as chat bubbles.
    history.append((user_message, reply))
    return history, history
|
|
|
|
|
|
|
|
with gr.Blocks() as demo:
    # Page header.
    gr.Markdown("# 🗣️ Multilingual Qwen 1.5B Chatbot")

    # Widgets: conversation view, user input box, and a reset button.
    conversation = gr.Chatbot()
    user_input = gr.Textbox(label="Type your message here...")
    reset_button = gr.Button("Clear Chat")

    # Server-side store carrying the chat history between submissions.
    chat_state = gr.State([])

    # Pressing Enter in the textbox runs one chat turn.
    user_input.submit(chat, [user_input, chat_state], [conversation, chat_state])
    # The reset button empties both the visible chat and the stored history.
    reset_button.click(lambda: ([], []), None, [conversation, chat_state])


# share=True exposes a temporary public Gradio link in addition to localhost.
demo.launch(share=True)
|
|
|