import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

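# Load the fine-tuned travel-assistant model and its tokenizer from the Hugging Face Hub.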
model_name = "HOANGHUAN/dulich"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # half precision to reduce GPU memory use
    device_map="auto",          # place weights on the available GPU(s) automatically
)


def chat(message, history):
    # Rebuild the full conversation: system prompt, previous turns, new message.
    # With Gradio's default ChatInterface settings, history arrives as a list of
    # [user_message, assistant_message] pairs.
    messages = [
        {"role": "system", "content": "Bạn là Vistours Assistant."},  # "You are Vistours Assistant."
    ]
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

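    # add_generation_prompt=True appends the assistant header to the prompt so
    # the model answers as the assistant instead of predicting another user turn.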
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

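    # Sample a reply: nucleus sampling (top_p=0.9) at moderate temperature keeps
    # answers varied but on topic; cap the reply at 256 new tokens.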
    outputs = model.generate(
        **inputs,
        max_new_tokens=256,
        temperature=0.7,
        top_p=0.9,
        do_sample=True,
    )

    # Decode only the newly generated tokens. outputs[0] echoes the prompt, and
    # slicing it off is more reliable than splitting the decoded text on the
    # word "assistant", which can also occur inside the reply itself.
    response = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[1]:],
        skip_special_tokens=True,
    )
    return response.strip()
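

# Wire the chat function into a simple web chat UI and start the server.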
gr.ChatInterface(
    fn=chat,
    title="Vistours Chatbot",
    description="AI Assistant cho công ty du lịch Vistours",  # "AI assistant for the Vistours travel company"
).launch()