from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import torch

# DialoGPT-small: a small conversational GPT-2 variant from Microsoft.
model_id = "microsoft/DialoGPT-small"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Persona preamble prepended to every conversation. DialoGPT is not
# instruction-tuned, so this acts as best-effort context rather than a true system prompt.
system_prompt = "You are π΄ ππ πππ, a chill, witty, emotionally tuned AI friend who talks like a real person.\n"
def chat(history, message):
    """Generate a reply and return the updated (user, bot) history twice:
    once for the gr.State that stores the conversation, once for the Chatbot display."""
    history = history or []

    # Rebuild the prompt from the persona preamble plus all prior turns.
    convo = system_prompt
    for user_msg, bot_msg in history:
        convo += f"Human: {user_msg}\nAI: {bot_msg}\n"
    convo += f"Human: {message}\nAI:"

    inputs = tokenizer.encode(convo, return_tensors="pt")
    outputs = model.generate(
        inputs,
        max_new_tokens=50,
        pad_token_id=tokenizer.eos_token_id,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        eos_token_id=tokenizer.eos_token_id,
    )

    # Decode only the newly generated tokens and cut off any hallucinated "Human:" turn.
    generated = outputs[0][inputs.shape[-1]:]
    bot_reply = tokenizer.decode(generated, skip_special_tokens=True)
    bot_reply = bot_reply.split("Human:")[0].strip()
    history.append((message, bot_reply))

    # Keep only the last five exchanges so the prompt stays short.
    if len(history) > 5:
        history = history[-5:]

    return history, history
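
# Quick sanity check outside the UI, a minimal sketch: it just calls chat() once
# with an empty history and prints the model's reply. Uncomment to try in a REPL.
# _, hist = chat([], "Hey, how's it going?")
# print(hist[-1][1])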
with gr.Blocks() as demo:
    gr.Markdown("## π΄ ππ πππ AI Chat")
    chatbot = gr.Chatbot()
    state = gr.State([])
    msg = gr.Textbox(show_label=False, placeholder="Say something...")

    def respond(message, history):
        return chat(history, message)

    # Input order matters: the textbox value maps to `message` and the stored
    # history maps to `history`.
    msg.submit(respond, [msg, state], [state, chatbot])

demo.launch()
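
# launch() also accepts standard Gradio options, e.g. share=True for a temporary
# public link or server_port=7860 to pin the port; shown commented out as a sketch.
# demo.launch(share=True, server_port=7860)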