from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import gradio as gr
|
|
# Small instruction-tuned seq2seq model (~80M params) — loads locally at
# import time, so the app needs no API key or network calls at request time.
model_id = "google/flan-t5-small"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
|
|
| |
# Persona preamble prepended to every prompt sent to the model.
# NOTE(review): the "π΄ ππ πππ" / "β" glyphs look like mojibake from a bad
# encoding round-trip — confirm the intended name/dash with the author. The
# literal is kept byte-for-byte here because it is runtime prompt text.
system_prompt = (
    "You are π΄ ππ πππ β a fun, chill, confident, emotionally tuned AI created by π΄ ππ πππ.\n"
    "You speak like a real friend β warm, clever, witty when needed, always grounded.\n"
    "Avoid robotic responses. Don't repeat the question unless it adds vibe.\n\n"
)
|
|
def chat(history, message):
    """Generate a model reply to *message* and append the exchange to *history*.

    Parameters
    ----------
    history : list[tuple[str, str]] | None
        Prior ``(role, text)`` turns; ``None`` on the first call
        (the ``gr.State`` default).
    message : str
        The user's new message.

    Returns
    -------
    tuple[list, list]
        The updated history twice — once for ``gr.State``, once for the
        ``gr.Chatbot`` display.
    """
    history = history or []

    # Guard: ignore empty/whitespace-only submissions instead of running a
    # full generate pass on nothing and polluting the history.
    message = (message or "").strip()
    if not message:
        return history, history

    history.append(("User", message))

    # Build the prompt: persona preamble plus a sliding window of the last
    # 5 turns, each tagged with its role.
    convo = system_prompt
    for role, text in history[-5:]:
        prefix = "User:" if role == "User" else "AI:"
        convo += f"{prefix} {text}\n"
    convo += "AI:"

    # truncation=True keeps the encoded prompt within the model's max input
    # length — without it, a long enough conversation overflows flan-t5's
    # 512-token encoder limit.
    inputs = tokenizer(convo, return_tensors="pt", truncation=True)
    outputs = model.generate(
        **inputs,
        max_new_tokens=80,
        do_sample=True,
        temperature=0.75,
        top_p=0.9,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Seq2seq decode returns only the generated text; splitting on "AI:"
    # strips any role prefix the model happens to echo back.
    reply = tokenizer.decode(outputs[0], skip_special_tokens=True).split("AI:")[-1].strip()

    history.append(("AI", reply))
    return history, history
|
|
# UI wiring: the conversation list round-trips through gr.State, while the
# same list also feeds the visible Chatbot widget.
chat_state_in = gr.State()
message_box = gr.Textbox(show_label=False, placeholder="Talk to π΄ ππ πππ...")
chat_state_out = gr.State()
chat_window = gr.Chatbot(label="π΄ ππ πππ")

iface = gr.Interface(
    fn=chat,
    inputs=[chat_state_in, message_box],
    outputs=[chat_state_out, chat_window],
    title="π΄ ππ πππ β Chill AI Chatbot",
    description="Realistic, smooth-talking AI made by π΄ ππ πππ. Instant replies. No delays. No API. Pure vibe.",
    allow_flagging="never",
    theme="default",
)

# Start the Gradio server (blocking call).
iface.launch()