from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr


# Small instruction-tuned seq2seq model — chosen so it can run on a
# CPU-only Hugging Face Space.
model_id = "google/flan-t5-small"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)


# Persona text prepended to every user prompt before generation.
# NOTE(review): the "π΄ ππ πππ" / "β" glyphs look like mojibake
# (mis-decoded Unicode, probably a stylized bot name and an em dash) —
# confirm the intended characters and re-encode; kept byte-identical here.
system_prompt = "You are π΄ ππ πππ β a fun, fast, emotionally tuned AI chatbot created by π΄ ππ πππ. You reply quickly, like a chill and clever human friend."
|
def chat(history, message):
    """Generate one model reply and append the exchange to the chat history.

    Args:
        history: List of ``(user_message, bot_reply)`` tuples held in
            ``gr.State``; ``None`` on the first call of a session.
        message: The user's latest text input from the Textbox.

    Returns:
        A ``(history, history)`` pair: the first element feeds the
        ``gr.State`` output, the second renders in the ``gr.Chatbot``.
    """
    # Build a single-turn prompt; flan-t5 has no chat template, so the
    # persona and turn markers are inlined as plain text.
    prompt = f"{system_prompt}\nUser: {message}\nAI:"
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=100)
    reply = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()

    # First call passes history=None — normalize to a fresh list.
    history = history or []
    history.append((message, reply))
    return history, history
| |
|
# Wire the chat function to a simple Interface: the State input/output pair
# threads the conversation history between calls, the Textbox supplies the
# user message, and the Chatbot widget renders the transcript.
# NOTE(review): runtime strings below contain the same apparent mojibake as
# the system prompt — confirm intended glyphs; left byte-identical here.
iface = gr.Interface(
    fn=chat,
    inputs=[gr.State(), gr.Textbox(placeholder="Talk to π΄ ππ πππ...")],
    outputs=[gr.State(), gr.Chatbot(label="π΄ ππ πππ AI Chatbot")],
    title="π΄ ππ πππ β Superfast Chatbot",
    description="An extremely fast and chill AI chatbot, created by π΄ ππ πππ. Running on Hugging Face Spaces (CPU only).",
    # NOTE(review): allow_flagging is deprecated in Gradio 4+ in favor of
    # flagging_mode — fine on Gradio 3.x; update if the Space upgrades.
    allow_flagging="never",
)


iface.launch()