Update app.py
app.py CHANGED
@@ -1,52 +1,29 @@
-from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr
-import torch
-
-model_id = "microsoft/DialoGPT-small"

+model_id = "google/flan-t5-small" # Extremely fast and CPU-friendly
tokenizer = AutoTokenizer.from_pretrained(model_id)
-model = AutoModelForCausalLM.from_pretrained(model_id)

-system_prompt = "You are E MO JIS — a
+system_prompt = "You are E MO JIS — a fun, fast, emotionally tuned AI chatbot created by E MO JIS. You reply quickly, like a chill and clever human friend."

def chat(history, message):
-
-
-
-
-    for user_msg, bot_msg in history[:-1]:
-        convo += f"Human: {user_msg}\nAI: {bot_msg}\n"
-    convo += f"Human: {message}\nAI:"
-
-    inputs = tokenizer.encode(convo, return_tensors="pt")
-    outputs = model.generate(
-        inputs,
-        max_new_tokens=50,
-        pad_token_id=tokenizer.eos_token_id,
-        do_sample=True,
-        temperature=0.7,
-        top_p=0.9,
-        eos_token_id=tokenizer.eos_token_id,
-    )
-    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    bot_reply = response.split("AI:")[-1].strip()
-    history[-1] = (message, bot_reply)
-
-    if len(history) > 5:
-        history = history[-5:]
+    prompt = f"{system_prompt}\nUser: {message}\nAI:"
+    inputs = tokenizer(prompt, return_tensors="pt")
+    outputs = model.generate(**inputs, max_new_tokens=100)
+    reply = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()

+    history = history or []
+    history.append((message, reply))
    return history, history

-
-
-gr.
-
-
-
-
-
-        return chat(history, message)
-
-    msg.submit(respond, [state, msg], [state, chatbot])
+iface = gr.Interface(
+    fn=chat,
+    inputs=[gr.State(), gr.Textbox(placeholder="Talk to E MO JIS...")],
+    outputs=[gr.State(), gr.Chatbot(label="E MO JIS AI Chatbot")],
+    title="E MO JIS — Superfast Chatbot",
+    description="An extremely fast and chill AI chatbot, created by E MO JIS. Running on Hugging Face Spaces (CPU only).",
+    allow_flagging="never"
+)

-
+iface.launch()
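
For a quick local sanity check of the new chat() flow outside the Gradio UI, something like the following can be run at the bottom of app.py in place of iface.launch(). This is a minimal sketch, not part of the commit: it relies only on the tokenizer, model, system_prompt, and chat() defined above, and the first run downloads google/flan-t5-small.

# Minimal smoke test for chat(): gr.State() hands the function None
# on the very first call, which chat() normalizes to an empty list.
state = None
state, history = chat(state, "Hello! How are you?")
print(history[-1])  # prints the latest (message, reply) tuple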