Spaces:
Sleeping
Sleeping
File size: 1,599 Bytes
eb99a2a d5d7fd6 18378f1 f755af4 5a59c90 d5d7fd6 9c2ce43 f755af4 7159c8a b491d45 7159c8a f755af4 7159c8a 4d81164 7159c8a f755af4 71b49b2 d5d7fd6 71b49b2 6648648 1ef00df 7159c8a 1ef00df f755af4 8cd4aae f755af4 6648648 1ef00df a7a7ad9 d5d7fd6 1ef00df f755af4 4d81164 b60acbf 6648648 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 |
import gradio as gr
from transformers import BlenderbotSmallTokenizer, BlenderbotSmallForConditionalGeneration
# Hugging Face Hub identifier for the small (90M-parameter) BlenderBot
# conversational model.
model_name = "facebook/blenderbot-90M"
# Load the tokenizer and seq2seq model once at import time so every chat
# request reuses the same in-memory instances (downloads on first run).
tokenizer = BlenderbotSmallTokenizer.from_pretrained(model_name)
model = BlenderbotSmallForConditionalGeneration.from_pretrained(model_name)
def chat_with_bot(message, history):
    """Generate a BlenderBot reply to *message* given the chat *history*.

    Parameters
    ----------
    message : str
        The user's latest input from the Gradio textbox.
    history : list[dict] | None
        Prior turns in Gradio "messages" format: each dict has a "role"
        ("user" or "assistant") and a "content" key.

    Returns
    -------
    str
        The model's reply, or a canned greeting when the message is
        empty or whitespace-only.
    """
    # Guard empty AND whitespace-only input (the original only caught
    # the empty string, so "   " was sent to the model as a blank turn).
    if not message or not message.strip():
        return "Hi there! 👋 Ask me something to get started."

    # Flatten the structured history into the "User:/Bot:" transcript
    # the prompt below expects; roles other than user/assistant are
    # skipped. Built with join instead of repeated += concatenation.
    parts = []
    for turn in history or []:
        role = turn.get("role")
        if role == "user":
            parts.append(f"User: {turn.get('content')}\n")
        elif role == "assistant":
            parts.append(f"Bot: {turn.get('content')}\n")
    conversation = "".join(parts) + f"User: {message}\nBot:"

    # Fix: the original used padding="max_length", padding every request
    # to 512 tokens — pointless for a batch of one and wasteful at
    # generation time. Truncation alone bounds the prompt length.
    inputs = tokenizer(
        conversation,
        return_tensors="pt",
        truncation=True,
        max_length=512,
    )
    reply_ids = model.generate(**inputs, max_length=120)
    return tokenizer.decode(reply_ids[0], skip_special_tokens=True)
# Seed message shown in the chat window before the user types anything.
initial_messages = [
    dict(
        role="assistant",
        content="👋 Hello! I’m your chatbot. Ask me anything to start our conversation!",
    )
]
# Pre-seed the chat component with the greeting so the window is not
# empty on first load; "messages" type matches the dict-based history
# format chat_with_bot consumes.
seeded_chatbot = gr.Chatbot(value=initial_messages, type="messages")

demo = gr.ChatInterface(
    fn=chat_with_bot,
    chatbot=seeded_chatbot,
    type="messages",
    title="🤖 Mini Chatbot (Facebook BlenderBot-90M)",
    description="Hi 👋 I’m a small conversational chatbot powered by Facebook’s BlenderBot-90M.",
    theme="soft",
)

# Launch the Gradio server only when executed as a script, not on import.
if __name__ == "__main__":
    demo.launch()
|