Spaces:
Sleeping
Sleeping
File size: 1,610 Bytes
9f7694a 27c51c4 9f7694a 27c51c4 9f7694a 27c51c4 9f7694a 27c51c4 9f7694a 27c51c4 9f7694a 27c51c4 9f7694a 27c51c4 9f7694a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 |
import gradio as gr
from transformers import BlenderbotSmallTokenizer, BlenderbotSmallForConditionalGeneration
# Load the small (~90M parameter) BlenderBot checkpoint once at import time
# so the Gradio callback reuses the same tokenizer/model on every turn.
model_name = "facebook/blenderbot-90M"
tokenizer = BlenderbotSmallTokenizer.from_pretrained(model_name)
model = BlenderbotSmallForConditionalGeneration.from_pretrained(model_name)
def chat_with_bot(message, history):
    """Generate a BlenderBot-90M reply to *message* given the chat *history*.

    Args:
        message: The user's latest message (str).
        history: Prior turns as a list of {"role": ..., "content": ...} dicts
            (Gradio "messages" format); may be empty or None on the first turn.

    Returns:
        The bot's reply as a plain string.
    """
    # Greet instead of generating when there is nothing to respond to
    # (also catches whitespace-only input, which the falsy check misses).
    if not message or not message.strip():
        return "Hi there! 👋 Ask me something to get started."

    # Flatten the structured history into the flat "User:/Bot:" transcript
    # this small seq2seq model is prompted with; join once instead of
    # building the string with repeated +=.
    lines = []
    for turn in history or []:
        role = turn.get("role")
        content = turn.get("content")
        if role == "user":
            lines.append(f"User: {content}\n")
        elif role == "assistant":
            lines.append(f"Bot: {content}\n")
    lines.append(f"User: {message}\nBot:")
    conversation = "".join(lines)

    # Tokenize without padding — a batch of one needs none, and
    # padding="max_length" just wasted compute on 512-token inputs.
    inputs = tokenizer(conversation, return_tensors="pt")

    # Keep the LAST 512 tokens when the transcript overflows the context
    # window: the tokenizer's default right-truncation would drop the most
    # recent turns (including the message just sent) and keep stale history.
    if inputs["input_ids"].shape[1] > 512:
        inputs["input_ids"] = inputs["input_ids"][:, -512:]
        inputs["attention_mask"] = inputs["attention_mask"][:, -512:]

    # max_length here bounds the generated reply (decoder side) at 120 tokens.
    reply_ids = model.generate(**inputs, max_length=120)
    return tokenizer.decode(reply_ids[0], skip_special_tokens=True)
# Opening assistant greeting, in Gradio "messages" (role/content) format.
initial_message = [
    {"role": "assistant", "content": "👋 Hello! I’m your chatbot. Ask me anything to start our conversation!"}
]
# Chat UI in "messages" format. The `initial_message` greeting defined above
# was previously dead code — seed the chatbot with it so the page does not
# open on an empty conversation.
demo = gr.ChatInterface(
    fn=chat_with_bot,
    title="🤖 Mini Chatbot (Facebook BlenderBot-90M)",
    description="Hi 👋 I’m a small conversational chatbot powered by Facebook’s BlenderBot-90M.",
    theme="soft",
    type="messages",
    chatbot=gr.Chatbot(value=initial_message, type="messages"),
    examples=["Hello!", "Tell me a fun fact", "How are you today?"],
)
if __name__ == "__main__":
    # Removed the trailing " |" scrape artifact that made this line a syntax
    # error. share=True additionally publishes a temporary public Gradio link.
    demo.launch(share=True)