Spaces:
Sleeping
Sleeping
File size: 1,421 Bytes
import gradio as gr
from transformers import pipeline
# Load the text-generation pipeline
# NOTE(review): this downloads/loads the Granite model weights at import
# time — assumes network access (or a warm local cache) on first run.
pipe = pipeline(
    "text-generation",
    model="ibm-granite/granite-3.3-2b-instruct",
)
def chat(user_input, history):
    """Generate one assistant reply for *user_input* given prior turns.

    Args:
        user_input: The latest message typed by the user.
        history: List of (user_message, bot_message) pairs from earlier turns.

    Returns:
        The assistant's reply text for this turn.
    """
    # Flatten the (user, assistant) pairs into the role/content message
    # format that the pipeline's chat template expects, then add the
    # newest user turn at the end.
    messages = [
        {"role": role, "content": text}
        for user_msg, bot_msg in history
        for role, text in (("user", user_msg), ("assistant", bot_msg))
    ]
    messages.append({"role": "user", "content": user_input})

    # Sampling parameters are unchanged from the original configuration.
    result = pipe(
        messages,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
    )

    # The chat pipeline returns the whole conversation under
    # "generated_text"; the final entry is the newly generated
    # assistant message.
    return result[0]["generated_text"][-1]["content"]
# Gradio UI — chat window, input box, and a clear button wired together.
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 IBM Granite 3.3 Chatbot")
    gr.Markdown("Powered by **ibm-granite/granite-3.3-2b-instruct**")

    chatbot = gr.Chatbot()
    msg = gr.Textbox(placeholder="Ask me anything...", label="Your message")
    clear = gr.Button("Clear")

    def respond(message, chat_history):
        # Ask the model for a reply, extend the visible history with the
        # new turn, and clear the textbox by returning an empty string.
        reply = chat(message, chat_history)
        return "", chat_history + [(message, reply)]

    # Submitting the textbox sends (message, history) in and receives
    # (cleared textbox, updated history) back.
    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    # Clear resets the chat window to an empty history.
    clear.click(lambda: [], None, chatbot)

demo.launch()