Spaces:
Sleeping
Sleeping
File size: 1,386 Bytes
cfbebb6 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 |
import os
import gradio as gr
from openai import OpenAI
# ---- Configuration ----
# Read the HuggingFace access token from the environment (on Spaces this is
# set as a repository secret named HF_TOKEN).
# NOTE(review): os.getenv returns None when HF_TOKEN is unset — the client is
# still constructed and only fails at request time; consider failing fast here.
HF_API_KEY = os.getenv("HF_TOKEN")
# OpenAI-compatible client pointed at the HF inference router instead of
# api.openai.com; all chat.completions calls are proxied through it.
client = OpenAI(
base_url="https://router.huggingface.co/v1",
api_key=HF_API_KEY
)
# ---- Chat function ----
def chat_with_model(user_message, history):
    """Send *user_message* plus all prior turns to the model; return updated history.

    Parameters:
        user_message: the text the user just submitted.
        history: list of (user, assistant) string pairs from the Chatbot
            component, or None on the very first turn.

    Returns:
        The updated history list — the value expected by the single
        ``chatbot`` output component wired up in the UI.
    """
    if history is None:
        history = []
    # The completions API is stateless, so replay the whole conversation.
    messages = [{"role": "system", "content": "You are a helpful assistant."}]
    for human, bot in history:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": bot})
    messages.append({"role": "user", "content": user_message})
    try:
        completion = client.chat.completions.create(
            model="openai/gpt-oss-20b:nebius",
            messages=messages
        )
        # BUG FIX: the v1 OpenAI client returns a ChatCompletionMessage
        # object, not a dict — `message["content"]` raised TypeError, which
        # the except clause silently converted into an "Error: ..." reply.
        assistant_reply = completion.choices[0].message.content
    except Exception as e:
        # Surface API/network failures inside the chat instead of crashing
        # the Gradio event handler.
        assistant_reply = f"Error: {str(e)}"
    history.append((user_message, assistant_reply))
    # BUG FIX: the submit handler maps this function onto ONE output
    # component ([chatbot]); returning the original (reply, history) tuple
    # fed a malformed value to the Chatbot. Return only the history.
    return history
# ---- Gradio UI ----
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 Chatbot using HuggingFace Router (OpenAI API Compatible)")
    chatbot = gr.Chatbot(height=450)
    text_input = gr.Textbox(label="Your message")

    # Named helper instead of an inline lambda: returning "" resets the
    # textbox after each submission.
    def _clear_textbox():
        return ""

    # Two independent handlers on the same event: the first refreshes the
    # conversation pane, the second empties the input field.
    text_input.submit(chat_with_model, [text_input, chatbot], [chatbot])
    text_input.submit(_clear_textbox, None, text_input)

demo.launch()
|