Update app.py
app.py CHANGED
@@ -1,31 +1,38 @@
 import os
-from openai import OpenAI
 import gradio as gr
+from openai import OpenAI
 
-# ---
-# Hugging Face Space will automatically inject your token from Secrets
+# --- Initialize client securely ---
 HF_TOKEN = os.getenv("HF_TOKEN")
 
-
+if not HF_TOKEN:
+    raise ValueError("❌ HF_TOKEN not found. Please set it in your Hugging Face Space secrets.")
+
 client = OpenAI(
     base_url="https://router.huggingface.co/v1",
     api_key=HF_TOKEN,
 )
 
-# --- Chat
+# --- Chat handler ---
 def chat_with_model(message, history):
-    # Convert history into OpenAI-
-    messages = [
-
-
-
+    # Convert message history into OpenAI-style messages
+    messages = []
+
+    if history:
+        for msg in history:
+            if isinstance(msg, dict):  # New Gradio message format
+                messages.append(msg)
+            elif isinstance(msg, (list, tuple)) and len(msg) == 2:  # Old format fallback
+                messages.append({"role": "user", "content": msg[0]})
+                messages.append({"role": "assistant", "content": msg[1]})
+
     messages.append({"role": "user", "content": message})
 
+    # Query model
     try:
         response = client.chat.completions.create(
             model="openai/gpt-oss-120b:fireworks-ai",
             messages=messages,
-            max_tokens=500,
             temperature=0.7,
         )
         reply = response.choices[0].message.content
@@ -34,16 +41,14 @@ def chat_with_model(message, history):
 
     return reply
 
-# --- Gradio
-
+# --- Gradio Interface ---
+chatbot_ui = gr.ChatInterface(
     fn=chat_with_model,
-
-
-
-
-    textbox=gr.Textbox(placeholder="Ask me anything...", scale=7),
+    title="🧠 GPT-OSS 120B (Fireworks)",
+    description="Chat with the OSS 120B model hosted via Hugging Face router.",
+    examples=["Hello!", "Tell me a joke.", "Explain quantum computing in simple terms."],
+    type="messages",  # ✅ required to use new format
 )
 
-# --- Launch ---
 if __name__ == "__main__":
-
+    chatbot_ui.launch()
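In summary, the commit validates HF_TOKEN at startup instead of failing on the first request, normalizes both the old tuple-based and the new dict-based Gradio history formats, drops the max_tokens=500 cap, and switches gr.ChatInterface to type="messages" with a title, description, and example prompts. The resulting app.py, section by section: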
import os
import gradio as gr
from openai import OpenAI

# --- Initialize client securely ---
HF_TOKEN = os.getenv("HF_TOKEN")

if not HF_TOKEN:
    raise ValueError("❌ HF_TOKEN not found. Please set it in your Hugging Face Space secrets.")

client = OpenAI(
    base_url="https://router.huggingface.co/v1",
    api_key=HF_TOKEN,
)
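Before wiring the client into the UI, it can be worth a one-off check that the router accepts the token. A minimal sketch, assuming the `client` and model ID from the code above; everything here is standard openai v1 client usage:

```python
# One-off smoke test: reuses the `client` configured above and the model ID
# from this commit; prints a short completion if the token is accepted.
resp = client.chat.completions.create(
    model="openai/gpt-oss-120b:fireworks-ai",
    messages=[{"role": "user", "content": "ping"}],
    max_tokens=5,
)
print(resp.choices[0].message.content)
```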
# --- Chat handler ---
def chat_with_model(message, history):
    # Convert message history into OpenAI-style messages
    messages = []

    if history:
        for msg in history:
            if isinstance(msg, dict):  # New Gradio message format
                messages.append(msg)
            elif isinstance(msg, (list, tuple)) and len(msg) == 2:  # Old format fallback
                messages.append({"role": "user", "content": msg[0]})
                messages.append({"role": "assistant", "content": msg[1]})

    messages.append({"role": "user", "content": message})

    # Query model
    try:
        response = client.chat.completions.create(
            model="openai/gpt-oss-120b:fireworks-ai",
            messages=messages,
            temperature=0.7,
        )
        reply = response.choices[0].message.content
    except Exception as e:
        # Assumed: the except branch (lines 39-40) is unchanged context the
        # diff view does not show; something here must set `reply` on error.
        reply = f"Error: {e}"

    return reply
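The handler accepts both history shapes Gradio can produce. A quick illustration with hypothetical data (not from the Space itself):

```python
# Hypothetical history values showing the two shapes chat_with_model accepts.
old_style = [("Hi", "Hello! How can I help?")]  # (user, assistant) tuples
new_style = [                                   # type="messages" format
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello! How can I help?"},
]
# Both normalize to the same OpenAI-style list inside chat_with_model:
# [{"role": "user", "content": "Hi"},
#  {"role": "assistant", "content": "Hello! How can I help?"}]
```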
# --- Gradio Interface ---
chatbot_ui = gr.ChatInterface(
    fn=chat_with_model,
    title="🧠 GPT-OSS 120B (Fireworks)",
    description="Chat with the OSS 120B model hosted via Hugging Face router.",
    examples=["Hello!", "Tell me a joke.", "Explain quantum computing in simple terms."],
    type="messages",  # ✅ required to use new format
)
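With type="messages", Gradio passes history to chat_with_model as a list of {"role": ..., "content": ...} dicts rather than (user, assistant) pairs, which is why the dict branch in the handler can forward entries unchanged; the tuple branch remains only as a fallback for the older pair format.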
if __name__ == "__main__":
    chatbot_ui.launch()