# Gradio chat Space: streams replies from "openai/gpt-oss-20b" via the
# Hugging Face InferenceClient, using the signed-in user's OAuth token.
import gradio as gr
from huggingface_hub import InferenceClient
def respond(
    message,
    history: list[dict[str, str]],
    system_message,
    hf_token: gr.OAuthToken,
):
    """Stream a chat completion for *message* given the prior *history*.

    Args:
        message: The latest user prompt.
        history: Prior turns as OpenAI-style ``{"role", "content"}`` dicts.
        system_message: Text placed in the leading system message.
        hf_token: OAuth token injected by Gradio's login flow; its ``.token``
            authenticates the Inference API call.

    Yields:
        The accumulated assistant response after each streamed chunk, so the
        UI can render progressively longer partial text.
    """
    client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")

    messages = [{"role": "system", "content": system_message}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    # BUG FIX: the original reused `message` as the loop variable, shadowing
    # (and clobbering) the user-prompt parameter; use a distinct name.
    for chunk in client.chat_completion(
        messages,
        max_tokens=512,  # hard-coded generation settings for this Space
        stream=True,
        temperature=0.7,
        top_p=0.95,
    ):
        choices = chunk.choices
        # Chunks may arrive with no choices or an empty delta; skip those
        # but still yield so the UI stays in sync with the stream.
        if choices and choices[0].delta.content:
            response += choices[0].delta.content
        yield response
# Chat panel definition; rendered inside the Blocks layout further down.
# The extra textbox feeds `respond`'s `system_message` argument.
_system_prompt_box = gr.Textbox(
    value="You are a friendly Chatbot.",
    label="System message",
)
chatbot = gr.ChatInterface(
    respond,
    type="messages",
    additional_inputs=[_system_prompt_box],
)
# Full-viewport styling: a flex column so the chat area fills the screen,
# with a scrollable chatbot region and tighter spacing on narrow viewports.
_APP_CSS = """
body {
    margin: 0;
    padding: 0;
    font-family: system-ui, sans-serif;
}
.gradio-container {
    height: 100vh;
    width: 100%;
    display: flex;
    flex-direction: column;
}
.gr-blocks {
    flex: 1;
    display: flex;
    flex-direction: column;
}
.gr-chatbot {
    flex: 1;
    overflow-y: auto;
    max-height: calc(100vh - 120px);
}
@media (max-width: 768px) {
    .gradio-container, .gr-blocks {
        padding: 0;
        margin: 0;
    }
    .gr-chatbot {
        max-height: calc(100vh - 100px);
    }
}
"""

with gr.Blocks(css=_APP_CSS) as demo:
    with gr.Sidebar():
        # The login button supplies the gr.OAuthToken that respond() requires.
        gr.LoginButton()
    chatbot.render()
if __name__ == "__main__":
    # Launch the Gradio server when run as a script. (Removed a stray " |"
    # scrape artifact that made this line a syntax error.)
    demo.launch()