| import gradio as gr |
| from huggingface_hub import InferenceClient |
|
|
| |
# Hugging Face Inference client pinned to the OSS 20B chat model.
# NOTE(review): no token is supplied — presumably relies on anonymous
# access as the description says ("no login required"); confirm quota.
client = InferenceClient(model="openai/gpt-oss-20b")
|
|
def respond(message, history, system_message, max_tokens, temperature, top_p):
    """Stream a chat completion for *message* through the shared ``client``.

    Yields the accumulated reply string after each received token so Gradio
    can render the response incrementally. On any failure, yields a single
    error string instead of raising (keeps the UI alive).

    Parameters:
        message: the new user message.
        history: prior turns, either OpenAI-style ``{"role", "content"}``
            dicts or legacy Gradio ``[user, assistant]`` pairs.
        system_message: system prompt prepended to the conversation.
        max_tokens / temperature / top_p: sampling controls forwarded
            to ``client.chat_completion``.
    """
    messages = [{"role": "system", "content": system_message}]
    # Normalize history: accept both dict-format entries (type="messages")
    # and legacy [user, assistant] pairs — the original `messages += history`
    # silently produced invalid messages for the pair format.
    for entry in history:
        if isinstance(entry, dict):
            messages.append(entry)
        else:
            user_msg, assistant_msg = entry
            if user_msg:
                messages.append({"role": "user", "content": user_msg})
            if assistant_msg:
                messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = ""
    try:
        for chunk in client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            # delta.content can be None (e.g. role-only or final chunks);
            # the hasattr check alone let `response += None` raise TypeError.
            token = chunk.choices[0].delta.content
            if token:
                response += token
                yield response
    except Exception as e:  # surface failures in the chat instead of crashing
        yield f"⚠️ Error: {e}"
|
|
# Build the chat UI. `type="messages"` makes Gradio deliver history as
# OpenAI-style {"role", "content"} dicts — the format `respond` forwards
# directly to the model (`messages += history`); the legacy pair format
# would produce an invalid message list.
chatbot = gr.ChatInterface(
    fn=respond,
    type="messages",
    additional_inputs=[
        gr.Textbox(value="You are a helpful assistant.", label="System Message"),
        gr.Slider(minimum=64, maximum=2048, value=512, step=1, label="Max Tokens"),
        gr.Slider(minimum=0.1, maximum=1.5, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-p"),
    ],
    title="🧠 CouncilShell Prototype",
    description="Send a message and receive a streamed reply from the OSS 20B model — no login required.",
)
|
|
if __name__ == "__main__":
    # Start the Gradio server only when run as a script, not on import.
    chatbot.launch()