import os

import gradio as gr
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
from huggingface_hub import InferenceClient

# Gradio app + FastAPI mount
app = FastAPI()

client = InferenceClient(
    model="HuggingFaceH4/zephyr-7b-beta",
    token=os.getenv("huggingface_token"),
)


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Rebuild the chat history in the OpenAI-style message format.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Stream tokens back to the Gradio UI as they arrive. Guard against
    # None deltas, which the streaming API can emit on the final chunk.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        response += chunk.choices[0].delta.content or ""
        yield response


# FastAPI POST endpoint for programmatic (non-UI) access
@app.post("/chat")
async def chat(request: Request):
    data = await request.json()
    message = data.get("message")
    persona = data.get("persona", "You are a friendly Chatbot.")
    max_tokens = data.get("max_tokens", 512)
    temperature = data.get("temperature", 0.7)
    top_p = data.get("top_p", 0.95)

    messages = [
        {"role": "system", "content": persona},
        {"role": "user", "content": message},
    ]

    # Accumulate the streamed chunks into a single response string.
    full_response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        full_response += chunk.choices[0].delta.content or ""

    return JSONResponse({"response": full_response})


# Gradio demo for UI access
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

# Mount the Gradio UI at "/"
app = gr.mount_gradio_app(app, demo, path="/")
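
# --- Usage sketch (assumptions flagged in comments) ---
# A minimal way to serve the mounted app for local testing, assuming uvicorn
# is installed; the port 7860 below is a hypothetical choice, not mandated by
# anything above. Hosted platforms may start the server for you instead.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)

# With the server running, the /chat endpoint can be exercised with, e.g.:
#   curl -X POST http://localhost:7860/chat \
#        -H "Content-Type: application/json" \
#        -d '{"message": "Hello!", "persona": "You are a friendly Chatbot."}'
# while the Gradio chat UI is available in a browser at http://localhost:7860/.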