# Hugging Face Spaces app: streaming OpenAI chat UI built with Gradio.
import os

import gradio as gr
from openai import OpenAI

# Read your key from env (set this in HF Spaces -> Settings -> Secrets)
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
if not OPENAI_API_KEY:
    raise RuntimeError("Missing OPENAI_API_KEY environment variable.")

# Single shared client for all requests.
client = OpenAI(api_key=OPENAI_API_KEY)

DEFAULT_SYS_PROMPT = "You are a helpful assistant. Be concise and accurate."
def chat_fn(message, history, system_prompt, temperature, model_name):
    """Stream an OpenAI chat completion, yielding the growing partial reply.

    Args:
        message: latest user text (str).
        history: list[dict] like [{'role': 'user'|'assistant', 'content': '...'}, ...]
            (Gradio ``type="messages"`` format).
        system_prompt: system instructions (str); skipped when blank.
        temperature: sampling temperature (coerced to float).
        model_name: str (e.g., 'gpt-4o-mini' or 'gpt-5').

    Yields:
        The assistant reply accumulated so far, once per streamed token chunk.
    """
    # Build messages for the Chat Completions API.
    messages = []
    if system_prompt and system_prompt.strip():
        messages.append({"role": "system", "content": system_prompt.strip()})

    # Gradio 'messages' history entries may carry extra keys (e.g. 'metadata')
    # and non-string content; the OpenAI API rejects unknown fields, so keep
    # only well-formed user/assistant turns reduced to role + content.
    for turn in history or []:
        role = turn.get("role")
        content = turn.get("content")
        if role in ("user", "assistant") and isinstance(content, str):
            messages.append({"role": role, "content": content})

    # Add the latest user turn.
    messages.append({"role": "user", "content": message})

    # Stream tokens and yield partials for Gradio to render incrementally.
    completion = client.chat.completions.create(
        model=model_name,
        messages=messages,
        temperature=float(temperature),
        stream=True,
    )
    partial = ""
    for chunk in completion:
        delta = chunk.choices[0].delta
        # Some chunks (role headers, finish markers) have no content.
        if delta and delta.content:
            partial += delta.content
            yield partial
# --- UI layout -------------------------------------------------------------
with gr.Blocks(title="Healthelic") as demo:
    gr.Markdown("## Healthelic Chat\nA lightweight, streaming chat app.")

    # Row 1: editable system prompt.
    with gr.Row():
        system_prompt = gr.Textbox(
            value=DEFAULT_SYS_PROMPT,
            label="System prompt",
            lines=2,
        )

    # Row 2: model picker + sampling temperature.
    with gr.Row():
        model_name = gr.Dropdown(
            choices=["gpt-4o-mini", "gpt-4o", "gpt-5-chat-latest"],
            value="gpt-5-chat-latest",
            label="Model",
        )
        temperature = gr.Slider(0.0, 1.2, value=0.7, step=0.1, label="Temperature")

    # Chat surface: streams chat_fn output; extra controls above feed in as
    # additional_inputs (order must match chat_fn's trailing parameters).
    chat = gr.ChatInterface(
        fn=chat_fn,
        type="messages",
        additional_inputs=[system_prompt, temperature, model_name],
        textbox=gr.Textbox(placeholder="Ask me anything…"),
        examples=[
            ["Explain transformers in one paragraph", DEFAULT_SYS_PROMPT, 0.7, "gpt-4o-mini"],
            ["Write a dad joke about databases", DEFAULT_SYS_PROMPT, 0.7, "gpt-4o-mini"],
        ],
        cache_examples=False,
        save_history=True,
        flagging_mode="manual",
        flagging_options=["👍 Useful", "👎 Not good", "⚠ Inaccurate"],
    )
# Launch when run as a script (HF Spaces also imports and serves `demo`).
if __name__ == "__main__":
    # share=True produces a public tunnel link for local runs; Spaces ignores it.
    demo.launch(share=True)