import os

import gradio as gr
from openai import OpenAI

# Create the OpenAI client once at module load.
# The key is read from the environment (on a Hugging Face Space:
# Settings → Variables and secrets → OPENAI_API_KEY); if it is unset,
# os.getenv returns None and the first API call will fail with an auth error.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
def chat_with_openai(messages, history):
    """Gradio ``ChatInterface`` callback (``type="messages"``).

    With ``type="messages"`` Gradio invokes the callback as
    ``fn(message, history)``: the first argument is the user's *latest*
    message as a ``str`` and ``history`` is the prior conversation as a
    list of ``{"role": ..., "content": ...}`` dicts. The original code
    treated the first argument as the full message list, which would
    iterate the string character-by-character in ``extend`` and crash on
    ``str.append``; it also ignored ``history`` entirely, so the model
    never saw earlier turns.

    Args:
        messages: The newest user message (``str`` from Gradio), or — for
            backward compatibility with direct callers — a full list of
            role/content message dicts.
        history: Prior turns as a list of message dicts (may be ``None``
            or empty on the first turn).

    Returns:
        The assistant's reply text. Returning just the string lets Gradio
        append it to the chat; returning the whole list would duplicate
        the history in the UI.
    """
    # Always prepend the system prompt so every request carries the persona.
    context = [{"role": "system", "content": "You are Neuro, a concise helpful AI assistant."}]
    if isinstance(messages, str):
        # Normal Gradio path: prior turns + the new user message.
        context.extend(history or [])
        context.append({"role": "user", "content": messages})
    else:
        # Legacy path: caller supplied the full message list directly.
        context.extend(messages)
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=context,
    )
    return response.choices[0].message.content
# Chat UI definition. type="messages" selects the OpenAI-style message
# schema (list of {"role", "content"} dicts) instead of the legacy
# list-of-tuples history format.
demo = gr.ChatInterface(
    chat_with_openai,
    type="messages",
    title="🧠 Neuro Chat Demo",
    description="OpenAI-powered chat demo using the new Gradio message API.",
)
if __name__ == "__main__":
    # Bind to all interfaces on port 7860 — the address/port Hugging Face
    # Spaces expects a Gradio app to listen on.
    demo.launch(server_name="0.0.0.0", server_port=7860)