# Hugging Face Space: Gradio ChatGPT demo (runs as app.py on Spaces)
import os

import gradio as gr
from openai import OpenAI

# Load the API key from the environment. On Hugging Face Spaces, set
# OPENAI_API_KEY in the Settings -> Secrets tab; os.getenv returns None if
# it is missing, and the OpenAI client will then fail on the first request.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
| # Define the chat function | |
def chat_with_gpt(message, history):
    """Generate a reply to *message* using the OpenAI chat API.

    Parameters
    ----------
    message : str
        The user's newest message.
    history : list[tuple[str, str]]
        Prior (user, assistant) turn pairs as supplied by
        ``gr.ChatInterface`` in its default tuples format.
        NOTE(review): newer Gradio versions default to an OpenAI-style
        "messages" history (list of dicts) — confirm the installed Gradio
        version still passes tuple pairs.

    Returns
    -------
    str
        The assistant's reply text.
    """
    # Rebuild the full conversation in the OpenAI messages format so the
    # model sees the prior context, then append the new user turn.
    messages = []
    for human, assistant in history:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})

    # Network call to the OpenAI chat-completions endpoint.
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=messages,
        max_tokens=150,
        temperature=0.7,
    )
    # The API returns a list of choices; with default n=1 the reply is choices[0].
    return response.choices[0].message.content
| # Create Gradio interface | |
# Wire the chat function into Gradio's ready-made chat UI.
demo = gr.ChatInterface(
    fn=chat_with_gpt,
    title="ChatGPT Demo on Hugging Face",
    examples=[
        "What's the capital of Canada?",
        "Tell me a joke.",
        "Explain quantum computing.",
    ],
    # Example prompts would each trigger a paid API call if precomputed,
    # so leave caching off.
    cache_examples=False,
)

# Launch the Gradio server at import time — Hugging Face Spaces executes
# this module as the app entry point, so no __main__ guard is used.
demo.launch()