# Hugging Face Space (status at capture time: Running)
| import g4f | |
| import gradio as gr | |
| # ✅ Function to Get GPT-4 Response | |
def get_gpt4_response(user_message, history=None):
    """Return a GPT-4 reply to *user_message*, given the prior chat history.

    Parameters
    ----------
    user_message : str
        The latest message typed by the user.
    history : list[tuple[str, str]] | None
        Prior (user, assistant) exchanges as supplied by ``gr.ChatInterface``.
        Defaults to ``None`` rather than a mutable ``[]`` — a shared list
        default would persist appended state across calls.

    Returns
    -------
    str
        The model's reply, or a fixed fallback message if anything fails.
    """
    try:
        # Replay the conversation so the model sees the full context.
        messages = []
        for user_turn, assistant_turn in (history or []):
            messages.append({"role": "user", "content": user_turn})
            messages.append({"role": "assistant", "content": assistant_turn})
        # Append the new user message last.
        messages.append({"role": "user", "content": user_message})

        # Generate the response from GPT-4 via g4f.
        response = g4f.ChatCompletion.create(
            model="gpt-4",
            messages=messages,
        )
        # Return only the reply string — Gradio manages the history itself.
        return response
    except Exception as e:  # UI-callback boundary: log and degrade, never crash.
        print(f"Error occurred: {e}")
        return "⚠️ Sorry, there was an issue processing your request."
| # ✅ Define the Gradio Interface (Hugging Face requires this) | |
| demo = gr.ChatInterface( | |
| fn=get_gpt4_response, | |
| examples=["Hello!", "Tell me a joke!", "What is AI?"], | |
| cache_examples=False | |
| ) | |
| # ✅ No need for `if __name__ == "__main__"` in Hugging Face | |
| demo.launch() | |