File size: 1,218 Bytes
c557d0d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
import g4f
import gradio as gr

# ✅ Function to Get GPT-4 Response
# ✅ Function to Get GPT-4 Response
def get_gpt4_response(user_message, history=None):
    """Return a GPT-4 reply to *user_message*, given the prior chat turns.

    Parameters
    ----------
    user_message : str
        The newest message from the user.
    history : list[tuple[str, str]] | None
        Prior (user, assistant) exchanges as passed by ``gr.ChatInterface``.
        Defaults to ``None`` (treated as empty) — the previous ``history=[]``
        mutable default would be shared across every call to this function.

    Returns
    -------
    str
        The model's reply, or a user-facing error string if the request fails.
    """
    try:
        # Rebuild the OpenAI-style message list from the (user, assistant)
        # pairs Gradio supplies.
        messages = []
        for user_turn, assistant_turn in (history or []):
            messages.append({"role": "user", "content": user_turn})
            messages.append({"role": "assistant", "content": assistant_turn})

        # The new user message goes last.
        messages.append({"role": "user", "content": user_message})

        # g4f mirrors the OpenAI ChatCompletion API; the result is returned
        # directly to Gradio (presumably a plain string — g4f's default).
        response = g4f.ChatCompletion.create(
            model="gpt-4",
            messages=messages,
        )

        return response  # ✅ Return only the response as a string (Gradio handles history)

    except Exception as e:
        # Broad catch is deliberate: any provider failure should degrade to a
        # friendly message in the chat UI rather than crash the app.
        print(f"Error occurred: {e}")  # Log the error
        return "⚠️ Sorry, there was an issue processing your request."

# ✅ Gradio interface — Hugging Face Spaces discovers the module-level app
# object, so no `if __name__ == "__main__"` guard is required.
example_prompts = ["Hello!", "Tell me a joke!", "What is AI?"]

demo = gr.ChatInterface(
    fn=get_gpt4_response,
    examples=example_prompts,
    cache_examples=False,
)

demo.launch()