File size: 2,775 Bytes
88f66c9
 
 
 
 
 
 
39cb513
88f66c9
 
 
4467054
88f66c9
2ce4467
88f66c9
 
 
 
 
 
 
39cb513
4467054
39cb513
88f66c9
39cb513
88f66c9
4467054
39cb513
4467054
 
 
 
 
 
39cb513
88f66c9
 
39cb513
88f66c9
 
 
 
 
 
 
 
 
 
 
39cb513
 
 
88f66c9
 
 
 
 
 
39cb513
88f66c9
 
 
 
 
 
 
 
 
 
39cb513
88f66c9
39cb513
 
88f66c9
 
39cb513
4467054
88f66c9
 
 
 
 
 
39cb513
88f66c9
 
 
 
 
39cb513
 
88f66c9
 
 
 
 
39cb513
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
import os
import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
from prompts.initial_prompt import INITIAL_PROMPT
from prompts.main_prompt import MAIN_PROMPT

# Load API key from .env
# Only attempt to load the file if it exists so a missing .env doesn't raise.
if os.path.exists(".env"):
    load_dotenv(".env")

# May be None when the variable is unset; the OpenAI client would then
# fail at request time rather than here.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Module-level client, reused for every chat request in this process.
client = OpenAI(api_key=OPENAI_API_KEY)

def gpt_call(history, user_message,
             model="gpt-4o-mini",
             max_tokens=512,
             temperature=0.7,
             top_p=0.95):
    """
    Request the assistant's next reply from the OpenAI ChatCompletion API.

    history: prior turns as [(user_text, assistant_text), ...] pairs.
    user_message: the newest user input to answer.
    Returns the assistant's reply text.
    """
    # Build the message list: system prompt first, then the prior turns
    # (skipping any empty sides), then the fresh user message.
    messages = [{"role": "system", "content": MAIN_PROMPT}]
    for past_user, past_assistant in history:
        messages.extend(
            {"role": role, "content": text}
            for role, text in (("user", past_user), ("assistant", past_assistant))
            if text
        )
    messages.append({"role": "user", "content": user_message})

    # Single, non-streaming completion request.
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p
    )
    return completion.choices[0].message.content

def respond(user_message, history):
    """
    Gradio submit handler: generate a chatbot reply and record the turn.

    user_message: the latest user message (may be None, empty, or whitespace).
    history: list of (user, assistant) tuples; mutated in place.
    Returns ("", history) — clears the textbox and refreshes the chatbot.
    """
    # Ignore empty AND whitespace-only submissions instead of sending a
    # blank message to the model (the original guard only caught falsy
    # values like "" or None, so "   " slipped through).
    if not user_message or not user_message.strip():
        return "", history

    assistant_reply = gpt_call(history, user_message)

    # Append the new (user, assistant) pair; Gradio re-renders from this list.
    history.append((user_message, assistant_reply))

    return "", history

##############################
#  Gradio Blocks UI
##############################
with gr.Blocks() as demo:
    gr.Markdown("## Simple Chat Interface")

    # Chatbot widget seeded with the initial prompt as the first assistant
    # turn. Value format is a list of (user, assistant) tuples.
    chatbot = gr.Chatbot(
        value=[("", INITIAL_PROMPT)],  # Fixed format
        height=500
    )

    # Per-session state holding the conversation history that respond()
    # mutates in place. NOTE(review): this assumes the State list and the
    # list respond() receives stay the same object so chatbot and state
    # remain in sync — verify against Gradio's State copy semantics.
    state_history = gr.State([("", INITIAL_PROMPT)])

    user_input = gr.Textbox(
        placeholder="Type your message here...",
        label="Your Input"
    )

    # On submit: respond() clears the textbox and refreshes the chatbot,
    # then the identity lambda re-emits the (mutated) history back into
    # state so Gradio persists it for the next turn.
    user_input.submit(
        respond,
        inputs=[user_input, state_history],
        outputs=[user_input, chatbot]
    ).then(
        fn=lambda h: h,  # Fix: Only update history, not chatbot
        inputs=[state_history],
        outputs=[state_history]
    )

if __name__ == "__main__":
    # Binds to all interfaces on port 7860; share=True also requests a
    # public Gradio tunnel link.
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)