|
|
import openai |
|
|
import gradio as gr |
|
|
import os |
|
|
|
|
|
|
|
|
# Read the OpenAI API key from the environment.  Raises KeyError at import
# time if "chat_key" is unset — fails fast before the UI launches.
openai.api_key = os.environ["chat_key"]




# Per-user conversation state: maps user_id -> list of
# {"role": ..., "content": ...} message dicts, oldest first.
# NOTE(review): in-memory only — histories are lost on restart.
user_dialogue_histories = {}


# Per-user history budget.  NOTE(review): enforced in *characters*
# (see get_total_tokens), not model tokens — confirm that is intended.
max_tokens_per_user = 2000
|
|
|
|
|
def get_total_tokens(dialogue_history):
    """Return the combined length of all message contents in a history.

    NOTE(review): this sums *character* counts of each entry's "content",
    not model tokens — the name overstates precision; confirm intent.
    """
    return sum(len(entry["content"]) for entry in dialogue_history)
|
|
|
|
|
def remove_earliest_messages(user_id, tokens_to_remove):
    """Drop the oldest messages from a user's history, in place, until at
    least *tokens_to_remove* characters of content have been freed.

    Stops early if the history empties first.
    """
    history = user_dialogue_histories[user_id]
    remaining = tokens_to_remove
    while remaining > 0 and history:
        oldest = history.pop(0)  # mutates the shared per-user list
        remaining -= len(oldest["content"])
|
|
|
|
|
def chat_with_chatgpt(user_id, user_message):
    """Append *user_message* to the user's history, query gpt-3.5-turbo,
    record the assistant's reply, and return it.

    The per-user history is trimmed (oldest-first) whenever its total
    content length exceeds max_tokens_per_user, both before and after
    the API call.

    Raises whatever openai.ChatCompletion.create raises on API failure,
    and KeyError from os.environ at import time if the key is unset.
    """
    if user_id not in user_dialogue_histories:
        user_dialogue_histories[user_id] = []

    history = user_dialogue_histories[user_id]
    history.append({"role": "user", "content": user_message})

    # Trim before sending so the request stays within the budget.
    overflow = get_total_tokens(history) - max_tokens_per_user
    if overflow > 0:
        remove_earliest_messages(user_id, overflow)

    # BUG FIX: gpt-3.5-turbo is a *chat* model.  The legacy Completion
    # endpoint (engine=/prompt=) rejects it and cannot take a list of
    # role/content dicts — the chat endpoint with model=/messages= is
    # the correct call for this model family.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "system", "content": "You are a helpful assistant."}] + history,
        max_tokens=150,
        n=1,
        stop=None,
        temperature=0.5,
    )

    # Chat responses carry the text under message["content"], not .text.
    chatgpt_response = response.choices[0].message["content"].strip()

    history.append({"role": "assistant", "content": chatgpt_response})

    # Trim again so the stored history never exceeds the budget.
    overflow = get_total_tokens(history) - max_tokens_per_user
    if overflow > 0:
        remove_earliest_messages(user_id, overflow)

    return chatgpt_response
|
|
|
|
|
|
|
|
def gradio_interface(user_id, user_message):
    """Gradio adapter: forward the two textbox values to the chat handler
    and hand its reply straight back to the output component."""
    return chat_with_chatgpt(user_id, user_message)
|
|
|
|
|
# Gradio UI wiring.  gr.inputs.* / gr.outputs.* were deprecated in
# Gradio 3.x and removed in 4.x; the component classes live at the
# package top level (gr.Textbox) in all supported versions.
inputs = [
    gr.Textbox(label="User ID", placeholder="Enter user ID here"),
    gr.Textbox(label="Message", placeholder="Enter your message here"),
]

output = gr.Textbox(label="ChatGPT Response")

# Launches a local web server; blocks until the server is stopped.
gr.Interface(fn=gradio_interface, inputs=inputs, outputs=output, title="Chat with ChatGPT").launch()
|
|
|