Update app.py
Browse files
app.py
CHANGED
|
@@ -1,89 +1,37 @@
|
|
| 1 |
import openai
|
| 2 |
-
import os
|
| 3 |
-
|
| 4 |
-
|
| 5 |
import gradio as gr
|
| 6 |
-
import openai
|
| 7 |
-
import backoff # for exponential backoff
|
| 8 |
-
from reportlab.lib.pagesizes import letter
|
| 9 |
-
from reportlab.lib import colors
|
| 10 |
-
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle
|
| 11 |
-
from reportlab.lib.styles import getSampleStyleSheet
|
| 12 |
-
from reportlab.lib.enums import TA_CENTER
|
| 13 |
-
|
| 14 |
-
openai.api_key = os.environ['chat_key']
|
| 15 |
-
|
| 16 |
-
class SessionManager:
|
| 17 |
-
def __init__(self):
|
| 18 |
-
self.sessions = {}
|
| 19 |
-
|
| 20 |
-
def add_message(self, session_id, message):
|
| 21 |
-
if session_id not in self.sessions:
|
| 22 |
-
self.sessions[session_id] = []
|
| 23 |
-
self.sessions[session_id].append(message)
|
| 24 |
-
|
| 25 |
-
def get_messages(self, session_id):
|
| 26 |
-
return self.sessions.get(session_id, [])
|
| 27 |
-
|
| 28 |
-
# 自定义一个函数,用于将对话消息整理为简短概要
|
| 29 |
-
def summarize_message(message):
|
| 30 |
-
# 在这个示例中,我们简单地返回原始消息。
|
| 31 |
-
# 你可以根据需要替换为更复杂的逻辑,以提取关键信息并生成简短概要。
|
| 32 |
-
return message
|
| 33 |
-
|
| 34 |
-
session_manager = SessionManager()
|
| 35 |
-
def chat_gpt(session_id, user_message, model="gpt-3.5-turbo", max_tokens=4096):
|
| 36 |
-
# 将用户消息添加到对应的会话中
|
| 37 |
-
summarized_message = summarize_message(f"User: {user_message}")
|
| 38 |
-
session_manager.add_message(session_id, summarized_message)
|
| 39 |
-
|
| 40 |
-
# 获取当前会话的消息列表
|
| 41 |
-
message_list = session_manager.get_messages(session_id)
|
| 42 |
-
|
| 43 |
-
# 组合聊天历史
|
| 44 |
-
conversation = "\n\n".join(message_list)
|
| 45 |
-
|
| 46 |
-
# 检查对话历史是否超过模型的token限制
|
| 47 |
-
tokens_length = len(conversation.split())
|
| 48 |
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
while tokens_length > max_tokens:
|
| 52 |
-
message_list.pop(0)
|
| 53 |
-
conversation = "\n\n".join(message_list)
|
| 54 |
-
tokens_length = len(conversation.split())
|
| 55 |
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
messages=[{"role": "system", "content": "你好,请问有什么问题我可以帮您解答吗?"}] +
|
| 60 |
-
[{"role": "user", "content": msg} for msg in message_list],
|
| 61 |
-
# system 中 定义回答问题的具体类型等
|
| 62 |
-
temperature=0.5,
|
| 63 |
-
max_tokens=150,
|
| 64 |
-
top_p=1,
|
| 65 |
-
frequency_penalty=0,
|
| 66 |
-
presence_penalty=0,
|
| 67 |
-
stop=["\n\n"],
|
| 68 |
-
)
|
| 69 |
-
assistant_response = response.choices[0].message.content
|
| 70 |
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
|
|
|
|
|
|
| 74 |
|
| 75 |
-
|
|
|
|
|
|
|
|
|
|
| 76 |
|
| 77 |
-
|
| 78 |
-
|
| 79 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 80 |
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
|
|
|
| 84 |
|
| 85 |
-
|
| 86 |
-
output_text = gr.outputs.Textbox()
|
| 87 |
-
iface = gr.Interface(fn=gradio_chat_gpt, inputs=input_text, outputs=output_text, title="ChatGPT", description="与GPT模型聊天")
|
| 88 |
|
| 89 |
-
|
|
|
|
| 1 |
import os

import gradio as gr
import openai
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
|
| 4 |
+
# API key: prefer the OPENAI_API_KEY environment variable so the secret never
# lives in source control; fall back to the original placeholder so existing
# setups that edit this line keep working.
openai.api_key = os.environ.get("OPENAI_API_KEY", "your_openai_api_key_here")

# Per-user dialogue history: user_id -> list of {"role": ..., "content": ...}
# message dicts, oldest first.
user_dialogue_histories = {}

# Per-user history budget. NOTE(review): measured in *characters* (see
# get_total_tokens), not true model tokens — a cheap proxy.
max_tokens_per_user = 1000  # adjust as needed
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
+
def get_total_tokens(dialogue_history):
    """Return the combined length of all message contents in *dialogue_history*.

    Each entry is expected to be a dict with a "content" string. The result is
    a character count used as a cheap stand-in for model tokens.
    """
    return sum(len(entry["content"]) for entry in dialogue_history)
|
| 16 |
|
| 17 |
+
def remove_earliest_messages(user_id, tokens_to_remove):
    """Drop messages from the front of *user_id*'s history until roughly
    *tokens_to_remove* characters have been freed, or the history is empty.

    Fix: the original indexed ``user_dialogue_histories[user_id]`` directly and
    raised ``KeyError`` for an unknown user; an unknown user is now a no-op.
    Counts are characters (len of "content"), matching get_total_tokens.
    """
    history = user_dialogue_histories.get(user_id)
    if not history:
        return
    while tokens_to_remove > 0 and history:
        removed_message = history.pop(0)  # oldest message first
        tokens_to_remove -= len(removed_message["content"])
|
| 21 |
|
| 22 |
+
def chat_with_chatgpt(user_id, user_message):
    """Send *user_message* for *user_id* to the OpenAI chat API and return the reply.

    Fix: the original body was only a placeholder comment ("保持与之前相同的代码"),
    which is not a valid Python function body. Reconstructed from the previous
    revision of this file: append the user message to the per-user history, trim
    the oldest messages when over the character budget, call the chat API, and
    record the assistant reply.

    NOTE(review): uses the legacy ``openai.ChatCompletion`` API like the rest of
    this file — confirm against the pinned openai package version.
    """
    history = user_dialogue_histories.setdefault(user_id, [])
    history.append({"role": "user", "content": user_message})

    # Trim oldest messages when over budget (lengths are characters, not real tokens).
    overflow = get_total_tokens(history) - max_tokens_per_user
    if overflow > 0:
        remove_earliest_messages(user_id, overflow)

    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        # System prompt carried over verbatim from the previous revision.
        messages=[{"role": "system", "content": "你好,请问有什么问题我可以帮您解答吗?"}] + history,
    )
    assistant_reply = response.choices[0].message.content
    history.append({"role": "assistant", "content": assistant_reply})
    return assistant_reply
|
| 24 |
|
| 25 |
+
# Gradio callback.
def gradio_interface(user_id, user_message):
    """Forward the message to chat_with_chatgpt and return its reply."""
    return chat_with_chatgpt(user_id, user_message)
|
| 29 |
|
| 30 |
+
# Gradio UI wiring: two text inputs (user id + message) mapped to one text output.
# NOTE(review): gr.inputs / gr.outputs are the legacy (pre-3.0) Gradio namespaces
# and were removed in later releases — confirm the pinned gradio version, or
# migrate to gr.Textbox(...) used directly for both inputs and outputs.
inputs = [
    gr.inputs.Textbox(label="User ID", placeholder="Enter user ID here"),
    gr.inputs.Textbox(label="Message", placeholder="Enter your message here"),
]

output = gr.outputs.Textbox(label="ChatGPT Response")

# Build the app and launch the web server (blocks here until stopped).
gr.Interface(fn=gradio_interface, inputs=inputs, outputs=output, title="Chat with ChatGPT").launch()
|