|
|
from pickle import NONE |
|
|
import numpy as np |
|
|
import cv2 |
|
|
import urllib.request |
|
|
import openai |
|
|
import gradio as gr |
|
|
import random |
|
|
import poe |
|
|
|
|
|
# Shared poe.Client instance; created lazily in greet() once a plausible
# API key is supplied.  Stays None until then.
client = None


# Per-user conversation histories, keyed by the user-supplied ID.
# Each value is a list of {"role": ..., "content": ...} message dicts;
# the first three entries are fixed seed messages.
user_contexts = {}
|
|
|
|
|
def get_assistant_response(user_question, context, model_name):
    """Send the running conversation to the selected Poe bot and return its reply.

    Appends the user question and the assistant's answer to *context* in place,
    then sends a chat break so the bot's server-side history is reset.

    Args:
        user_question: The user's latest question text.
        context: Mutable list of {"role", "content"} message dicts; the first
            three entries are seed messages and are excluded from the prompt.
        model_name: Key into the module-level ``models`` mapping.

    Returns:
        The assistant's reply text ('' if the bot streamed nothing).
    """
    global client, models

    context.append({"role": "user", "content": user_question})

    # Flatten the visible history (seed messages excluded) into one prompt.
    prompt = "".join(
        item["role"] + ": " + item["content"] + "\n" for item in context[3:]
    )

    # poe streams cumulative chunks; the last chunk carries the full reply.
    # Initialize the result so an empty stream cannot raise NameError
    # (the original read `chunk` after the loop without a fallback).
    assistant_response = ""
    for chunk in client.send_message(models[model_name], prompt):
        assistant_response = chunk["text"]

    context.append({"role": "assistant", "content": assistant_response})

    # Reset the bot's own context so per-user histories don't bleed together.
    client.send_chat_break(models[model_name])

    return assistant_response
|
|
|
|
|
def generate_image_url(prompt):
    """Ask the OpenAI Image API for one 512x512 image and return its URL.

    Args:
        prompt: Natural-language description of the desired image.

    Returns:
        The URL string of the generated image.
    """
    result = openai.Image.create(prompt=prompt, n=1, size="512x512")
    return result["data"][0]["url"]
|
|
|
|
|
def _initial_context():
    """Return a fresh seed conversation: system prompt plus one warm-up exchange."""
    return [
        {"role": "system", "content": "你是一个聪明的AI助手。请参考对话记录,回答用户的最后一个问题,无需做多余的解释,更不要强调对话历史的事情"},
        {"role": "user", "content": "你会说中文吗?"},
        {"role": "assistant", "content": "是的,我可以说中文。"}
    ]


def greet(user_id, api_key, user_question, clear_history, model_name):
    """Gradio handler: dispatch to history reset, image generation, or chat.

    Args:
        user_id: Caller-chosen ID used to isolate conversation histories.
        api_key: Poe token; the client is (re)created whenever a plausible
            (length > 5) key is supplied, so keys can be swapped between calls.
        user_question: Question text, or "生成图片:<prompt>" / "生成图片:<prompt>"
            to request an image instead of a chat answer.
        clear_history: When True, reset this user's history and return early.
        model_name: Key into the module-level ``models`` mapping.

    Returns:
        A 3-tuple (status_text, answer_text, image); *image* is a 5x5
        placeholder array unless an image was actually generated.
    """
    global client
    if len(api_key) > 5:
        client = poe.Client(api_key)

    global user_contexts
    if user_id not in user_contexts:
        user_contexts[user_id] = _initial_context()

    context = user_contexts[user_id]

    if clear_history:
        # Replace (not mutate) the stored history with a fresh seed context.
        context = _initial_context()
        user_contexts[user_id] = context
        return '清空成功', '保持聊天记录', np.ones((5, 5))

    # Image request: the "生成图片" prefix plus either colon is 5 characters,
    # so slicing at 5 strips either spelling of the prefix.
    if user_question.startswith("生成图片:") or user_question.startswith("生成图片:"):
        image_prompt = user_question[5:]
        image_url = generate_image_url(image_prompt)
        resp = urllib.request.urlopen(image_url)
        image = np.asarray(bytearray(resp.read()), dtype="uint8")
        image = cv2.imdecode(image, cv2.IMREAD_COLOR)
        # OpenCV decodes to BGR; Gradio displays RGB.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return '', '图片已生成', image

    # Fail fast with a readable message instead of an opaque AttributeError
    # on a still-None client (no valid key was ever supplied).
    if client is None:
        return '', '请先输入有效的专属密钥', np.ones((5, 5))

    get_assistant_response(user_question, context, model_name)

    # Render the visible history (seed messages excluded) back to the UI.
    prompt = "".join(
        item["role"] + ": " + item["content"] + "\n" for item in context[3:]
    )
    return '', prompt, np.ones((5, 5))
|
|
|
|
|
# Maps the UI-facing model label to the Poe bot's internal codename.
models = {
    "GPT-4": "beaver",
    "Claude-instant-100k": "a2_100k",
    "Claude+": "a2_2",
    "ChatGPT": "chinchilla",
    "Claude-instant": "a2",
}
|
|
|
|
|
# Gradio front-end.  Three output components are declared to match the three
# values (status, answer, image) that greet() returns on every code path; the
# original two-output list made Gradio raise on each submission.
demo = gr.Interface(
    fn=greet,
    inputs=[
        gr.Textbox(lines=1, label='请输入用户ID', placeholder='请输入用户ID'),
        gr.Textbox(lines=1, label='请输入你的专属密钥', placeholder='请输入你的专属密钥'),
        gr.Textbox(lines=15, label='请输入问题', placeholder='请输入您的问题'),
        gr.Checkbox(label='清空聊天记录', default=False),
        gr.Radio(choices=list(models.keys()), label="选择模型")
    ],
    outputs=[
        gr.Textbox(lines=1, label='聊天记录状态', placeholder='等待清空聊天记录'),
        gr.Textbox(lines=25, label='AI回答', placeholder='等待AI回答'),
        # Third output: the generated image (or the 5x5 placeholder array).
        gr.Image(label='生成的图片')
    ],
    title="ChatALL",
    description="""


1.使用说明:


请输入您的问题,AI助手会给出回答。


支持连续对话,可以记录对话历史。


重新开始对话勾选清空聊天记录,输出清空成功表示重新开启对话。


2.特别警告:


为了防止用户数据混乱,请自定义用户ID。


理论上如果被别人知道自己的ID,那么别人可以查看自己的历史对话,对此你可以选择在对话结束后清除对话记录。


3.作者的GPT4网页导航网站链接如下:http://aust001.pythonanywhere.com/ -> 专属密钥进群获取


"""
)
|
|
|
|
|
# Script entry point: start the Gradio web UI (skipped when imported).
if __name__ == "__main__":


    demo.launch()