# AIChat / appByCompletion.py — Gradio chatbot backed by the OpenAI Completion API.
# Install the Gradio package: conda install -c conda-forge gradio
# Optional dependency tiktoken (pip install tiktoken). Different GPT models map to
# different tiktoken encoder models:
# https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
import gradio as gr
import openai
import os
#import tiktoken
# Read the API key from the environment so no secret is hard-coded in the file.
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Token-accurate length checks via tiktoken were disabled; len() is used as a proxy below.
#encoding = tiktoken.get_encoding("p50k_base")
class Conversation:
    """Rolling-window chat wrapper around the OpenAI Completion API.

    Stores a system-style prompt plus the last ``num_of_round`` question/answer
    pairs and sends them as context with every request.
    """

    def __init__(self, prompt, num_of_round):
        """
        Args:
            prompt: instruction placed at the start of every request.
            num_of_round: number of question/answer rounds to retain.
        """
        self.prompt = prompt
        self.num_of_round = num_of_round
        # messages = [prompt, q1, a1, q2, a2, ...]
        self.messages = [self.prompt]

    def ask(self, question):
        """Send `question` with the retained history and return the reply text.

        Returns an error string (not an exception object) on failure, so the
        result is always safe to display in a chat widget.
        """
        # Rough input-length guard; the original tiktoken-based check is disabled,
        # so plain character count stands in for token count.
        if len(question) > 512:
            return "输入的文字过长,请精简后再提问"
        try:
            # Bug fix: include the stored conversation as context; the original
            # sent only the bare question, making the history dead weight.
            full_prompt = "\n".join(self.messages + [question])
            response = openai.Completion.create(
                engine="text-davinci-003",
                prompt=full_prompt,
                max_tokens=3580,
                n=1,
                stop=None,
                temperature=0.5,
            )
        except Exception as e:
            print(e)
            # Bug fix: return a string rather than the exception object.
            return str(e)
        message = response.choices[0].text.strip()
        # Bug fix: record BOTH sides of the round, and only after success, so
        # history stays in balanced (question, answer) pairs.
        self.messages.append(question)
        self.messages.append(message)
        if len(self.messages) > self.num_of_round * 2 + 1:
            # Drop the oldest question/answer pair; index 0 (the prompt) stays.
            del self.messages[1:3]
        return message
# System-style instruction prepended to every request.
prompt = """你是一个聊天机器人,请尽可能回答用户的所有问题"""
# Single shared conversation (10 retained rounds) used by all sessions.
conv = Conversation(prompt, 10)
def answer(question, history=None):
    """Gradio submit callback.

    Args:
        question: the user's new message.
        history: flat list [q1, a1, q2, a2, ...] carried in gr.State; Gradio
            always supplies it, but the default is still fixed here.

    Returns:
        (pairs, history): chatbot-ready [(q, a), ...] pairs and the updated
        flat history for the next call.
    """
    # Bug fix: the original used a mutable default (history=[]), which would
    # silently share one list across every call that omitted the argument.
    if history is None:
        history = []
    history.append(question)
    history.append(conv.ask(question))
    # Pair up [q1, a1, q2, a2, ...] into [(q1, a1), (q2, a2), ...].
    responses = list(zip(history[::2], history[1::2]))
    return responses, history
# Minimal Gradio chat UI: a chatbot transcript plus a textbox; pressing Enter
# submits the text (and the session state) to `answer`.
with gr.Blocks(css="#chatbot{height:300px} .overflow-y-auto{height:500px}") as demo:
    chatbot = gr.Chatbot(elem_id="chatbot")  # renders the (question, answer) pairs
    state = gr.State([])  # per-session flat history list passed through `answer`
    with gr.Row():
        # NOTE(review): .style(container=False) is the pre-4.x Gradio API — confirm version.
        txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False)
    # Wire Enter-key submit: inputs (text, state) -> outputs (chatbot, state).
    txt.submit(answer, [txt, state], [chatbot, state])
demo.launch()