XingHe0127 committed on
Commit fefce3f · 1 Parent(s): 1473e9c

Upload 4 files

Files changed (4)
  1. .gitignore +137 -0
  2. Chatbot.py +55 -42
  3. presets.py +12 -7
  4. utils.py +173 -125
.gitignore ADDED
@@ -0,0 +1,137 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ pip-wheel-metadata/
24
+ share/python-wheels/
25
+ *.egg-info/
26
+ .installed.cfg
27
+ *.egg
28
+ MANIFEST
29
+ history/
30
+
31
+ # PyInstaller
32
+ # Usually these files are written by a python script from a template
33
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
34
+ *.manifest
35
+ *.spec
36
+
37
+ # Installer logs
38
+ pip-log.txt
39
+ pip-delete-this-directory.txt
40
+
41
+ # Unit test / coverage reports
42
+ htmlcov/
43
+ .tox/
44
+ .nox/
45
+ .coverage
46
+ .coverage.*
47
+ .cache
48
+ nosetests.xml
49
+ coverage.xml
50
+ *.cover
51
+ *.py,cover
52
+ .hypothesis/
53
+ .pytest_cache/
54
+
55
+ # Translations
56
+ *.mo
57
+ *.pot
58
+
59
+ # Django stuff:
60
+ *.log
61
+ local_settings.py
62
+ db.sqlite3
63
+ db.sqlite3-journal
64
+
65
+ # Flask stuff:
66
+ instance/
67
+ .webassets-cache
68
+
69
+ # Scrapy stuff:
70
+ .scrapy
71
+
72
+ # Sphinx documentation
73
+ docs/_build/
74
+
75
+ # PyBuilder
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ .python-version
87
+
88
+ # pipenv
89
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
90
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
91
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
92
+ # install all needed dependencies.
93
+ #Pipfile.lock
94
+
95
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
96
+ __pypackages__/
97
+
98
+ # Celery stuff
99
+ celerybeat-schedule
100
+ celerybeat.pid
101
+
102
+ # SageMath parsed files
103
+ *.sage.py
104
+
105
+ # Environments
106
+ .env
107
+ .venv
108
+ env/
109
+ venv/
110
+ ENV/
111
+ env.bak/
112
+ venv.bak/
113
+
114
+ # Spyder project settings
115
+ .spyderproject
116
+ .spyproject
117
+
118
+ # Rope project settings
119
+ .ropeproject
120
+
121
+ # mkdocs documentation
122
+ /site
123
+
124
+ # mypy
125
+ .mypy_cache/
126
+ .dmypy.json
127
+ dmypy.json
128
+
129
+ # Pyre type checker
130
+ .pyre/
131
+
132
+ # Mac system file
133
+ **/.DS_Store
134
+
135
+ api_key.txt
136
+
137
+ auth.json
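
The new .gitignore keeps local credentials (api_key.txt, auth.json) and the generated history/ directory out of version control. As a small illustration of why the last entries matter, here is a minimal sketch of how an ignored key file could be picked up at startup, assuming the environment variable used elsewhere in this commit takes precedence; the load_api_key helper is hypothetical and not part of the commit:

import os

def load_api_key(path="api_key.txt"):
    # Prefer the environment variable that Chatbot.py already reads.
    key = os.getenv("OpenAI-API")
    if key:
        return key
    # Fall back to the git-ignored key file if the user created one locally.
    if os.path.exists(path):
        with open(path, "r", encoding="utf8") as f:
            return f.read().strip()
    return None
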
Chatbot.py CHANGED
@@ -2,13 +2,12 @@ import gradio as gr
2
  # import openai
3
  import os
4
  import sys
 
5
  from utils import *
6
  from presets import *
7
 
8
- my_api_key = os.getenv("OpenAI-API") # Enter your API key here
9
- HIDE_MY_KEY = True # Set this to True if you want to hide your API key in the UI
10
 
11
- gr.Chatbot.postprocess = postprocess
12
 
13
  #if we are running in Docker
14
  if os.environ.get('dockerrun') == 'yes':
@@ -40,12 +39,17 @@ else:
40
  if username != "" and password != "":
41
  authflag = True
42
 
 
 
43
  with gr.Blocks(css=customCSS) as demo:
44
  gr.HTML(title)
45
- keyTxt = gr.Textbox(show_label=True, placeholder=f"在这里输入你的OpenAI API-key...",
46
- value=my_api_key, label="API Key", type="password", visible=not HIDE_MY_KEY).style(container=True)
 
 
47
  chatbot = gr.Chatbot() # .style(color_map=("#1D51EE", "#585A5B"))
48
  history = gr.State([])
 
49
  promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2))
50
  TRUECOMSTANT = gr.State(True)
51
  FALSECONSTANT = gr.State(False)
@@ -53,42 +57,42 @@ with gr.Blocks(css=customCSS) as demo:
53
 
54
  with gr.Row():
55
  with gr.Column(scale=12):
56
- txt = gr.Textbox(show_label=False, placeholder="在这里输入").style(
57
  container=False)
58
  with gr.Column(min_width=50, scale=1):
59
  submitBtn = gr.Button("🚀", variant="primary")
60
  with gr.Row():
61
  emptyBtn = gr.Button("🧹 新的对话")
62
  retryBtn = gr.Button("🔄 重新生成")
63
- delLastBtn = gr.Button("🗑️ 删除上条对话")
64
  reduceTokenBtn = gr.Button("♻️ 总结对话")
65
- statusDisplay = gr.Markdown("status: ready")
66
  systemPromptTxt = gr.Textbox(show_label=True, placeholder=f"在这里输入System Prompt...",
67
  label="System prompt", value=initial_prompt).style(container=True)
68
  with gr.Accordion(label="加载Prompt模板", open=False):
69
  with gr.Column():
70
  with gr.Row():
71
  with gr.Column(scale=6):
72
- templateFileSelectDropdown = gr.Dropdown(label="选择Prompt模板集合文件(.csv)", choices=get_template_names(plain=True), multiselect=False)
73
  with gr.Column(scale=1):
74
  templateRefreshBtn = gr.Button("🔄 刷新")
75
  templaeFileReadBtn = gr.Button("📂 读入模板")
76
  with gr.Row():
77
  with gr.Column(scale=6):
78
- templateSelectDropdown = gr.Dropdown(label="从Prompt模板中加载", choices=load_template(get_template_names(plain=True)[0], mode=1), multiselect=False)
79
  with gr.Column(scale=1):
80
  templateApplyBtn = gr.Button("⬇️ 应用")
81
- with gr.Accordion(label="保存/加载对话历史记录(在文本框中输入文件名,点击“保存对话”按钮)", open=False):
82
  with gr.Column():
83
  with gr.Row():
84
  with gr.Column(scale=6):
85
  saveFileName = gr.Textbox(
86
  show_label=True, placeholder=f"在这里输入保存的文件名...", label="设置保存文件名", value="对话历史记录").style(container=True)
87
  with gr.Column(scale=1):
88
- saveBtn = gr.Button("💾 保存对话")
89
  with gr.Row():
90
  with gr.Column(scale=6):
91
- historyFileSelectDropdown = gr.Dropdown(label="从列表中加载对话", choices=get_history_names(plain=True), multiselect=False)
92
  with gr.Column(scale=1):
93
  historyRefreshBtn = gr.Button("🔄 刷新")
94
  historyReadBtn = gr.Button("📂 读入对话")
@@ -103,44 +107,53 @@ with gr.Blocks(css=customCSS) as demo:
103
  gr.Markdown(description)
104
 
105
 
106
- txt.submit(predict, [txt, top_p, temperature, keyTxt,
107
- chatbot, history, systemPromptTxt], [chatbot, history, statusDisplay])
108
- txt.submit(reset_textbox, [], [txt])
109
- submitBtn.click(predict, [txt, top_p, temperature, keyTxt, chatbot,
110
- history, systemPromptTxt], [chatbot, history, statusDisplay], show_progress=True)
111
- submitBtn.click(reset_textbox, [], [txt])
112
- emptyBtn.click(reset_state, outputs=[chatbot, history])
113
- retryBtn.click(predict, [txt, top_p, temperature, keyTxt, chatbot, history,
114
- systemPromptTxt, TRUECOMSTANT], [chatbot, history, statusDisplay], show_progress=True)
115
- delLastBtn.click(delete_last_conversation, [chatbot, history], [
116
- chatbot, history], show_progress=True)
117
- reduceTokenBtn.click(predict, [txt, top_p, temperature, keyTxt, chatbot, history,
118
- systemPromptTxt, FALSECONSTANT, TRUECOMSTANT], [chatbot, history, statusDisplay], show_progress=True)
119
- saveBtn.click(save_chat_history, [
 
 
120
  saveFileName, systemPromptTxt, history, chatbot], None, show_progress=True)
121
- saveBtn.click(get_history_names, None, [historyFileSelectDropdown])
 
 
122
  historyRefreshBtn.click(get_history_names, None, [historyFileSelectDropdown])
123
- historyReadBtn.click(load_chat_history, [historyFileSelectDropdown], [saveFileName, systemPromptTxt, history, chatbot], show_progress=True)
 
 
124
  templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown])
 
125
  templaeFileReadBtn.click(load_template, [templateFileSelectDropdown], [promptTemplates, templateSelectDropdown], show_progress=True)
126
- templateApplyBtn.click(lambda x, y: x[y], [promptTemplates, templateSelectDropdown], [systemPromptTxt], show_progress=True)
127
 
128
- print("Chatbot已成功启动!")
 
 
129
  # By default, start a local server reachable directly via IP; no public share link is created by default
130
  demo.title = "Chatbot"
131
 
132
- #if running in Docker
133
- if dockerflag:
134
- if authflag:
135
- demo.queue().launch(server_name="0.0.0.0", server_port=7860,auth=(username, password))
136
- else:
137
- demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False)
138
- #if not running in Docker
139
- else:
140
- if authflag:
141
- demo.queue().launch(share=False, auth=(username, password))
142
  else:
143
- #demo.queue().launch(share=False) # change to share=True to create a public share link
 
 
 
144
  #demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False) # the port can be customized
145
  demo.queue().launch(server_name="0.0.0.0", server_port=7860,auth=(os.environ["username"], os.environ["password"])) # a username and password can be set
146
  #demo.queue().launch(auth=("在这里填写用户名", "在这里填写密码")) # suitable for an Nginx reverse proxy
 
2
  # import openai
3
  import os
4
  import sys
5
+ import argparse
6
  from utils import *
7
  from presets import *
8
 
 
 
9
 
10
+ my_api_key = os.getenv("OpenAI-API") # Enter your API key here
11
 
12
  #if we are running in Docker
13
  if os.environ.get('dockerrun') == 'yes':
 
39
  if username != "" and password != "":
40
  authflag = True
41
 
42
+ gr.Chatbot.postprocess = postprocess
43
+
44
  with gr.Blocks(css=customCSS) as demo:
45
  gr.HTML(title)
46
+ with gr.Row():
47
+ keyTxt = gr.Textbox(show_label=False, placeholder=f"在这里输入你的OpenAI API-key...",
48
+ value=my_api_key, type="password", visible=not HIDE_MY_KEY).style(container=True)
49
+ use_streaming_checkbox = gr.Checkbox(label="实时传输回答", value=True, visible=enable_streaming_option)
50
  chatbot = gr.Chatbot() # .style(color_map=("#1D51EE", "#585A5B"))
51
  history = gr.State([])
52
+ token_count = gr.State([])
53
  promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2))
54
  TRUECOMSTANT = gr.State(True)
55
  FALSECONSTANT = gr.State(False)
 
57
 
58
  with gr.Row():
59
  with gr.Column(scale=12):
60
+ user_input = gr.Textbox(show_label=False, placeholder="在这里输入").style(
61
  container=False)
62
  with gr.Column(min_width=50, scale=1):
63
  submitBtn = gr.Button("🚀", variant="primary")
64
  with gr.Row():
65
  emptyBtn = gr.Button("🧹 新的对话")
66
  retryBtn = gr.Button("🔄 重新生成")
67
+ delLastBtn = gr.Button("🗑️ 删除最近一条对话")
68
  reduceTokenBtn = gr.Button("♻️ 总结对话")
69
+ status_display = gr.Markdown("status: ready")
70
  systemPromptTxt = gr.Textbox(show_label=True, placeholder=f"在这里输入System Prompt...",
71
  label="System prompt", value=initial_prompt).style(container=True)
72
  with gr.Accordion(label="加载Prompt模板", open=False):
73
  with gr.Column():
74
  with gr.Row():
75
  with gr.Column(scale=6):
76
+ templateFileSelectDropdown = gr.Dropdown(label="选择Prompt模板集合文件", choices=get_template_names(plain=True), multiselect=False, value=get_template_names(plain=True)[0])
77
  with gr.Column(scale=1):
78
  templateRefreshBtn = gr.Button("🔄 刷新")
79
  templaeFileReadBtn = gr.Button("📂 读入模板")
80
  with gr.Row():
81
  with gr.Column(scale=6):
82
+ templateSelectDropdown = gr.Dropdown(label="从Prompt模板中加载", choices=load_template(get_template_names(plain=True)[0], mode=1), multiselect=False, value=load_template(get_template_names(plain=True)[0], mode=1)[0])
83
  with gr.Column(scale=1):
84
  templateApplyBtn = gr.Button("⬇️ 应用")
85
+ with gr.Accordion(label="保存/加载对话历史记录", open=False):
86
  with gr.Column():
87
  with gr.Row():
88
  with gr.Column(scale=6):
89
  saveFileName = gr.Textbox(
90
  show_label=True, placeholder=f"在这里输入保存的文件名...", label="设置保存文件名", value="对话历史记录").style(container=True)
91
  with gr.Column(scale=1):
92
+ saveHistoryBtn = gr.Button("💾 保存对话")
93
  with gr.Row():
94
  with gr.Column(scale=6):
95
+ historyFileSelectDropdown = gr.Dropdown(label="从列表中加载对话", choices=get_history_names(plain=True), multiselect=False, value=get_history_names(plain=True)[0])
96
  with gr.Column(scale=1):
97
  historyRefreshBtn = gr.Button("🔄 刷新")
98
  historyReadBtn = gr.Button("📂 读入对话")
 
107
  gr.Markdown(description)
108
 
109
 
110
+ user_input.submit(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
111
+ user_input.submit(reset_textbox, [], [user_input])
112
+
113
+ submitBtn.click(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
114
+ submitBtn.click(reset_textbox, [], [user_input])
115
+
116
+ emptyBtn.click(reset_state, outputs=[chatbot, history, token_count, status_display], show_progress=True)
117
+
118
+ retryBtn.click(retry, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
119
+
120
+ delLastBtn.click(delete_last_conversation, [chatbot, history, token_count, use_streaming_checkbox], [
121
+ chatbot, history, token_count, status_display], show_progress=True)
122
+
123
+ reduceTokenBtn.click(reduce_token_size, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
124
+
125
+ saveHistoryBtn.click(save_chat_history, [
126
  saveFileName, systemPromptTxt, history, chatbot], None, show_progress=True)
127
+
128
+ saveHistoryBtn.click(get_history_names, None, [historyFileSelectDropdown])
129
+
130
  historyRefreshBtn.click(get_history_names, None, [historyFileSelectDropdown])
131
+
132
+ historyReadBtn.click(load_chat_history, [historyFileSelectDropdown, systemPromptTxt, history, chatbot], [saveFileName, systemPromptTxt, history, chatbot], show_progress=True)
133
+
134
  templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown])
135
+
136
  templaeFileReadBtn.click(load_template, [templateFileSelectDropdown], [promptTemplates, templateSelectDropdown], show_progress=True)
 
137
 
138
+ templateApplyBtn.click(get_template_content, [promptTemplates, templateSelectDropdown, systemPromptTxt], [systemPromptTxt], show_progress=True)
139
+
140
+ print("Chatbot启动成功!")
141
  # By default, start a local server reachable directly via IP; no public share link is created by default
142
  demo.title = "Chatbot"
143
 
144
+ if __name__ == "__main__":
145
+ #if running in Docker
146
+ if dockerflag:
147
+ if authflag:
148
+ demo.queue().launch(server_name="0.0.0.0", server_port=7860,auth=(username, password))
149
+ else:
150
+ demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False)
151
+ #if not running in Docker
 
 
152
  else:
153
+ if authflag:
154
+ demo.queue().launch(share=False, auth=(username, password))
155
+ else:
156
+ demo.queue().launch(share=False) # change to share=True to create a public share link
157
  #demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False) # the port can be customized
158
  demo.queue().launch(server_name="0.0.0.0", server_port=7860,auth=(os.environ["username"], os.environ["password"])) # a username and password can be set
159
  #demo.queue().launch(auth=("在这里填写用户名", "在这里填写密码")) # suitable for an Nginx reverse proxy
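
The rewired events above pass the shared states (history, token_count) into predict and back out, and rely on predict being a generator so each streamed chunk updates the Chatbot component in place. A stripped-down sketch of that wiring pattern, assuming a Gradio 3.x API like the one used here; the respond callback and component names are illustrative only, not the ones in the commit:

import time
import gradio as gr

def respond(message, history):
    # Generator callback: every yield pushes an intermediate update to the UI,
    # which is how the streaming path in Chatbot.py refreshes the chat window.
    history = history + [(message, "")]
    reply = ""
    for word in ["thinking", "about", "it", "..."]:
        reply += word + " "
        history[-1] = (message, reply)
        time.sleep(0.1)
        yield history, history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    state = gr.State([])
    box = gr.Textbox(show_label=False, placeholder="type here")
    box.submit(respond, [box, state], [chatbot, state])

if __name__ == "__main__":
    demo.queue().launch()  # queue() is what lets generator callbacks stream updates
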
presets.py CHANGED
@@ -16,20 +16,25 @@ code {
16
  padding: .2em .4em .1em .4em;
17
  background-color: rgba(175,184,193,0.2);
18
  }
19
- pre {
20
  display: block;
21
  white-space: pre;
22
  background-color: hsla(0, 0%, 0%, 72%);
23
  border: solid 5px var(--color-border-primary) !important;
24
- border-radius: 8px;
25
  padding: 0 1.2rem 1.2rem;
26
  margin-top: 1em !important;
27
  color: #FFF;
28
  box-shadow: inset 0px 8px 16px hsla(0, 0%, 0%, .2)
29
  }
30
- pre code, pre code code {
31
- background-color: transparent !important;
32
- margin: 0;
33
- padding: 0;
34
- }
35
  """
 
 
16
  padding: .2em .4em .1em .4em;
17
  background-color: rgba(175,184,193,0.2);
18
  }
19
+ pre code {
20
  display: block;
21
  white-space: pre;
22
  background-color: hsla(0, 0%, 0%, 72%);
23
  border: solid 5px var(--color-border-primary) !important;
24
+ border-radius: 10px;
25
  padding: 0 1.2rem 1.2rem;
26
  margin-top: 1em !important;
27
  color: #FFF;
28
  box-shadow: inset 0px 8px 16px hsla(0, 0%, 0%, .2)
29
  }
 
 
 
 
 
30
  """
31
+
32
+ standard_error_msg = "☹️发生了错误:" # standard prefix for error messages
33
+ error_retrieve_prompt = "连接超时,无法获取对话。请检查网络连接,或者API-Key是否有效。" # error shown when fetching the reply fails
34
+ summarize_prompt = "请总结以上对话,不超过100字。" # prompt used to summarize the conversation
35
+ max_token_streaming = 4096 # maximum tokens for streaming conversations
36
+ timeout_streaming = 5 # timeout (seconds) for streaming conversations
37
+ max_token_all = 4096 # maximum tokens for non-streaming conversations
38
+ timeout_all = 200 # timeout (seconds) for non-streaming conversations
39
+ enable_streaming_option = True # whether to show the checkbox for streaming the answer in real time
40
+ HIDE_MY_KEY = True # set this to True if you want to hide your API key in the UI
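
These new constants centralize the tunables for both request paths: utils.get_response picks timeout_streaming or timeout_all depending on the stream flag, and predict compares the accumulated token count against max_token_streaming / max_token_all before triggering an automatic summary. A small sketch of that selection logic using the same names (standalone here purely for illustration):

from presets import (max_token_streaming, max_token_all,
                     timeout_streaming, timeout_all)

def pick_limits(stream: bool):
    # Mirrors how utils.py consumes the presets: a short timeout for the
    # streaming path, a much longer one for the blocking path.
    if stream:
        return max_token_streaming, timeout_streaming
    return max_token_all, timeout_all

max_token, timeout = pick_limits(stream=True)  # -> (4096, 5)
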
utils.py CHANGED
@@ -13,6 +13,8 @@ import requests
13
  # import markdown
14
  import csv
15
  import mdtex2html
 
 
16
 
17
  if TYPE_CHECKING:
18
  from typing import TypedDict
@@ -41,7 +43,7 @@ def postprocess(
41
  y[i] = (
42
  # None if message is None else markdown.markdown(message),
43
  # None if response is None else markdown.markdown(response),
44
- None if message is None else mdtex2html.convert(message),
45
  None if response is None else mdtex2html.convert(response),
46
  )
47
  return y
@@ -50,7 +52,6 @@ def parse_text(text):
50
  lines = text.split("\n")
51
  lines = [line for line in lines if line != ""]
52
  count = 0
53
- firstline = False
54
  for i, line in enumerate(lines):
55
  if "```" in line:
56
  count += 1
@@ -78,54 +79,33 @@ def parse_text(text):
78
  text = "".join(lines)
79
  return text
80
 
81
- def predict(inputs, top_p, temperature, openai_api_key, chatbot=[], history=[], system_prompt=initial_prompt, retry=False, summary=False, retry_on_crash = False, stream = True): # repetition_penalty, top_k
 
82
 
83
- if retry_on_crash:
84
- retry = True
85
 
 
86
  headers = {
87
  "Content-Type": "application/json",
88
  "Authorization": f"Bearer {openai_api_key}"
89
  }
90
 
91
- chat_counter = len(history) // 2
92
-
93
- print(f"chat_counter - {chat_counter}")
94
-
95
- messages = []
96
- if chat_counter:
97
- for index in range(0, 2*chat_counter, 2):
98
- temp1 = {}
99
- temp1["role"] = "user"
100
- temp1["content"] = history[index]
101
- temp2 = {}
102
- temp2["role"] = "assistant"
103
- temp2["content"] = history[index+1]
104
- if temp1["content"] != "":
105
- if temp2["content"] != "" or retry:
106
- messages.append(temp1)
107
- messages.append(temp2)
108
- else:
109
- messages[-1]['content'] = temp2['content']
110
- if retry and chat_counter:
111
- if retry_on_crash:
112
- messages = messages[-6:]
113
- messages.pop()
114
- elif summary:
115
- history = [*[i["content"] for i in messages[-2:]], "我们刚刚聊了什么?"]
116
- messages.append(compose_user(
117
- "请帮我总结一下上述对话的内容,实现减少字数的同时,保证对话的质量。在总结中不要加入这一句话。"))
118
- else:
119
- temp3 = {}
120
- temp3["role"] = "user"
121
- temp3["content"] = inputs
122
- messages.append(temp3)
123
- chat_counter += 1
124
- messages = [compose_system(system_prompt), *messages]
125
- # messages
126
  payload = {
127
  "model": "gpt-3.5-turbo",
128
- "messages": messages, # [{"role": "user", "content": f"{inputs}"}],
129
  "temperature": temperature, # 1.0,
130
  "top_p": top_p, # 1.0,
131
  "n": 1,
@@ -133,91 +113,129 @@ def predict(inputs, top_p, temperature, openai_api_key, chatbot=[], history=[],
133
  "presence_penalty": 0,
134
  "frequency_penalty": 0,
135
  }
136
-
137
- if not summary:
138
- history.append(inputs)
139
  else:
140
- print("精简中...")
 
 
141
 
142
- print(f"payload: {payload}")
143
- # make a POST request to the API endpoint using the requests.post method, passing in stream=True
 
 
 
 
 
 
144
  try:
145
- response = requests.post(API_URL, headers=headers, json=payload, stream=True)
146
- except:
147
- history.append("")
148
- chatbot.append(inputs, "")
149
- yield history, chatbot, f"出现了网络错误"
150
  return
151
 
152
- token_counter = 0
153
- partial_words = ""
154
 
155
- counter = 0
156
- if stream:
157
- chatbot.append((parse_text(history[-1]), ""))
158
- for chunk in response.iter_lines():
159
- if counter == 0:
160
- counter += 1
161
- continue
162
  counter += 1
163
- # check whether each line is non-empty
164
- if chunk:
165
- # decode each line as response data is in bytes
166
- try:
167
- if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0:
168
- chunkjson = json.loads(chunk.decode()[6:])
169
- status_text = f"id: {chunkjson['id']}, finish_reason: {chunkjson['choices'][0]['finish_reason']}"
170
- yield chatbot, history, status_text
171
- break
172
- except Exception as e:
173
- traceback.print_exc()
174
- if not retry_on_crash:
175
- print("正在尝试使用缩短的context重新生成……")
176
- chatbot.pop()
177
- history.append("")
178
- yield next(predict(inputs, top_p, temperature, openai_api_key, chatbot, history, system_prompt, retry, summary=False, retry_on_crash=True, stream=False))
179
- else:
180
- msg = "☹️发生了错误:生成失败,请检查网络"
181
- print(msg)
182
- history.append(inputs, "")
183
- chatbot.append(inputs, msg)
184
- yield chatbot, history, "status: ERROR"
185
  break
186
- chunkjson = json.loads(chunk.decode()[6:])
187
- status_text = f"id: {chunkjson['id']}, finish_reason: {chunkjson['choices'][0]['finish_reason']}"
188
- partial_words = partial_words + \
189
- json.loads(chunk.decode()[6:])[
190
- 'choices'][0]["delta"]["content"]
191
  if token_counter == 0:
192
- history.append(" " + partial_words)
193
  else:
194
- history[-1] = partial_words
195
- chatbot[-1] = (parse_text(history[-2]), parse_text(history[-1]))
196
  token_counter += 1
197
- yield chatbot, history, status_text
 
 
 
 
 
198
  else:
199
- try:
200
- responsejson = json.loads(response.text)
201
- content = responsejson["choices"][0]["message"]["content"]
202
- history.append(content)
203
- chatbot.append((parse_text(history[-2]), parse_text(content)))
204
- status_text = "精简完成"
205
- except:
206
- chatbot.append((parse_text(history[-1]), "☹️发生了错误,请检查网络连接或者稍后再试。"))
207
- status_text = "status: ERROR"
208
- yield chatbot, history, status_text
 
 
 
 
209
 
 
 
 
 
 
 
 
 
210
 
211
 
212
- def delete_last_conversation(chatbot, history):
213
- if "☹️发生了错误" in chatbot[-1][1]:
214
  chatbot.pop()
215
- print(history)
216
  return chatbot, history
217
- history.pop()
218
- history.pop()
219
- print(history)
220
- return chatbot, history
 
 
 
 
 
221
 
222
  def save_chat_history(filename, system, history, chatbot):
223
  if filename == "":
@@ -231,19 +249,37 @@ def save_chat_history(filename, system, history, chatbot):
231
  json.dump(json_s, f)
232
 
233
 
234
- def load_chat_history(filename):
235
- with open(os.path.join(HISTORY_DIR, filename), "r") as f:
236
- json_s = json.load(f)
237
- print(json_s)
238
- return filename, json_s["system"], json_s["history"], json_s["chatbot"]
 
 
 
 
239
 
 
 
240
 
241
- def get_file_names(dir, plain=False, filetype=".json"):
242
  # find all json files in the current directory and return their names
 
243
  try:
244
- files = sorted([f for f in os.listdir(dir) if f.endswith(filetype)])
 
245
  except FileNotFoundError:
246
  files = []
 
 
 
247
  if plain:
248
  return files
249
  else:
@@ -254,23 +290,35 @@ def get_history_names(plain=False):
254
 
255
  def load_template(filename, mode=0):
256
  lines = []
257
- with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as csvfile:
258
- reader = csv.reader(csvfile)
259
- lines = list(reader)
260
- lines = lines[1:]
 
 
 
 
 
 
261
  if mode == 1:
262
- return sorted([row[0] for row in lines])
263
  elif mode == 2:
264
  return {row[0]:row[1] for row in lines}
265
  else:
266
- return {row[0]:row[1] for row in lines}, gr.Dropdown.update(choices=sorted([row[0] for row in lines]))
 
267
 
268
  def get_template_names(plain=False):
269
- return get_file_names(TEMPLATES_DIR, plain, filetype=".csv")
270
 
271
- def reset_state():
272
- return [], []
 
 
 
273
 
 
 
274
 
275
  def compose_system(system_prompt):
276
  return {"role": "system", "content": system_prompt}
 
13
  # import markdown
14
  import csv
15
  import mdtex2html
16
+ from pypinyin import lazy_pinyin
17
+ from presets import *
18
 
19
  if TYPE_CHECKING:
20
  from typing import TypedDict
 
43
  y[i] = (
44
  # None if message is None else markdown.markdown(message),
45
  # None if response is None else markdown.markdown(response),
46
+ None if message is None else mdtex2html.convert((message)),
47
  None if response is None else mdtex2html.convert(response),
48
  )
49
  return y
 
52
  lines = text.split("\n")
53
  lines = [line for line in lines if line != ""]
54
  count = 0
 
55
  for i, line in enumerate(lines):
56
  if "```" in line:
57
  count += 1
 
79
  text = "".join(lines)
80
  return text
81
 
82
+ def construct_text(role, text):
83
+ return {"role": role, "content": text}
84
 
85
+ def construct_user(text):
86
+ return construct_text("user", text)
87
 
88
+ def construct_system(text):
89
+ return construct_text("system", text)
90
+
91
+ def construct_assistant(text):
92
+ return construct_text("assistant", text)
93
+
94
+ def construct_token_message(token, stream=False):
95
+ extra = "【仅包含回答的计数】 " if stream else ""
96
+ return f"{extra}Token 计数: {token}"
97
+
98
+ def get_response(openai_api_key, system_prompt, history, temperature, top_p, stream):
99
  headers = {
100
  "Content-Type": "application/json",
101
  "Authorization": f"Bearer {openai_api_key}"
102
  }
103
 
104
+ history = [construct_system(system_prompt), *history]
105
+
 
 
 
 
 
 
 
 
 
 
106
  payload = {
107
  "model": "gpt-3.5-turbo",
108
+ "messages": history, # [{"role": "user", "content": f"{inputs}"}],
109
  "temperature": temperature, # 1.0,
110
  "top_p": top_p, # 1.0,
111
  "n": 1,
 
113
  "presence_penalty": 0,
114
  "frequency_penalty": 0,
115
  }
116
+ if stream:
117
+ timeout = timeout_streaming
 
118
  else:
119
+ timeout = timeout_all
120
+ response = requests.post(API_URL, headers=headers, json=payload, stream=True, timeout=timeout)
121
+ return response
122
 
123
+ def stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, previous_token_count, top_p, temperature):
124
+ def get_return_value():
125
+ return chatbot, history, status_text, [*previous_token_count, token_counter]
126
+ token_counter = 0
127
+ partial_words = ""
128
+ counter = 0
129
+ status_text = "OK"
130
+ history.append(construct_user(inputs))
131
  try:
132
+ response = get_response(openai_api_key, system_prompt, history, temperature, top_p, True)
133
+ except requests.exceptions.ConnectTimeout:
134
+ status_text = standard_error_msg + error_retrieve_prompt
135
+ yield get_return_value()
 
136
  return
137
 
138
+ chatbot.append((parse_text(inputs), ""))
139
+ yield get_return_value()
140
 
141
+ for chunk in response.iter_lines():
142
+ if counter == 0:
 
 
 
 
 
143
  counter += 1
144
+ continue
145
+ counter += 1
146
+ # check whether each line is non-empty
147
+ if chunk:
148
+ chunk = chunk.decode()
149
+ chunklength = len(chunk)
150
+ chunk = json.loads(chunk[6:])
151
+ # decode each line as response data is in bytes
152
+ if chunklength > 6 and "delta" in chunk['choices'][0]:
153
+ finish_reason = chunk['choices'][0]['finish_reason']
154
+ status_text = construct_token_message(sum(previous_token_count)+token_counter, stream=True)
155
+ if finish_reason == "stop":
156
+ yield get_return_value()
 
 
 
 
 
 
 
 
 
157
  break
158
+ partial_words = partial_words + chunk['choices'][0]["delta"]["content"]
 
 
 
 
159
  if token_counter == 0:
160
+ history.append(construct_assistant(" " + partial_words))
161
  else:
162
+ history[-1] = construct_assistant(partial_words)
163
+ chatbot[-1] = (parse_text(inputs), parse_text(partial_words))
164
  token_counter += 1
165
+ yield get_return_value()
166
+
167
+
168
+ def predict_all(openai_api_key, system_prompt, history, inputs, chatbot, previous_token_count, top_p, temperature):
169
+ history.append(construct_user(inputs))
170
+ try:
171
+ response = get_response(openai_api_key, system_prompt, history, temperature, top_p, False)
172
+ except requests.exceptions.ConnectTimeout:
173
+ status_text = standard_error_msg + error_retrieve_prompt
174
+ return chatbot, history, status_text, previous_token_count
175
+ response = json.loads(response.text)
176
+ content = response["choices"][0]["message"]["content"]
177
+ history.append(construct_assistant(content))
178
+ chatbot.append((parse_text(inputs), parse_text(content)))
179
+ total_token_count = response["usage"]["total_tokens"]
180
+ previous_token_count.append(total_token_count - sum(previous_token_count))
181
+ status_text = construct_token_message(total_token_count)
182
+ return chatbot, history, status_text, previous_token_count
183
+
184
+
185
+ def predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature, stream=False, should_check_token_count = True): # repetition_penalty, top_k
186
+ if stream:
187
+ iter = stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature)
188
+ for chatbot, history, status_text, token_count in iter:
189
+ yield chatbot, history, status_text, token_count
190
  else:
191
+ chatbot, history, status_text, token_count = predict_all(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature)
192
+ yield chatbot, history, status_text, token_count
193
+ if stream:
194
+ max_token = max_token_streaming
195
+ else:
196
+ max_token = max_token_all
197
+ if sum(token_count) > max_token and should_check_token_count:
198
+ iter = reduce_token_size(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, hidden=True)
199
+ for chatbot, history, status_text, token_count in iter:
200
+ status_text = f"Token已达到上限,自动降低Token计数至 {status_text}"
201
+ yield chatbot, history, status_text, token_count
202
+
203
+
204
+ def retry(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False):
205
+ if len(history) == 0:
206
+ yield chatbot, history, f"{standard_error_msg}上下文是空的", token_count
207
+ return
208
+ history.pop()
209
+ inputs = history.pop()["content"]
210
+ token_count.pop()
211
+ iter = predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature, stream=stream)
212
+ for x in iter:
213
+ yield x
214
+
215
 
216
+ def reduce_token_size(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, hidden=False):
217
+ iter = predict(openai_api_key, system_prompt, history, summarize_prompt, chatbot, token_count, top_p, temperature, stream=stream, should_check_token_count=False)
218
+ for chatbot, history, status_text, previous_token_count in iter:
219
+ history = history[-2:]
220
+ token_count = previous_token_count[-1:]
221
+ if hidden:
222
+ chatbot.pop()
223
+ yield chatbot, history, construct_token_message(sum(token_count), stream=stream), token_count
224
 
225
 
226
+ def delete_last_conversation(chatbot, history, previous_token_count, streaming):
227
+ if len(chatbot) > 0 and standard_error_msg in chatbot[-1][1]:
228
  chatbot.pop()
 
229
  return chatbot, history
230
+ if len(history) > 0:
231
+ history.pop()
232
+ history.pop()
233
+ if len(chatbot) > 0:
234
+ chatbot.pop()
235
+ if len(previous_token_count) > 0:
236
+ previous_token_count.pop()
237
+ return chatbot, history, previous_token_count, construct_token_message(sum(previous_token_count), streaming)
238
+
239
 
240
  def save_chat_history(filename, system, history, chatbot):
241
  if filename == "":
 
249
  json.dump(json_s, f)
250
 
251
 
252
+ def load_chat_history(filename, system, history, chatbot):
253
+ try:
254
+ with open(os.path.join(HISTORY_DIR, filename), "r") as f:
255
+ json_s = json.load(f)
256
+ if type(json_s["history"]) == list:
257
+ new_history = []
258
+ for index, item in enumerate(json_s["history"]):
259
+ if index % 2 == 0:
260
+ new_history.append(construct_user(item))
261
+ else:
262
+ new_history.append(construct_assistant(item))
263
+ json_s["history"] = new_history
264
+ return filename, json_s["system"], json_s["history"], json_s["chatbot"]
265
+ except FileNotFoundError:
266
+ print("File not found.")
267
+ return filename, system, history, chatbot
268
 
269
+ def sorted_by_pinyin(list):
270
+ return sorted(list, key=lambda char: lazy_pinyin(char)[0][0])
271
 
272
+ def get_file_names(dir, plain=False, filetypes=[".json"]):
273
  # find all json files in the current directory and return their names
274
+ files = []
275
  try:
276
+ for type in filetypes:
277
+ files += [f for f in os.listdir(dir) if f.endswith(type)]
278
  except FileNotFoundError:
279
  files = []
280
+ files = sorted_by_pinyin(files)
281
+ if files == []:
282
+ files = [""]
283
  if plain:
284
  return files
285
  else:
 
290
 
291
  def load_template(filename, mode=0):
292
  lines = []
293
+ print("Loading template...")
294
+ if filename.endswith(".json"):
295
+ with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as f:
296
+ lines = json.load(f)
297
+ lines = [[i["act"], i["prompt"]] for i in lines]
298
+ else:
299
+ with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as csvfile:
300
+ reader = csv.reader(csvfile)
301
+ lines = list(reader)
302
+ lines = lines[1:]
303
  if mode == 1:
304
+ return sorted_by_pinyin([row[0] for row in lines])
305
  elif mode == 2:
306
  return {row[0]:row[1] for row in lines}
307
  else:
308
+ choices = sorted_by_pinyin([row[0] for row in lines])
309
+ return {row[0]:row[1] for row in lines}, gr.Dropdown.update(choices=choices, value=choices[0])
310
 
311
  def get_template_names(plain=False):
312
+ return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"])
313
 
314
+ def get_template_content(templates, selection, original_system_prompt):
315
+ try:
316
+ return templates[selection]
317
+ except:
318
+ return original_system_prompt
319
 
320
+ def reset_state():
321
+ return [], [], [], construct_token_message(0)
322
 
323
  def compose_system(system_prompt):
324
  return {"role": "system", "content": system_prompt}
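
The streaming path in the rewritten utils.py decodes each server-sent-event line and strips the leading "data: " prefix (hence the chunk[6:] slice) before reading the delta. A minimal, hedged sketch of that parsing step in isolation; the sample payload and the parse_sse_line helper are illustrative, with field names following the chat-completions stream format the code expects:

import json

def parse_sse_line(raw: bytes):
    # Lines arrive as b'data: {"choices": [{"delta": {"content": "Hi"}, ...}]}'
    line = raw.decode()
    if not line.startswith("data: "):
        return None, None
    body = line[6:]  # same slice as chunk[6:] in stream_predict
    if body.strip() == "[DONE]":
        return None, "stop"
    chunk = json.loads(body)
    choice = chunk["choices"][0]
    delta = choice.get("delta", {})
    return delta.get("content", ""), choice.get("finish_reason")

content, finish = parse_sse_line(
    b'data: {"choices": [{"delta": {"content": "Hi"}, "finish_reason": null}]}')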