lmt committed · Commit e0f18aa · 1 Parent(s): 1e0f673

Update styles (更新样式)

Files changed:
- app.py (+1, -2)
- presets.py (+2, -2)
- utils.py (+64, -34)
app.py CHANGED

@@ -18,8 +18,7 @@ with gr.Blocks(css=customCSS) as server:
         with gr.Column(scale=1):
             use_streaming_checkbox = gr.Checkbox(
                 label="实时传输回答", value=True, visible=enable_streaming_option)
-            chatbot = gr.Chatbot(elem_id="chat"
-                                 color_map=("#1D51EE", "#ffffff"))
+            chatbot = gr.Chatbot(elem_id="chat", color_map=("#1D51EE", "#ffffff"))
             history = gr.State([])
             token_count = gr.State([])
             promptTemplates = gr.State(load_template(
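The fix above folds the Chatbot construction onto one line; as rendered here, the removed two-line call is missing the comma after elem_id="chat", which would be a SyntaxError. A minimal sketch of the corrected layout, assuming a Gradio 3.x environment (gr.Chatbot's color_map argument was deprecated in later releases):

```python
# Minimal sketch of the corrected app.py layout (assumes Gradio 3.x,
# where gr.Chatbot still accepted the since-deprecated `color_map`).
import gradio as gr

enable_streaming_option = False  # value taken from presets.py below

with gr.Blocks() as server:
    with gr.Column(scale=1):
        use_streaming_checkbox = gr.Checkbox(
            label="实时传输回答", value=True, visible=enable_streaming_option)
        # One line, with the comma the old two-line call appears to lack:
        chatbot = gr.Chatbot(elem_id="chat", color_map=("#1D51EE", "#ffffff"))
        history = gr.State([])
        token_count = gr.State([])
```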
presets.py CHANGED

@@ -1,7 +1,7 @@
 # -*- coding:utf-8 -*-
 title = """<h1 align="center">MyChatGPT</h1>"""
 description = """<div align=center>
-Powered by `gpt-3.5-turbo`
+Powered by `gpt-3.5-turbo` Model
 </div>
 """
 customCSS = """

@@ -39,5 +39,5 @@ max_token_streaming = 3500 # 流式对话时的最大 token 数
 timeout_streaming = 5 # 流式对话时的超时时间
 max_token_all = 3500 # 非流式对话时的最大 token 数
 timeout_all = 200 # 非流式对话时的超时时间
-enable_streaming_option =
+enable_streaming_option = False # 是否启用选择选择是否实时显示回答的勾选框
 HIDE_MY_KEY = True # 如果你想在UI中隐藏你的 API 密钥,将此值设置为 True
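These constants are consumed by utils.py when picking per-mode request limits. A small sketch of that pairing; the helper name limits_for is hypothetical, but the names and values are the ones defined above:

```python
# Hypothetical helper showing how the mode-specific presets pair up;
# the constant values are the ones set in presets.py.
max_token_streaming, timeout_streaming = 3500, 5    # streaming mode
max_token_all, timeout_all = 3500, 200              # one-shot mode

def limits_for(stream: bool) -> tuple[int, int]:
    """Return (token ceiling, request timeout in seconds) for the mode."""
    if stream:
        return max_token_streaming, timeout_streaming
    return max_token_all, timeout_all

print(limits_for(True))   # (3500, 5)
print(limits_for(False))  # (3500, 200)
```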
utils.py CHANGED

@@ -26,31 +26,34 @@ API_URL = "https://api.openai.com/v1/chat/completions"
 HISTORY_DIR = "history"
 TEMPLATES_DIR = "templates"
 
+
 def postprocess(
-    [old postprocess body: 18 lines not captured in this page extraction]
+    self, y: List[Tuple[str | None, str | None]]
+) -> List[Tuple[str | None, str | None]]:
+    """
+    Parameters:
+        y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format.
+    Returns:
+        List of tuples representing the message and response. Each message and response will be a string of HTML.
+    """
+    if y is None:
+        return []
+    for i, (message, response) in enumerate(y):
+        y[i] = (
+            # None if message is None else markdown.markdown(message),
+            # None if response is None else markdown.markdown(response),
+            None if message is None else mdtex2html.convert((message)),
+            None if response is None else mdtex2html.convert(response),
+        )
+    return y
+
 
 def count_token(input_str):
     encoding = tiktoken.get_encoding("cl100k_base")
     length = len(encoding.encode(input_str))
     return length
 
+
 def parse_text(text):
     lines = text.split("\n")
     lines = [line for line in lines if line != ""]
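The rewritten postprocess renders each (message, response) pair with mdtex2html.convert instead of markdown.markdown, so both Markdown and TeX math come out as HTML. Presumably the app assigns this function over gr.Chatbot.postprocess, though that wiring is not part of this diff. A standalone sketch of the conversion step (pip install mdtex2html):

```python
# Standalone sketch of the conversion the new postprocess performs on
# each (message, response) pair; mdtex2html handles Markdown + TeX.
import mdtex2html

pairs = [("What is $e^{i\\pi}$?", "It equals **-1**."), (None, "hello")]
rendered = [
    (None if m is None else mdtex2html.convert(m),
     None if r is None else mdtex2html.convert(r))
    for m, r in pairs
]
print(rendered[0][1])  # HTML such as "<p>It equals <strong>-1</strong>.</p>"
```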
@@ -82,21 +85,27 @@ def parse_text(text):
     text = "".join(lines)
     return text
 
+
 def construct_text(role, text):
     return {"role": role, "content": text}
 
+
 def construct_user(text):
     return construct_text("user", text)
 
+
 def construct_system(text):
     return construct_text("system", text)
 
+
 def construct_assistant(text):
     return construct_text("assistant", text)
 
+
 def construct_token_message(token, stream=False):
     return f"Token 计数: {token}"
 
+
 def get_response(openai_api_key, system_prompt, history, temperature, top_p, stream):
     headers = {
         "Content-Type": "application/json",
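The construct_* helpers above build the OpenAI-style message dicts that get_response posts. A quick usage sketch:

```python
# Usage sketch for the construct_* helpers defined above.
def construct_text(role, text):
    return {"role": role, "content": text}

messages = [
    construct_text("system", "You are a helpful assistant."),
    construct_text("user", "你好"),
    construct_text("assistant", "你好!有什么可以帮你?"),
]
print(messages[1])  # {'role': 'user', 'content': '你好'}
```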
@@ -119,9 +128,11 @@ def get_response(openai_api_key, system_prompt, history, temperature, top_p, str
         timeout = timeout_streaming
     else:
         timeout = timeout_all
-    response = requests.post(API_URL, headers=headers,
+    response = requests.post(API_URL, headers=headers,
+                             json=payload, stream=True, timeout=timeout)
     return response
 
+
 def stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, previous_token_count, top_p, temperature):
     def get_return_value():
         return chatbot, history, status_text, [*previous_token_count, token_counter]
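The payload serialized here is assembled above this hunk and is not shown in the diff; the sketch below reconstructs a plausible request against the OpenAI chat completions API (the key is a placeholder). Note that requests' stream=True only defers downloading the body; incremental generation is requested by the "stream" field in the payload itself:

```python
# Plausible shape of the request get_response sends (payload keys follow
# the OpenAI chat-completions API; the exact construction sits outside
# this hunk, so treat this as a sketch).
import requests

API_URL = "https://api.openai.com/v1/chat/completions"
headers = {
    "Content-Type": "application/json",
    "Authorization": "Bearer sk-...",  # placeholder API key
}
payload = {
    "model": "gpt-3.5-turbo",
    "messages": [{"role": "user", "content": "Hello"}],
    "stream": True,       # ask the server for incremental deltas
    "temperature": 1.0,
    "top_p": 1.0,
}
response = requests.post(API_URL, headers=headers, json=payload,
                         stream=True, timeout=5)  # timeout_streaming
```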
@@ -140,7 +151,8 @@ def stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, prev
     user_token_count = count_token(inputs)
     print(f"输入token计数: {user_token_count}")
     try:
-        response = get_response(
+        response = get_response(
+            openai_api_key, system_prompt, history, temperature, top_p, True)
     except requests.exceptions.ConnectTimeout:
         status_text = standard_error_msg + error_retrieve_prompt
         yield get_return_value()
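count_token (context in the first hunk) measures inputs with tiktoken's cl100k_base encoding, the encoding used by the gpt-3.5-turbo family. A minimal check:

```python
# Minimal demonstration of the token counting used above
# (`pip install tiktoken`).
import tiktoken

encoding = tiktoken.get_encoding("cl100k_base")
tokens = encoding.encode("实时传输回答")
print(len(tokens))  # the user_token_count for this input
```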
@@ -162,15 +174,19 @@ def stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, prev
         # decode each line as response data is in bytes
         if chunklength > 6 and "delta" in chunk['choices'][0]:
             finish_reason = chunk['choices'][0]['finish_reason']
-            status_text = construct_token_message(
+            status_text = construct_token_message(
+                sum(previous_token_count)+token_counter+user_token_count, stream=True)
             if finish_reason == "stop":
                 print("生成完毕")
                 yield get_return_value()
                 break
             try:
-                partial_words = partial_words +
+                partial_words = partial_words + \
+                    chunk['choices'][0]["delta"]["content"]
             except KeyError:
-                status_text = standard_error_msg + "API回复中找不到内容
+                status_text = standard_error_msg + "API回复中找不到内容。很可能是Token计数达到上限了。请重置对话。当前Token计数: " + \
+                    str(sum(previous_token_count) +
+                        token_counter+user_token_count)
                 yield get_return_value()
                 break
         if token_counter == 0:
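For orientation, a sketch of the loop this hunk lives in: each streamed line is a server-sent-events record of the form data: {...}, the first delta usually carries only the role, and a delta without "content" raises the KeyError handled above. The chunklength > 6 guard in the code appears to be what skips the terminal data: [DONE] record, whose payload is exactly 6 characters. Simplified, under those assumptions:

```python
# Sketch of consuming the streaming response the way stream_predict does.
# Assumes `response` came from requests.post(..., stream=True).
import json

partial_words = ""
for line in response.iter_lines():
    if not line or not line.startswith(b"data: "):
        continue                       # skip keep-alive blanks
    data = line.decode()[len("data: "):]
    if data == "[DONE]":
        break                          # terminal marker, not JSON
    chunk = json.loads(data)
    if chunk["choices"][0].get("finish_reason") == "stop":
        break                          # generation finished
    try:
        partial_words += chunk["choices"][0]["delta"]["content"]
    except KeyError:
        pass                           # role-only delta, no text yet
```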
@@ -186,7 +202,8 @@ def predict_all(openai_api_key, system_prompt, history, inputs, chatbot, previou
     print("一次性回答模式")
     history.append(construct_user(inputs))
     try:
-        response = get_response(
+        response = get_response(
+            openai_api_key, system_prompt, history, temperature, top_p, False)
     except requests.exceptions.ConnectTimeout:
         status_text = standard_error_msg + error_retrieve_prompt
         return chatbot, history, status_text, previous_token_count
@@ -201,26 +218,29 @@ def predict_all(openai_api_key, system_prompt, history, inputs, chatbot, previou
     return chatbot, history, status_text, previous_token_count
 
 
-def predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature, stream=False, should_check_token_count
-    print("输入为:" +colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL)
+def predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature, stream=False, should_check_token_count=True):  # repetition_penalty, top_k
+    # print("输入为:" +colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL)
     if stream:
         print("使用流式传输")
-        iter = stream_predict(openai_api_key, system_prompt, history,
+        iter = stream_predict(openai_api_key, system_prompt, history,
+                              inputs, chatbot, token_count, top_p, temperature)
         for chatbot, history, status_text, token_count in iter:
             yield chatbot, history, status_text, token_count
     else:
         print("不使用流式传输")
-        chatbot, history, status_text, token_count = predict_all(
+        chatbot, history, status_text, token_count = predict_all(
+            openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature)
         yield chatbot, history, status_text, token_count
     print(f"传输完毕。当前token计数为{token_count}")
-    print("回答为:" +colorama.Fore.BLUE + f"{history[-1]['content']}" + colorama.Style.RESET_ALL)
+    # print("回答为:" +colorama.Fore.BLUE + f"{history[-1]['content']}" + colorama.Style.RESET_ALL)
     if stream:
         max_token = max_token_streaming
     else:
         max_token = max_token_all
     if sum(token_count) > max_token and should_check_token_count:
         print(f"精简token中{token_count}/{max_token}")
-        iter = reduce_token_size(openai_api_key, system_prompt, history,
+        iter = reduce_token_size(openai_api_key, system_prompt, history,
+                                 chatbot, token_count, top_p, temperature, stream=False, hidden=True)
         for chatbot, history, status_text, token_count in iter:
             status_text = f"Token 达到上限,已自动降低Token计数至 {status_text}"
             yield chatbot, history, status_text, token_count
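predict is itself a generator: in streaming mode it re-yields every intermediate state from stream_predict so Gradio can repaint the chat live, then may chain into reduce_token_size. The pattern in miniature:

```python
# Self-contained miniature of the generator chaining predict relies on.
def stream_answer():
    for partial in ["Hel", "Hello", "Hello!"]:
        yield partial              # intermediate UI states

def predict_sketch(stream=True):
    if stream:
        for state in stream_answer():
            yield state            # re-yield so the UI updates live
    else:
        yield "Hello!"             # single final state

print(list(predict_sketch()))      # ['Hel', 'Hello', 'Hello!']
```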
@@ -234,7 +254,8 @@ def retry(openai_api_key, system_prompt, history, chatbot, token_count, top_p, t
     history.pop()
     inputs = history.pop()["content"]
     token_count.pop()
-    iter = predict(openai_api_key, system_prompt, history, inputs,
+    iter = predict(openai_api_key, system_prompt, history, inputs,
+                   chatbot, token_count, top_p, temperature, stream=stream)
     print("重试完毕")
     for x in iter:
         yield x
@@ -242,7 +263,8 @@ def retry(openai_api_key, system_prompt, history, chatbot, token_count, top_p, t
 
 def reduce_token_size(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, hidden=False):
     print("开始减少token数量……")
-    iter = predict(openai_api_key, system_prompt, history, summarize_prompt, chatbot,
+    iter = predict(openai_api_key, system_prompt, history, summarize_prompt, chatbot,
+                   token_count, top_p, temperature, stream=stream, should_check_token_count=False)
     for chatbot, history, status_text, previous_token_count in iter:
         history = history[-2:]
         token_count = previous_token_count[-1:]
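reduce_token_size shrinks the context by asking the model for a summary (summarize_prompt) and then keeping only the last two turns, the summary request and the summary itself. The trimming step in isolation, with made-up contents:

```python
# The trimming that reduce_token_size applies after the summary arrives.
history = [
    {"role": "user", "content": "long question 1"},
    {"role": "assistant", "content": "long answer 1"},
    {"role": "user", "content": "summarize_prompt goes here"},
    {"role": "assistant", "content": "a compact summary"},
]
previous_token_count = [120, 95, 30]

history = history[-2:]                       # keep summary exchange only
token_count = previous_token_count[-1:]      # and its token count
print(history, token_count)
```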
@@ -309,9 +331,11 @@ def load_chat_history(filename, system, history, chatbot):
         print("没有找到对话历史文件,不执行任何操作")
         return filename, system, history, chatbot
 
+
 def sorted_by_pinyin(list):
     return sorted(list, key=lambda char: lazy_pinyin(char)[0][0])
 
+
 def get_file_names(dir, plain=False, filetypes=[".json"]):
     print(f"获取文件名列表,目录为{dir},文件类型为{filetypes},是否为纯文本列表{plain}")
     files = []
@@ -328,10 +352,12 @@ def get_file_names(dir, plain=False, filetypes=[".json"]):
     else:
         return gr.Dropdown.update(choices=files)
 
+
 def get_history_names(plain=False):
     print("获取历史记录文件名列表")
     return get_file_names(HISTORY_DIR, plain)
 
+
 def load_template(filename, mode=0):
     print(f"加载模板文件{filename},模式为{mode}(0为返回字典和下拉菜单,1为返回下拉菜单,2为返回字典)")
     lines = []
@@ -348,15 +374,17 @@ def load_template(filename, mode=0):
     if mode == 1:
         return sorted_by_pinyin([row[0] for row in lines])
     elif mode == 2:
-        return {row[0]:row[1] for row in lines}
+        return {row[0]: row[1] for row in lines}
     else:
         choices = sorted_by_pinyin([row[0] for row in lines])
-        return {row[0]:row[1] for row in lines}, gr.Dropdown.update(choices=choices, value=choices[0])
+        return {row[0]: row[1] for row in lines}, gr.Dropdown.update(choices=choices, value=choices[0])
+
 
 def get_template_names(plain=False):
     print("获取模板文件名列表")
     return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"])
 
+
 def get_template_content(templates, selection, original_system_prompt):
     print(f"应用模板中,选择为{selection},原始系统提示为{original_system_prompt}")
     try:
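load_template returns the template table as a dict and, in mode 0, also a Dropdown update whose choices are sorted by the pinyin of each name's first character (see sorted_by_pinyin in an earlier hunk). A sketch on a two-row table (pip install pypinyin):

```python
# Sketch of load_template's mode-2 and mode-0 behavior on a tiny table.
from pypinyin import lazy_pinyin

lines = [("润色", "Polish the following text."),
         ("翻译", "Translate the following text.")]

templates = {row[0]: row[1] for row in lines}             # mode == 2
choices = sorted(templates,
                 key=lambda s: lazy_pinyin(s)[0][0])      # sorted_by_pinyin
print(choices)            # ['翻译', '润色'] ('f' sorts before 'r')
print(templates[choices[0]])
```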
@@ -364,9 +392,11 @@ def get_template_content(templates, selection, original_system_prompt):
     except:
         return original_system_prompt
 
+
 def reset_state():
     print("重置状态")
     return [], [], [], construct_token_message(0)
 
+
 def reset_textbox():
     return gr.update(value='')