Spaces:
Sleeping
Sleeping
chat_with_perplexity
Browse files- app.py +9 -9
- chatbot.py +148 -18
app.py
CHANGED
|
@@ -2367,7 +2367,7 @@ def get_instructions(content_subject, content_grade, transcript_text, key_moment
|
|
| 2367 |
grade: {content_grade}
|
| 2368 |
context: {key_moments}
|
| 2369 |
transcript_text: {transcript_text}
|
| 2370 |
-
Assistant Role: you are a {content_subject} assistant. you can call yourself as {content_subject} 學伴
|
| 2371 |
User Role: {content_grade} th-grade student.
|
| 2372 |
Method: {method}
|
| 2373 |
Language: Traditional Chinese ZH-TW (it's very important), suitable for {content_grade} th-grade level.
|
|
@@ -2580,18 +2580,18 @@ def get_chatbot_config(ai_name, transcript_state, key_moments, content_subject,
|
|
| 2580 |
ai_name_clients_model = {
|
| 2581 |
"foxcat": {
|
| 2582 |
"ai_name": "foxcat",
|
| 2583 |
-
"ai_client":
|
| 2584 |
-
"ai_model_name": "
|
| 2585 |
},
|
| 2586 |
"lili": {
|
| 2587 |
"ai_name": "lili",
|
| 2588 |
-
"ai_client":
|
| 2589 |
-
"ai_model_name": "
|
| 2590 |
},
|
| 2591 |
"maimai": {
|
| 2592 |
"ai_name": "maimai",
|
| 2593 |
"ai_client": PERPLEXITY_CLIENT,
|
| 2594 |
-
"ai_model_name": "
|
| 2595 |
}
|
| 2596 |
}
|
| 2597 |
ai_client = ai_name_clients_model.get(ai_name, "foxcat")["ai_client"]
|
|
@@ -3330,7 +3330,7 @@ def create_app():
|
|
| 3330 |
with gr.Accordion("🦄 飛特精靈 敘述", open=False):
|
| 3331 |
vaitor_chatbot_description_value = gr.Markdown(value=vaitor_chatbot_description, visible=True)
|
| 3332 |
# 狐狸貓
|
| 3333 |
-
with gr.Column(scale=1, variant="panel", visible=
|
| 3334 |
foxcat_chatbot_avatar_url = "https://storage.googleapis.com/wpassets.junyiacademy.org/1/2020/06/%E7%A7%91%E5%AD%B8%E5%BE%BD%E7%AB%A0-2-150x150.png"
|
| 3335 |
foxcat_avatar_images = gr.State([user_avatar, foxcat_chatbot_avatar_url])
|
| 3336 |
foxcat_chatbot_description = """Hi,我是【狐狸貓】,可以陪你一起學習本次的內容,有什麼問題都可以問我喔!\n
|
|
@@ -3345,7 +3345,7 @@ def create_app():
|
|
| 3345 |
with gr.Accordion("💜 狐狸貓 敘述", open=False):
|
| 3346 |
foxcat_chatbot_description_value = gr.Markdown(value=foxcat_chatbot_description, visible=True)
|
| 3347 |
# 梨梨
|
| 3348 |
-
with gr.Column(scale=1, variant="panel", visible=
|
| 3349 |
lili_chatbot_avatar_url = "https://junyitopicimg.s3.amazonaws.com/live/v1283-new-topic-44-icon.png"
|
| 3350 |
lili_avatar_images = gr.State([user_avatar, lili_chatbot_avatar_url])
|
| 3351 |
lili_chatbot_description = """你好,我是溫柔的【梨梨】,很高興可以在這裡陪伴你學習。如果你有任何疑問,請隨時向我提出哦! \n
|
|
@@ -3364,7 +3364,7 @@ def create_app():
|
|
| 3364 |
with gr.Accordion("🧡 梨梨 敘述", open=False):
|
| 3365 |
lili_chatbot_description_value = gr.Markdown(value=lili_chatbot_description, visible=True)
|
| 3366 |
# 麥麥
|
| 3367 |
-
with gr.Column(scale=1, variant="panel", visible=
|
| 3368 |
maimai_chatbot_avatar_url = "https://storage.googleapis.com/wpassets.junyiacademy.org/1/2020/07/%E6%80%9D%E8%80%83%E5%8A%9B%E8%B6%85%E4%BA%BA%E5%BE%BD%E7%AB%A0_%E5%B7%A5%E4%BD%9C%E5%8D%80%E5%9F%9F-1-%E8%A4%87%E6%9C%AC-150x150.png"
|
| 3369 |
maimai_avatar_images = gr.State([user_avatar, maimai_chatbot_avatar_url])
|
| 3370 |
maimai_chatbot_description = """Hi,我是迷人的【麥麥】,我在這裡等著和你一起探索新知,任何疑問都可以向我提出!\n
|
|
|
|
| 2367 |
grade: {content_grade}
|
| 2368 |
context: {key_moments}
|
| 2369 |
transcript_text: {transcript_text}
|
| 2370 |
+
Assistant Role: you are a {content_subject} assistant. you can call yourself as {content_subject} 學伴 and your name if you know
|
| 2371 |
User Role: {content_grade} th-grade student.
|
| 2372 |
Method: {method}
|
| 2373 |
Language: Traditional Chinese ZH-TW (it's very important), suitable for {content_grade} th-grade level.
|
|
|
|
| 2580 |
ai_name_clients_model = {
|
| 2581 |
"foxcat": {
|
| 2582 |
"ai_name": "foxcat",
|
| 2583 |
+
"ai_client": PERPLEXITY_CLIENT,
|
| 2584 |
+
"ai_model_name": "perplexity_sonar",
|
| 2585 |
},
|
| 2586 |
"lili": {
|
| 2587 |
"ai_name": "lili",
|
| 2588 |
+
"ai_client": PERPLEXITY_CLIENT,
|
| 2589 |
+
"ai_model_name": "perplexity_r1_1776",
|
| 2590 |
},
|
| 2591 |
"maimai": {
|
| 2592 |
"ai_name": "maimai",
|
| 2593 |
"ai_client": PERPLEXITY_CLIENT,
|
| 2594 |
+
"ai_model_name": "perplexity_r1_1776",
|
| 2595 |
}
|
| 2596 |
}
|
| 2597 |
ai_client = ai_name_clients_model.get(ai_name, "foxcat")["ai_client"]
|
|
|
|
| 3330 |
with gr.Accordion("🦄 飛特精靈 敘述", open=False):
|
| 3331 |
vaitor_chatbot_description_value = gr.Markdown(value=vaitor_chatbot_description, visible=True)
|
| 3332 |
# 狐狸貓
|
| 3333 |
+
with gr.Column(scale=1, variant="panel", visible=True):
|
| 3334 |
foxcat_chatbot_avatar_url = "https://storage.googleapis.com/wpassets.junyiacademy.org/1/2020/06/%E7%A7%91%E5%AD%B8%E5%BE%BD%E7%AB%A0-2-150x150.png"
|
| 3335 |
foxcat_avatar_images = gr.State([user_avatar, foxcat_chatbot_avatar_url])
|
| 3336 |
foxcat_chatbot_description = """Hi,我是【狐狸貓】,可以陪你一起學習本次的內容,有什麼問題都可以問我喔!\n
|
|
|
|
| 3345 |
with gr.Accordion("💜 狐狸貓 敘述", open=False):
|
| 3346 |
foxcat_chatbot_description_value = gr.Markdown(value=foxcat_chatbot_description, visible=True)
|
| 3347 |
# 梨梨
|
| 3348 |
+
with gr.Column(scale=1, variant="panel", visible=True):
|
| 3349 |
lili_chatbot_avatar_url = "https://junyitopicimg.s3.amazonaws.com/live/v1283-new-topic-44-icon.png"
|
| 3350 |
lili_avatar_images = gr.State([user_avatar, lili_chatbot_avatar_url])
|
| 3351 |
lili_chatbot_description = """你好,我是溫柔的【梨梨】,很高興可以在這裡陪伴你學習。如果你有任何疑問,請隨時向我提出哦! \n
|
|
|
|
| 3364 |
with gr.Accordion("🧡 梨梨 敘述", open=False):
|
| 3365 |
lili_chatbot_description_value = gr.Markdown(value=lili_chatbot_description, visible=True)
|
| 3366 |
# 麥麥
|
| 3367 |
+
with gr.Column(scale=1, variant="panel", visible=True):
|
| 3368 |
maimai_chatbot_avatar_url = "https://storage.googleapis.com/wpassets.junyiacademy.org/1/2020/07/%E6%80%9D%E8%80%83%E5%8A%9B%E8%B6%85%E4%BA%BA%E5%BE%BD%E7%AB%A0_%E5%B7%A5%E4%BD%9C%E5%8D%80%E5%9F%9F-1-%E8%A4%87%E6%9C%AC-150x150.png"
|
| 3369 |
maimai_avatar_images = gr.State([user_avatar, maimai_chatbot_avatar_url])
|
| 3370 |
maimai_chatbot_description = """Hi,我是迷人的【麥麥】,我在這裡等著和你一起探索新知,任何疑問都可以向我提出!\n
|
chatbot.py
CHANGED
|
@@ -1,6 +1,7 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
import json
|
| 3 |
import requests
|
|
|
|
| 4 |
|
| 5 |
class Chatbot:
|
| 6 |
def __init__(self, config):
|
|
@@ -43,8 +44,10 @@ class Chatbot:
|
|
| 43 |
try:
|
| 44 |
messages = self.prepare_messages(chat_history, user_message)
|
| 45 |
system_prompt = self.instructions
|
|
|
|
| 46 |
service_type = self.ai_model_name
|
| 47 |
response_text = self.chat_with_service(service_type, system_prompt, messages)
|
|
|
|
| 48 |
except Exception as e:
|
| 49 |
print(f"Error: {e}")
|
| 50 |
response_text = "學習精靈有點累,請稍後再試!"
|
|
@@ -77,8 +80,8 @@ class Chatbot:
|
|
| 77 |
return self.chat_with_groq(service_type, system_prompt, messages)
|
| 78 |
elif service_type == 'claude3':
|
| 79 |
return self.chat_with_claude3(system_prompt, messages)
|
| 80 |
-
elif service_type
|
| 81 |
-
return self.
|
| 82 |
else:
|
| 83 |
raise gr.Error("不支持的服务类型")
|
| 84 |
|
|
@@ -92,7 +95,6 @@ class Chatbot:
|
|
| 92 |
model = "gpt-4o"
|
| 93 |
print("======model======")
|
| 94 |
print(model)
|
| 95 |
-
# model = "gpt-3.5-turbo-0125"
|
| 96 |
data = {
|
| 97 |
"data": {
|
| 98 |
"messages": messages,
|
|
@@ -155,29 +157,157 @@ class Chatbot:
|
|
| 155 |
response_completion = response_body.get('content')[0].get('text').strip()
|
| 156 |
return response_completion
|
| 157 |
|
| 158 |
-
def
|
| 159 |
-
"""使用 Perplexity
|
| 160 |
if not system_prompt.strip():
|
| 161 |
raise ValueError("System prompt cannot be empty")
|
| 162 |
|
| 163 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 164 |
|
| 165 |
print("======model======")
|
| 166 |
-
print(
|
|
|
|
|
|
|
| 167 |
|
| 168 |
try:
|
| 169 |
perplexity_client = self.ai_client
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
max_tokens
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 179 |
|
| 180 |
except Exception as e:
|
| 181 |
-
print(f"Perplexity
|
| 182 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 183 |
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import json
|
| 3 |
import requests
|
| 4 |
+
import re
|
| 5 |
|
| 6 |
class Chatbot:
|
| 7 |
def __init__(self, config):
|
|
|
|
| 44 |
try:
|
| 45 |
messages = self.prepare_messages(chat_history, user_message)
|
| 46 |
system_prompt = self.instructions
|
| 47 |
+
system_prompt += "\n\n告知用戶你現在是誰,第一次加上科目學伴及名字,後面就只說名字就好,但不用每次都說,自然就好,不用每一句都特別說明,口氣請符合給予的人設,請用繁體中文回答"
|
| 48 |
service_type = self.ai_model_name
|
| 49 |
response_text = self.chat_with_service(service_type, system_prompt, messages)
|
| 50 |
+
|
| 51 |
except Exception as e:
|
| 52 |
print(f"Error: {e}")
|
| 53 |
response_text = "學習精靈有點累,請稍後再試!"
|
|
|
|
| 80 |
return self.chat_with_groq(service_type, system_prompt, messages)
|
| 81 |
elif service_type == 'claude3':
|
| 82 |
return self.chat_with_claude3(system_prompt, messages)
|
| 83 |
+
elif service_type in ['perplexity_sonar', 'perplexity_sonar_pro', 'perplexity_r1_1776']:
|
| 84 |
+
return self.chat_with_perplexity(service_type, system_prompt, messages)
|
| 85 |
else:
|
| 86 |
raise gr.Error("不支持的服务类型")
|
| 87 |
|
|
|
|
| 95 |
model = "gpt-4o"
|
| 96 |
print("======model======")
|
| 97 |
print(model)
|
|
|
|
| 98 |
data = {
|
| 99 |
"data": {
|
| 100 |
"messages": messages,
|
|
|
|
| 157 |
response_completion = response_body.get('content')[0].get('text').strip()
|
| 158 |
return response_completion
|
| 159 |
|
| 160 |
+
def chat_with_perplexity(self, service_type, system_prompt, messages):
    """Run one chat turn against the Perplexity API.

    Args:
        service_type: One of "perplexity_sonar", "perplexity_sonar_pro",
            "perplexity_r1_1776"; selects the underlying model (unknown
            values fall back to "sonar").
        system_prompt: Non-empty system instructions for the assistant.
        messages: Chat history as a list of {"role", "content"} dicts;
            user messages are scrubbed in place of known problem tokens.

    Returns:
        The post-processed assistant reply, or a canned fallback string
        when the API call fails or returns an empty/invalid response.

    Raises:
        ValueError: If system_prompt is empty or whitespace-only.
    """
    if not system_prompt.strip():
        raise ValueError("System prompt cannot be empty")

    # Scrub special directives from user messages that tend to confuse
    # the model.
    for msg in messages:
        if msg["role"] == "user":
            # Normalize the common "/n" typo into a real newline.
            msg["content"] = msg["content"].replace("/n", "\n")
            # Drop the injected "answer in Traditional Chinese" directive.
            msg["content"] = re.sub(r'\(請一定要用繁體中文回答.*?\)', '', msg["content"])

    # BUGFIX: append the Markdown/LaTeX formatting guidance BEFORE the
    # system prompt is copied into clean_messages. The original code
    # appended it afterwards, so the guidance was never actually sent.
    system_prompt += (
        "\n\n重要:使用 LaTeX 數學符號時,請確保格式正確。數學表達式應該使用 $ 符號包圍,"
        "例如:$7 \\times 10^4$。不要使用 ** 符號來強調數字,而是使用 $ 符號,"
        "例如:$7$個萬 ($7 \\times 10000$)。不要使用 \\text 或 \\quad 等命令。"
    )

    # System prompt goes first; skip any system messages already present
    # to avoid sending duplicate system instructions.
    clean_messages = [{"role": "system", "content": system_prompt}]
    clean_messages.extend(msg for msg in messages if msg["role"] != "system")

    # Map the service type onto the Perplexity model name.
    model_name_dict = {
        "perplexity_sonar": "sonar",
        "perplexity_sonar_pro": "sonar-pro",
        "perplexity_r1_1776": "r1-1776",
    }
    model = model_name_dict.get(service_type, "sonar")

    print("======model======")
    print(model)
    print("======clean_messages======")
    # Only print the leading system message to keep logs short.
    print(json.dumps(clean_messages[:1], ensure_ascii=False))

    try:
        perplexity_client = self.ai_client
        # r1-1776 emits long chain-of-thought style answers, so give it a
        # larger completion budget than the sonar models.
        max_tokens = 1000 if service_type == "perplexity_r1_1776" else 500
        response = perplexity_client.chat.completions.create(
            model=model,
            messages=clean_messages,
            max_tokens=max_tokens,
            temperature=0.7,
            top_p=0.9,
        )

        # Guard against structurally invalid responses.
        if not hasattr(response, 'choices') or len(response.choices) == 0:
            print("警告:API 回傳無效回應結構")
            return "學習精靈暫時無法回答,請稍後再試!"

        response_completion = response.choices[0].message.content
        if not response_completion or response_completion.strip() == "":
            print("警告:API 回傳空回應")
            return "學習精靈暫時無法回答,請稍後再試!"

        # Strip reasoning tags and normalize Markdown/LaTeX formatting.
        response_completion = self._process_response(response_completion)

        print("======processed_response======")
        print(response_completion)

        return response_completion.strip()

    except Exception as e:
        print(f"Perplexity API Error: {e}")
        print(f"Error details: {str(e)}")
        # Fall back to the lighter "sonar" model when r1-1776 fails.
        # BUGFIX: use self.ai_client here — the local perplexity_client
        # may be unbound if the exception fired before it was assigned.
        try:
            if service_type == "perplexity_r1_1776":
                print("嘗試使用備用模型 sonar")
                backup_response = self.ai_client.chat.completions.create(
                    model="sonar",
                    messages=clean_messages,
                    max_tokens=500,
                    temperature=0.7,
                )
                backup_completion = backup_response.choices[0].message.content
                backup_completion = self._process_response(backup_completion)
                return backup_completion.strip()
        except Exception as backup_error:
            print(f"備用模型也失敗: {backup_error}")

        return "學習精靈暫時無法回答,請稍後再試!"
| 258 |
+
def _process_response(self, response_text):
|
| 259 |
+
"""處理回應中的思考過程標籤和修正 LaTeX 格式"""
|
| 260 |
+
# 移除 <think>...</think> 區塊
|
| 261 |
+
import re
|
| 262 |
+
response_text = re.sub(r'<think>.*?</think>', '', response_text, flags=re.DOTALL)
|
| 263 |
+
|
| 264 |
+
# 移除其他可能的標籤或指令
|
| 265 |
+
response_text = re.sub(r'(偷偷說.*?)', '', response_text, flags=re.DOTALL)
|
| 266 |
+
|
| 267 |
+
# 修正 Markdown 格式
|
| 268 |
+
# 1. 確保項目符號前後有正確的空格和換行
|
| 269 |
+
response_text = re.sub(r'(\n|^)(\s*)([-•○●◦])\s*', r'\1\2\3 ', response_text)
|
| 270 |
+
|
| 271 |
+
# 2. 確保數字列表前後有正確的空格和換行
|
| 272 |
+
response_text = re.sub(r'(\n|^)(\s*)(\d+\.)\s*', r'\1\2\3 ', response_text)
|
| 273 |
+
|
| 274 |
+
# 3. 修正 LaTeX 格式
|
| 275 |
+
# 移除不正確的 LaTeX 命令
|
| 276 |
+
response_text = re.sub(r'\\text\{([^}]+)\}', r'\1', response_text)
|
| 277 |
+
response_text = re.sub(r'\\quad', ' ', response_text)
|
| 278 |
+
|
| 279 |
+
# 4. 修正數學表達式
|
| 280 |
+
# 確保數學表達式中的乘法符號格式正確
|
| 281 |
+
response_text = re.sub(r'(\d+)個「([^」]+)」→\s*(\d+)\\times(\d+)', r'\1個「\2」→ $\3\\times\4$', response_text)
|
| 282 |
+
|
| 283 |
+
# 5. 修正單獨數字的 LaTeX 格式
|
| 284 |
+
# 將單獨的數字包裹在 $ 符號中
|
| 285 |
+
response_text = re.sub(r'([^$\d])(\d+)([^$\d\w])', r'\1$\2$\3', response_text)
|
| 286 |
+
|
| 287 |
+
# 6. 修正連續的 LaTeX 表達式
|
| 288 |
+
# 確保連續的 LaTeX 表達式之間有空格
|
| 289 |
+
response_text = re.sub(r'\$([^$]+)\$\$([^$]+)\$', r'$\1$ $\2$', response_text)
|
| 290 |
+
|
| 291 |
+
# 7. 移除單獨的 $ 符號
|
| 292 |
+
response_text = re.sub(r'(?<!\$)\$(?!\$)\s*$', '', response_text)
|
| 293 |
+
response_text = re.sub(r'^\s*\$(?!\$)', '', response_text)
|
| 294 |
+
response_text = re.sub(r'(?<!\$)\$(?!\$)\s*\n', '\n', response_text)
|
| 295 |
+
|
| 296 |
+
# 8. 確保成對的 $ 符號
|
| 297 |
+
dollar_count = response_text.count('$')
|
| 298 |
+
if dollar_count % 2 != 0:
|
| 299 |
+
# 如果 $ 符號數量為奇數,移除最後一個 $
|
| 300 |
+
last_dollar_pos = response_text.rfind('$')
|
| 301 |
+
if last_dollar_pos != -1:
|
| 302 |
+
response_text = response_text[:last_dollar_pos] + response_text[last_dollar_pos+1:]
|
| 303 |
+
|
| 304 |
+
# 9. 修正錯誤的粗體標記
|
| 305 |
+
# 將 **數字** 格式修正為正確的數字格式
|
| 306 |
+
response_text = re.sub(r'\*\*(\d+)\*\*', r'$\1$', response_text)
|
| 307 |
+
|
| 308 |
+
# 如果處理後的回應為空,返回原始回應
|
| 309 |
+
if not response_text.strip():
|
| 310 |
+
return "學習精靈暫時無法回答,請稍後再試!"
|
| 311 |
+
|
| 312 |
+
return response_text
|
| 313 |
|