0926
Song committed · Commit 98feb3e · 1 Parent(s): 6b22529

Files changed:
- app.py +51 -32
- requirements.txt +2 -1
app.py CHANGED
@@ -21,7 +21,7 @@ from linebot.v3.exceptions import InvalidSignatureError
 # --------------------------------------------------------------
 from openai import OpenAI
 from tavily import TavilyClient  # new: Tavily client
-from sentence_transformers import SentenceTransformer, util  #
+from sentence_transformers import SentenceTransformer, util  # new: embedding-similarity ranking (CPU-friendly)
 
 # ==== CONFIG (loaded from environment variables, with fallback defaults) ====
 def _require_env(var: str) -> str:
@@ -34,8 +34,8 @@ def _require_env(var: str) -> str:
 CHANNEL_SECRET = _require_env("CHANNEL_SECRET")
 CHANNEL_ACCESS_TOKEN = _require_env("CHANNEL_ACCESS_TOKEN")
 
-#
-TAVILY_API_KEY = "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"
+# Tavily API key (read from an environment variable for safety)
+TAVILY_API_KEY = _require_env("TAVILY_API_KEY")
 
 # LLM API settings
 LLM_API_CONFIG = {
@@ -43,25 +43,22 @@ LLM_API_CONFIG = {
     "api_key": os.getenv("LLM_API_KEY", "sk-eT_04m428oAPUD5kUmIhVA"),
 }
 
-# LLM model settings
+# LLM model settings (switched to azure-gpt-4.1; lowered max_tokens to avoid timeouts)
 LLM_MODEL_CONFIG = {
-    "model": os.getenv("LLM_MODEL", "gpt-
-    "max_tokens": int(os.getenv("MAX_TOKENS",
+    "model": os.getenv("LLM_MODEL", "azure-gpt-4.1"),
+    "max_tokens": int(os.getenv("MAX_TOKENS", 2000)),  # lower cap for faster responses
     "temperature": float(os.getenv("TEMPERATURE", 0.3)),
     "seed": int(os.getenv("LLM_SEED", 42)),
 }
 
-#
+# System prompt (trimmed; emphasizes using the newest data)
 SYSTEM_PROMPT = """你是一個友好的AI助手,請用簡單、親切的文字回覆用戶的問題。
-
-
-
-
-
-
-聖經問題一律從希伯來文的角度出發回答,並確認來源的可靠性。
-如果回應內容可能很長,請優先提供完整且正確的資訊,但盡量結構化以保持清晰。如果預估超過20000字元,請先給詳細摘要,然後分段說明細節。
-"""
+回答複雜問題時,先給概念,再給詳細解釋。
+使用條列式(如 - 或 1. 2. 3.)整理內容,讓它適合手機閱讀。
+使用提供的網路搜尋結果作為參考,只用高度相關結果。如果新資料比訓練資料新,以新資料為主並驗證正確性。
+無相關資料時,用知識回答;無知識時,說不知道。
+聖經問題從希伯來文角度回答,確認來源可靠性。
+回應盡量結構化,清晰。"""
 
 # ---------- In-memory conversation history ----------
 conversations: Dict[str, List[Dict[str, str]]] = {}  # {user_id: messages_list}
@@ -85,9 +82,16 @@ def split_text_for_line(text: str, max_length: int = 4900) -> List[str]:
         text = text[split_pos:].lstrip()
     return chunks
 
-# ----------
-def
-
+# ---------- Rough token estimate (used to cap input length) ----------
+def estimate_tokens(messages: List[Dict[str, str]]) -> int:
+    total = 0
+    for msg in messages:
+        total += len(msg["content"].split()) * 1.3  # rough token estimate
+    return int(total)
+
+# ---------- Web search (Tavily API, top-5 results, embedding-similarity ranking, CPU-friendly) ----------
+def perform_web_search(query: str, max_results: int = 5) -> str:  # now top 5
+    """Search the web with Tavily, rank results by embedding (semantic) similarity, and return a summary. Logs/prints the retrieval steps."""
     print(f"開始網路搜尋:查詢詞 = '{query}',最大結果數 = {max_results}")
     try:
         client = TavilyClient(api_key=TAVILY_API_KEY)
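One caveat on the estimate_tokens heuristic added above: len(msg["content"].split()) counts whitespace-separated words, and Chinese text contains no spaces, so an entire Chinese message can count as a single "word" and the estimate collapses to nearly zero. A minimal sketch of a closer estimate using tiktoken (an assumed extra dependency, not in this commit; cl100k_base roughly matches GPT-4-family models):

import tiktoken

def estimate_tokens_bpe(messages: list[dict[str, str]]) -> int:
    # Count BPE tokens directly instead of splitting on whitespace,
    # which stays accurate for Chinese text.
    enc = tiktoken.get_encoding("cl100k_base")
    return sum(len(enc.encode(msg["content"])) for msg in messages)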
@@ -96,11 +100,11 @@ def perform_web_search(query: str, max_results: int = 10) -> str:
             print("搜尋完成:沒有找到相關結果。")
             return "沒有找到相關的網路搜尋結果。"
 
-        # Load a lightweight embedding model (CPU
+        # Load a lightweight embedding model (all-MiniLM-L6-v2, CPU-friendly, no GPU dependency)
         embedder = SentenceTransformer('all-MiniLM-L6-v2')
         query_emb = embedder.encode(query)
 
-        # Compute a similarity score for each result
+        # Compute a similarity score for each result (semantic ranking)
         results_with_scores = []
         for result in response['results']:
             content = result['content']
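As written, SentenceTransformer('all-MiniLM-L6-v2') is constructed inside perform_web_search, so the model weights are reloaded from disk on every search request. A minimal sketch of loading it once and reusing it (same model as the diff; the helper name get_embedder is ours):

from functools import lru_cache
from sentence_transformers import SentenceTransformer

@lru_cache(maxsize=1)
def get_embedder() -> SentenceTransformer:
    # First call loads the weights; later calls return the cached instance.
    return SentenceTransformer('all-MiniLM-L6-v2')

Inside perform_web_search, embedder = get_embedder() then leaves only the encode calls on the per-request path.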
@@ -108,7 +112,7 @@ def perform_web_search(query: str, max_results: int = 10) -> str:
             score = util.cos_sim(query_emb, content_emb)[0][0].item()
             results_with_scores.append((score, result))
 
-        # Sort and filter results with similarity > 0.
+        # Sort and keep results with similarity > 0.3 (to ensure relevance)
         results_with_scores.sort(key=lambda x: x[0], reverse=True)
         relevant_results = [res for score, res in results_with_scores if score > 0.3]
 
@@ -116,9 +120,9 @@ def perform_web_search(query: str, max_results: int = 10) -> str:
             print("搜尋完成:沒有高度相關的結果。")
             return "沒有找到高度相關的網路搜尋結果。"
 
-        search_summary = "
+        search_summary = "以下是相關的網路搜尋結果摘要(已按文字相似度排序):\n"
         search_summary += f"AI總結:{response.get('answer', '無總結可用')}\n\n"
-        for i, result in enumerate(relevant_results, 1):
+        for i, result in enumerate(relevant_results[:5], 1):  # cap at top 5
             print(f"結果 {i}: 標題 = '{result['title']}',內容 = '{result['content'][:200]}...',來源 = '{result['url']}',相似度 = {results_with_scores[i-1][0]:.2f}")
             search_summary += f"{i}. {result['title']}: {result['content'][:200]}... (來源: {result['url']})\n"
         print(f"搜尋完成:總結果數 = {len(response['results'])}, 相關結果數 = {len(relevant_results)}")
@@ -127,34 +131,45 @@ def perform_web_search(query: str, max_results: int = 10) -> str:
         print(f"網路搜尋錯誤:{e}")
         return f"搜尋時發生錯誤:{str(e)}。請稍後再試。"
 
-# ---------- Chat pipeline ----------
+# ---------- Chat pipeline (adds retry and timeout) ----------
+from tenacity import retry, stop_after_attempt, wait_exponential  # requires: pip install tenacity
+
 class ChatPipeline:
     def __init__(self):
         if not LLM_API_CONFIG["api_key"] or not LLM_API_CONFIG["base_url"]:
             raise ValueError("LLM API Key or Base URL is not configured.")
         self.llm_client = OpenAI(api_key=LLM_API_CONFIG["api_key"], base_url=LLM_API_CONFIG["base_url"])
 
+    @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
     def _llm_call(self, messages: List[Dict[str, str]]) -> str:
         try:
+            # Estimate tokens and print for monitoring
+            token_est = estimate_tokens(messages)
+            print(f"LLM 呼叫:估計 token = {token_est}")
+            if token_est > 50000:
+                raise ValueError("輸入過長,超過 token 限制")
+
             response = self.llm_client.chat.completions.create(
                 model=LLM_MODEL_CONFIG["model"],
                 messages=messages,
                 max_tokens=LLM_MODEL_CONFIG["max_tokens"],
                 temperature=LLM_MODEL_CONFIG["temperature"],
                 seed=LLM_MODEL_CONFIG["seed"],
+                timeout=30.0,  # 30-second timeout
             )
             content = response.choices[0].message.content or ""
             return content
         except Exception as e:
-            print(f"LLM call error: {e}")
+            print(f"LLM call error (retry): {e}")
             raise
 
     def get_conversation_history(self, user_id: str) -> List[Dict[str, str]]:
         return conversations.get(user_id, [])
 
     def update_conversation_history(self, user_id: str, messages: List[Dict[str, str]]):
-        #
-
+        # Trim history: keep the most recent 20 messages (about 10 turns)
+        recent = messages[-20:]
+        conversations[user_id] = recent
 
     def clear_conversation_history(self, user_id: str):
         if user_id in conversations:
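The @retry decorator added above retries _llm_call up to 3 attempts, with exponentially growing waits clamped between 4 and 10 seconds. Because it matches any exception, though, the deterministic "input too long" ValueError raised inside the method gets retried too, wasting two pointless attempts; the mid-module tenacity import would also conventionally sit with the other imports at the top of the file. A sketch that restricts retries to transient failures, assuming the exception classes exposed by openai>=1.0 and reusing this module's LLM_MODEL_CONFIG (the function name call_llm is ours):

from openai import APIConnectionError, APITimeoutError, OpenAI, RateLimitError
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential

@retry(
    retry=retry_if_exception_type((APIConnectionError, APITimeoutError, RateLimitError)),
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10),
)
def call_llm(client: OpenAI, messages: list[dict[str, str]]) -> str:
    # Transient network and rate-limit failures back off and retry;
    # anything else (e.g. the token-limit ValueError) fails fast.
    response = client.chat.completions.create(
        model=LLM_MODEL_CONFIG["model"],
        messages=messages,
        timeout=30.0,
    )
    return response.choices[0].message.content or ""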
@@ -173,19 +188,18 @@ class ChatPipeline:
         messages = [{"role": "system", "content": SYSTEM_PROMPT}]
         messages.extend(history)
         messages.append({"role": "user", "content": user_text})
-
+        if "沒有找到" not in search_results:  # only inject when there are results
+            messages.append({"role": "system", "content": f"網路搜尋結果:{search_results}"})
 
         response = self._llm_call(messages)
         response = response.replace('*', '')
 
         # Update conversation history
         history.append({"role": "user", "content": user_text})
-        # To save tokens, optionally skip storing search results in history
-        # history.append({"role": "system", "content": f"網路搜尋結果:{search_results}"})
         history.append({"role": "assistant", "content": response})
         self.update_conversation_history(user_id, history)
 
-        # If the response is long, check the chunk count; more than 5
+        # If the response is long, check the chunk count; more than 5 triggers a summary
         chunks = split_text_for_line(response)
         if len(chunks) > 5:
             summary_prompt = [{"role": "system", "content": "請將以下內容生成一個簡潔但完整的中文摘要,保留關鍵事實和細節,長度控制在20000字元內。"}]
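Note that the new guard keys off the literal wording of perform_web_search's "not found" messages (if "沒有找到" not in search_results), so rewording those strings would silently change behavior, and error messages ("搜尋時發生錯誤…") still pass the check and get injected into the prompt. A sentinel-free sketch (the wrapper name is ours):

from typing import Optional

def perform_web_search_or_none(query: str) -> Optional[str]:
    # Map the sentinel strings to None so callers can test `is not None`
    # instead of matching on message wording; error messages are dropped too.
    summary = perform_web_search(query)
    if summary.startswith("沒有找到") or summary.startswith("搜尋時發生錯誤"):
        return None
    return summary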
@@ -301,6 +315,11 @@ async def line_webhook(request: Request):
 async def health_check():
     return {"status": "ok"}
 
+# Root route, to avoid 404s
+@app.get("/")
+async def root():
+    return {"message": "LINE Bot is running"}
+
 if __name__ == "__main__":
     port = int(os.getenv("PORT", 7860))
     uvicorn.run(app, host="0.0.0.0", port=port)
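A quick smoke test for the new root route without deploying (a sketch: it assumes app.py exposes the FastAPI instance as app, that httpx, which TestClient wraps, is installed, and that the required environment variables are set, since importing app.py runs _require_env at module load):

from fastapi.testclient import TestClient

from app import app

client = TestClient(app)
resp = client.get("/")
print(resp.status_code, resp.json())  # expect: 200 {'message': 'LINE Bot is running'}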
requirements.txt
CHANGED
@@ -3,4 +3,5 @@ uvicorn
 line-bot-sdk
 openai
 tavily-python
-sentence-transformers
+sentence-transformers
+tenacity