Song committed on
Commit
25c6441
·
1 Parent(s): 4125246
Files changed (1) hide show
  1. app.py +85 -51
app.py CHANGED
@@ -8,12 +8,19 @@ from typing import List, Dict, Any
8
  from contextlib import asynccontextmanager
9
  from fastapi import FastAPI, Request, HTTPException, status
10
  import uvicorn
11
- from linebot.v3.messaging import MessagingApi, TextMessage, ReplyMessageRequest # 更新:新增 ReplyMessageRequest
 
 
 
 
 
 
 
 
 
 
12
  from openai import OpenAI
13
- import hmac
14
- import base64
15
- import hashlib
16
- from ddgs import DDGS # 更新:從 ddgs 匯入 DDGS
17
 
18
  # ==== CONFIG (從環境變數載入,或使用預設值) ====
19
  def _require_env(var: str) -> str:
@@ -22,20 +29,25 @@ def _require_env(var: str) -> str:
22
  raise RuntimeError(f"FATAL: Missing required environment variable: {var}")
23
  return v
24
 
 
25
  CHANNEL_SECRET = _require_env("CHANNEL_SECRET")
26
  CHANNEL_ACCESS_TOKEN = _require_env("CHANNEL_ACCESS_TOKEN")
27
 
 
28
  LLM_API_CONFIG = {
29
- "base_url": "https://litellm-ekkks8gsocw.dgx-coolify.apmic.ai/",
30
- "api_key": "sk-eT_04m428oAPUD5kUmIhVA",
31
  }
 
 
32
  LLM_MODEL_CONFIG = {
33
- "model": "gpt-oss-120b",
34
- "max_tokens": int(os.getenv("MAX_TOKENS", 127000)),
35
  "temperature": float(os.getenv("TEMPERATURE", 0.3)),
36
  "seed": int(os.getenv("LLM_SEED", 42)),
37
  }
38
 
 
39
  SYSTEM_PROMPT = """你是一個友好的AI助手,請用簡單、親切的中文回覆用戶的問題。
40
  在回應時,不要使用 Markdown 格式,如粗體、斜體或標題。避免使用 * 號或任何特殊符號來格式化。
41
  使用條列式(如 - 或 1. 2. 3.)來整理內容,讓它適合在 LINE 上閱讀。
@@ -44,7 +56,7 @@ SYSTEM_PROMPT = """你是一個友好的AI助手,請用簡單、親切的中
44
  # ---------- 記憶體儲存對話歷史 ----------
45
  conversations: Dict[str, List[Dict[str, str]]] = {} # {user_id: messages_list}
46
 
47
- # ---------- Helper to split long message for LINE API ----------
48
  def split_text_for_line(text: str, max_length: int = 4800) -> List[str]:
49
  if len(text) <= max_length:
50
  return [text]
@@ -62,7 +74,7 @@ def split_text_for_line(text: str, max_length: int = 4800) -> List[str]:
62
 
63
  # ---------- 網路搜尋函數 ----------
64
  def perform_web_search(query: str, max_results: int = 5) -> str:
65
- """使用 DuckDuckGo 進行簡單的網路搜尋,並返回摘要結果。"""
66
  try:
67
  with DDGS() as ddgs:
68
  results = [r for r in ddgs.text(query, max_results=max_results)]
@@ -74,9 +86,10 @@ def perform_web_search(query: str, max_results: int = 5) -> str:
74
  search_summary += f"{i}. {result['title']}: {result['body'][:200]}... (來源: {result['href']})\n"
75
  return search_summary
76
  except Exception as e:
 
77
  return f"搜尋時發生錯誤:{str(e)}。請稍後再試。"
78
 
79
- # ---------- Chat Pipeline ----------
80
  class ChatPipeline:
81
  def __init__(self):
82
  if not LLM_API_CONFIG["api_key"] or not LLM_API_CONFIG["base_url"]:
@@ -95,13 +108,14 @@ class ChatPipeline:
95
  content = response.choices[0].message.content or ""
96
  return content
97
  except Exception as e:
 
98
  raise
99
 
100
  def get_conversation_history(self, user_id: str) -> List[Dict[str, str]]:
101
  return conversations.get(user_id, [])
102
 
103
  def update_conversation_history(self, user_id: str, messages: List[Dict[str, str]]):
104
- # 保留最近10條訊息
105
  if len(messages) > 10:
106
  messages = messages[-10:]
107
  conversations[user_id] = messages
@@ -111,25 +125,25 @@ class ChatPipeline:
111
  del conversations[user_id]
112
 
113
  def answer_question(self, user_id: str, user_text: str) -> str:
114
- if user_text.strip() == "/clear":
115
  self.clear_conversation_history(user_id)
116
  return "對話紀錄已清除!現在開始新的對話。"
117
 
118
- # 新增:進行網路搜尋
119
  search_results = perform_web_search(user_text)
120
 
121
  history = self.get_conversation_history(user_id)
122
  messages = [{"role": "system", "content": SYSTEM_PROMPT}]
123
  messages.extend(history)
124
  messages.append({"role": "user", "content": user_text})
125
- # 新增:將搜尋結果加入到訊息中,讓 LLM 參考
126
  messages.append({"role": "system", "content": f"網路搜尋結果:{search_results}"})
127
 
128
  response = self._llm_call(messages)
129
- response = response.replace('*', '') # 移除所有 '*' 符號
130
- # 更新歷史(包含搜尋結果以維持上下文)
 
131
  history.append({"role": "user", "content": user_text})
132
- history.append({"role": "system", "content": f"網路搜尋結果:{search_results}"})
 
133
  history.append({"role": "assistant", "content": response})
134
  self.update_conversation_history(user_id, history)
135
  return response
@@ -144,49 +158,69 @@ async def lifespan(app: FastAPI):
144
  app = FastAPI(lifespan=lifespan)
145
  chat_pipeline = None
146
 
147
- # LINE Bot API 更新:使用 v3 MessagingApi
148
- line_bot_api = MessagingApi(CHANNEL_ACCESS_TOKEN)
 
 
 
 
 
 
 
149
 
150
  # ---------- LINE Webhook 處理 ----------
151
  @app.post("/webhook")
152
  async def line_webhook(request: Request):
153
- signature = request.headers.get('X-Line-Signature', '')
 
154
  body = await request.body()
155
- hash_ = hmac.new(CHANNEL_SECRET.encode('utf-8'), body, hashlib.sha256).digest()
156
- expected_signature = base64.b64encode(hash_).decode()
157
- if not hmac.compare_digest(signature, expected_signature):
158
- raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid signature")
159
-
160
- body_str = body.decode('utf-8')
161
  try:
162
- payload = json.loads(body_str)
163
- except json.JSONDecodeError:
164
- raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid JSON")
165
-
166
- for event in payload.get('events', []):
167
- user_id = event['source']['userId']
168
- reply_token = event.get('replyToken')
169
- if event['type'] == 'message' and event['message']['type'] == 'text':
170
- user_text = event['message']['text'].strip()
171
- if not user_text:
172
- continue
173
- try:
174
- ai_response = chat_pipeline.answer_question(user_id, user_text)
175
- chunks = split_text_for_line(ai_response)
176
- messages_to_send = [TextMessage(text=chunk) for chunk in chunks]
177
- reply_request = ReplyMessageRequest(
 
 
 
 
 
 
 
 
178
  reply_token=reply_token,
179
- messages=messages_to_send[:5] # LINE5
180
  )
181
- line_bot_api.reply_message(reply_request)
182
- except Exception:
183
- error_message = "抱歉,系統發生錯誤,請稍後再試。"
184
- error_reply_request = ReplyMessageRequest(
 
 
 
185
  reply_token=reply_token,
186
  messages=[TextMessage(text=error_message)]
187
  )
188
- line_bot_api.reply_message(error_reply_request)
 
 
 
 
 
 
189
  return {"status": "ok"}
190
 
191
  if __name__ == "__main__":
192
- uvicorn.run(app, host="0.0.0.0", port=7860)
 
 
8
  from contextlib import asynccontextmanager
9
  from fastapi import FastAPI, Request, HTTPException, status
10
  import uvicorn
11
+ # ----------------- LINE Bot SDK v3 修正引用方式 -----------------
12
+ from linebot.v3.messaging import (
13
+ AsyncApiClient,
14
+ AsyncMessagingApi,
15
+ Configuration,
16
+ TextMessage,
17
+ ReplyMessageRequest
18
+ )
19
+ from linebot.v3.webhook import WebhookParser
20
+ from linebot.v3.exceptions import InvalidSignatureError
21
+ # --------------------------------------------------------------
22
  from openai import OpenAI
23
+ from ddgs import DDGS
 
 
 
24
 
25
  # ==== CONFIG (從環境變數載入,或使用預設值) ====
26
  def _require_env(var: str) -> str:
 
29
  raise RuntimeError(f"FATAL: Missing required environment variable: {var}")
30
  return v
31
 
32
+ # 從環境變數讀取 LINE Bot 的憑證
33
  CHANNEL_SECRET = _require_env("CHANNEL_SECRET")
34
  CHANNEL_ACCESS_TOKEN = _require_env("CHANNEL_ACCESS_TOKEN")
35
 
36
+ # LLM API 設定
37
  LLM_API_CONFIG = {
38
+ "base_url": os.getenv("LLM_BASE_URL", "https://litellm-ekkks8gsocw.dgx-coolify.apmic.ai/"),
39
+ "api_key": os.getenv("LLM_API_KEY", "sk-eT_04m428oAPUD5kUmIhVA"),
40
  }
41
+
42
+ # LLM 模型設定
43
  LLM_MODEL_CONFIG = {
44
+ "model": os.getenv("LLM_MODEL", "gpt-oss-120b"),
45
+ "max_tokens": int(os.getenv("MAX_TOKENS", 4096)), # 調整為一個比較合理的預設值
46
  "temperature": float(os.getenv("TEMPERATURE", 0.3)),
47
  "seed": int(os.getenv("LLM_SEED", 42)),
48
  }
49
 
50
+ # 系統提示詞
51
  SYSTEM_PROMPT = """你是一個友好的AI助手,請用簡單、親切的中文回覆用戶的問題。
52
  在回應時,不要使用 Markdown 格式,如粗體、斜體或標題。避免使用 * 號或任何特殊符號來格式化。
53
  使用條列式(如 - 或 1. 2. 3.)來整理內容,讓它適合在 LINE 上閱讀。
 
56
  # ---------- 記憶體儲存對話歷史 ----------
57
  conversations: Dict[str, List[Dict[str, str]]] = {} # {user_id: messages_list}
58
 
59
+ # ---------- 將長訊息分割以符合 LINE API 限制 ----------
60
  def split_text_for_line(text: str, max_length: int = 4800) -> List[str]:
61
  if len(text) <= max_length:
62
  return [text]
 
74
 
75
  # ---------- 網路搜尋函數 ----------
76
  def perform_web_search(query: str, max_results: int = 5) -> str:
77
+ """使用 DuckDuckGo 進行網路搜尋,並返回摘要結果。"""
78
  try:
79
  with DDGS() as ddgs:
80
  results = [r for r in ddgs.text(query, max_results=max_results)]
 
86
  search_summary += f"{i}. {result['title']}: {result['body'][:200]}... (來源: {result['href']})\n"
87
  return search_summary
88
  except Exception as e:
89
+ print(f"Web search error: {e}")
90
  return f"搜尋時發生錯誤:{str(e)}。請稍後再試。"
91
 
92
+ # ---------- 聊天處理流程 ----------
93
  class ChatPipeline:
94
  def __init__(self):
95
  if not LLM_API_CONFIG["api_key"] or not LLM_API_CONFIG["base_url"]:
 
108
  content = response.choices[0].message.content or ""
109
  return content
110
  except Exception as e:
111
+ print(f"LLM call error: {e}")
112
  raise
113
 
114
  def get_conversation_history(self, user_id: str) -> List[Dict[str, str]]:
115
  return conversations.get(user_id, [])
116
 
117
  def update_conversation_history(self, user_id: str, messages: List[Dict[str, str]]):
118
+ # 保留最近10則訊息 (5組對話)
119
  if len(messages) > 10:
120
  messages = messages[-10:]
121
  conversations[user_id] = messages
 
125
  del conversations[user_id]
126
 
127
  def answer_question(self, user_id: str, user_text: str) -> str:
128
+ if user_text.strip().lower() == "/clear":
129
  self.clear_conversation_history(user_id)
130
  return "對話紀錄已清除!現在開始新的對話。"
131
 
 
132
  search_results = perform_web_search(user_text)
133
 
134
  history = self.get_conversation_history(user_id)
135
  messages = [{"role": "system", "content": SYSTEM_PROMPT}]
136
  messages.extend(history)
137
  messages.append({"role": "user", "content": user_text})
 
138
  messages.append({"role": "system", "content": f"網路搜尋結果:{search_results}"})
139
 
140
  response = self._llm_call(messages)
141
+ response = response.replace('*', '')
142
+
143
+ # 更新歷史紀錄
144
  history.append({"role": "user", "content": user_text})
145
+ # 為了節省 token,可以選擇不將搜尋結果存入歷史
146
+ # history.append({"role": "system", "content": f"網路搜尋結果:{search_results}"})
147
  history.append({"role": "assistant", "content": response})
148
  self.update_conversation_history(user_id, history)
149
  return response
 
158
  app = FastAPI(lifespan=lifespan)
159
  chat_pipeline = None
160
 
161
# ----------------- LINE Bot SDK v3 client setup -----------------
# Build the API configuration from the channel access token, then wire up
# the async client stack that the webhook handler below relies on.
configuration = Configuration(access_token=CHANNEL_ACCESS_TOKEN)
async_api_client = AsyncApiClient(configuration)
line_bot_api = AsyncMessagingApi(async_api_client)
# Parser that validates the X-Line-Signature header and deserializes
# incoming webhook events.
parser = WebhookParser(CHANNEL_SECRET)
# ----------------------------------------------------------------
170
 
171
  # ---------- LINE Webhook 處理 ----------
172
@app.post("/webhook")
async def line_webhook(request: Request):
    """Receive LINE webhook calls and answer text messages via the chat pipeline.

    The request signature is verified with the channel secret; an invalid
    (or absent) signature yields HTTP 400. Non-text events are ignored.
    """
    # Use .get so a missing header does not raise KeyError (which would
    # surface as an HTTP 500); an empty string simply fails validation below.
    signature = request.headers.get('X-Line-Signature', '')
    body = await request.body()
    try:
        events = parser.parse(body.decode(), signature)
    except InvalidSignatureError:
        raise HTTPException(status_code=400, detail="Invalid signature")

    for event in events:
        # Only text-message events are handled.
        if event.type != 'message' or event.message.type != 'text':
            continue

        user_id = event.source.user_id
        reply_token = event.reply_token
        user_text = event.message.text.strip()

        if not user_text:
            continue

        try:
            ai_response = chat_pipeline.answer_question(user_id, user_text)
            chunks = split_text_for_line(ai_response)
            messages_to_send = [TextMessage(text=chunk) for chunk in chunks]

            # reply_message is async in the v3 SDK, so it must be awaited.
            await line_bot_api.reply_message(
                ReplyMessageRequest(
                    reply_token=reply_token,
                    messages=messages_to_send[:5]  # LINE allows at most 5 messages per reply
                )
            )
        except Exception as e:
            print(f"Error processing message: {e}")
            error_message = "抱歉,系統發生錯誤,請稍後再試。"
            # Best effort only: the reply token may already be consumed or
            # expired, and a failure here must not turn the webhook response
            # into a 500 (LINE would then retry the whole delivery).
            try:
                await line_bot_api.reply_message(
                    ReplyMessageRequest(
                        reply_token=reply_token,
                        messages=[TextMessage(text=error_message)]
                    )
                )
            except Exception as notify_err:
                print(f"Failed to send error reply: {notify_err}")

    return {"status": "ok"}
218
+
219
# Liveness probe endpoint for the hosting platform.
@app.get("/health")
async def health_check():
    """Report that the service is up."""
    return {"status": "ok"}
223
 
224
if __name__ == "__main__":
    # Bind on all interfaces; the port is configurable via the PORT env var
    # and defaults to 7860.
    uvicorn.run(app, host="0.0.0.0", port=int(os.getenv("PORT", 7860)))