Song committed on
Commit
841b5e8
·
1 Parent(s): 1929e96
Files changed (1) hide show
  1. app.py +79 -113
app.py CHANGED
@@ -2,12 +2,12 @@
2
  # -*- coding: utf-8 -*-
3
  # ---------- 環境與快取設定 (應置於最前) ----------
4
  import os
5
- import time
6
- from typing import List, Dict, Any
7
  from contextlib import asynccontextmanager
8
- from fastapi import FastAPI, Request, HTTPException, status
9
  import uvicorn
10
- # ----------------- LINE Bot SDK v3 修正引用方式 -----------------
 
11
  from linebot.v3.messaging import (
12
  AsyncApiClient,
13
  AsyncMessagingApi,
@@ -17,40 +17,35 @@ from linebot.v3.messaging import (
17
  )
18
  from linebot.v3.webhook import WebhookParser
19
  from linebot.v3.exceptions import InvalidSignatureError
20
- # --------------------------------------------------------------
21
  from openai import OpenAI
22
- from tavily import TavilyClient # Tavily 客戶端
23
- from sentence_transformers import SentenceTransformer, util # 用於向量相似度排序(CPU友好)
 
24
 
25
- # ==== CONFIG (從環境變數載入,或使用預設值) ====
26
  def _require_env(var: str) -> str:
27
  v = os.getenv(var)
28
  if not v:
29
  raise RuntimeError(f"FATAL: Missing required environment variable: {var}")
30
  return v
31
 
32
- # 從環境變數讀取 LINE Bot 的憑證
33
  CHANNEL_SECRET = _require_env("CHANNEL_SECRET")
34
  CHANNEL_ACCESS_TOKEN = _require_env("CHANNEL_ACCESS_TOKEN")
35
-
36
- # Tavily API Key (強制從環境變數讀取,移除硬編碼)
37
  TAVILY_API_KEY = _require_env("TAVILY_API_KEY")
38
 
39
- # LLM API 設定(改用 OpenRouter)
40
  LLM_API_CONFIG = {
41
  "base_url": os.getenv("LLM_BASE_URL", "https://litellm-ekkks8gsocw.dgx-coolify.apmic.ai/"),
42
- "api_key": _require_env("OPENROUTER_API_KEY"), # 強制要求 OpenRouter API Key
43
  }
44
 
45
- # LLM 模型設定 (預設改用 gpt-4o,性價比高)
46
  LLM_MODEL_CONFIG = {
47
- "model": os.getenv("LLM_MODEL", 'gemini-3-pro'),
48
  "max_tokens": int(os.getenv("MAX_TOKENS", 2000)),
49
  "temperature": float(os.getenv("TEMPERATURE", 0.3)),
50
  "seed": int(os.getenv("LLM_SEED", 42)),
51
  }
52
 
53
- # 系統提示詞(保持原樣)
54
  SYSTEM_PROMPT = """你是一個友好的AI助手,請用簡單、親切的文字回覆用戶的問題。
55
  回答複雜問題時,先給概念,再給詳細解釋。
56
  使用條列式(如 - 或 1. 2. 3.)整理內容,讓它適合手機閱讀。
@@ -60,13 +55,11 @@ SYSTEM_PROMPT = """你是一個友好的AI助手,請用簡單、親切的文
60
  聖經問題從希伯來文角度回答,確認來源可靠性。
61
  回應盡量結構化,清晰。"""
62
 
63
- # ---------- 記憶體儲存對話歷史 ----------
64
- conversations: Dict[str, List[Dict[str, str]]] = {} # {user_id: messages_list}
 
65
 
66
- # ---------- 儲存待發送的剩餘 chunks ----------
67
- pending_chunks: Dict[str, List[str]] = {} # {user_id: remaining_chunks}
68
-
69
- # ---------- 將長訊息分割以符合 LINE API 限制 ----------
70
  def split_text_for_line(text: str, max_length: int = 4900) -> List[str]:
71
  if len(text) <= max_length:
72
  return [text]
@@ -82,109 +75,90 @@ def split_text_for_line(text: str, max_length: int = 4900) -> List[str]:
82
  text = text[split_pos:].lstrip()
83
  return chunks
84
 
85
- # ---------- 估算 token (粗略,用於限制輸入長度) ----------
86
  def estimate_tokens(messages: List[Dict[str, str]]) -> int:
87
  total = 0
88
  for msg in messages:
89
- total += len(msg["content"].split()) * 1.3 # 粗估 token
90
- return total
91
 
92
- # ---------- 網路搜尋函數(優化:嵌入模型由 ChatPipeline 預載) ----------
93
  def perform_web_search(query: str, max_results: int = 5) -> str:
94
- """使用 Tavily 進行網路搜尋,計算向量相似度排序結果,並返回摘要。"""
95
- print(f"開始網路搜尋:查詢詞 = '{query}',最大結果數 = {max_results}")
96
  try:
97
  client = TavilyClient(api_key=TAVILY_API_KEY)
98
  response = client.search(query, max_results=max_results, include_answer=True)
99
- if not response['results']:
100
- print("搜尋完成:沒有找到相關結果。")
101
  return "沒有找到相關的網路搜尋結果。"
102
 
103
- # 使用 ChatPipeline 中預載的 embedder
104
  embedder = chat_pipeline.embedder
105
  query_emb = embedder.encode(query)
106
 
107
  results_with_scores = []
108
  for result in response['results']:
109
- content = result['content']
110
- content_emb = embedder.encode(content)
111
  score = util.cos_sim(query_emb, content_emb)[0][0].item()
112
  results_with_scores.append((score, result))
113
 
114
  results_with_scores.sort(key=lambda x: x[0], reverse=True)
115
- relevant_results = [res for score, res in results_with_scores if score > 0.3]
116
 
117
- if not relevant_results:
118
- print("搜尋完成:沒有高度相關的結果。")
 
119
  return "沒有找到高度相關的網路搜尋結果。"
120
 
121
- search_summary = "以下是相關的網路搜尋結果摘要(已按文字相似度排序):\n"
122
  search_summary += f"AI總結:{response.get('answer', '無總結可用')}\n\n"
123
- for i, result in enumerate(relevant_results[:5], 1):
124
- score = results_with_scores[i-1][0]
125
- print(f"結果 {i}: 標題 = '{result['title']}',內容 = '{result['content'][:200]}...',來源 = '{result['url']}',相似度 = {score:.2f}")
126
- search_summary += f"{i}. {result['title']}: {result['content'][:200]}... (來源: {result['url']})\n"
127
- print(f"搜尋完成:總結果數 = {len(response['results'])}, 相關結果數 = {len(relevant_results)}")
128
  return search_summary
 
129
  except Exception as e:
130
  print(f"網路搜尋錯誤:{e}")
131
- return f"搜尋時發生錯誤:{str(e)}。請稍後再試。"
132
-
133
- # ---------- 聊天處理流程 (新增 retry 和 timeout) ----------
134
- from tenacity import retry, stop_after_attempt, wait_exponential
135
 
 
136
  class ChatPipeline:
137
  def __init__(self):
138
- if not LLM_API_CONFIG["api_key"] or not LLM_API_CONFIG["base_url"]:
139
- raise ValueError("LLM API Key or Base URL is not configured.")
140
-
141
- # 預載入嵌入模型(大幅提升搜尋速度)
142
  self.embedder = SentenceTransformer('all-MiniLM-L6-v2')
143
-
144
- # OpenAI client(相容 OpenRouter,並加入建議 headers)
145
  self.llm_client = OpenAI(
146
  api_key=LLM_API_CONFIG["api_key"],
147
  base_url=LLM_API_CONFIG["base_url"],
148
  default_headers={
149
- "HTTP-Referer": os.getenv("SITE_URL", "https://your-line-bot.example.com"), # 建議設定你的網站域名
150
- "X-Title": os.getenv("SITE_NAME", "My LINE Bot"), # 建議設定 Bot 名稱
151
  }
152
  )
153
 
154
  @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
155
  def _llm_call(self, messages: List[Dict[str, str]]) -> str:
156
- try:
157
- token_est = estimate_tokens(messages)
158
- print(f"LLM 呼叫:估計 token = {token_est}")
159
- if token_est > 50000:
160
- raise ValueError("輸入過長,超過 token 限制")
161
-
162
- response = self.llm_client.chat.completions.create(
163
- model=LLM_MODEL_CONFIG["model"],
164
- messages=messages,
165
- max_tokens=LLM_MODEL_CONFIG["max_tokens"],
166
- temperature=LLM_MODEL_CONFIG["temperature"],
167
- seed=LLM_MODEL_CONFIG["seed"],
168
- timeout=30.0,
169
- )
170
- content = response.choices[0].message.content or ""
171
- return content
172
- except Exception as e:
173
- print(f"LLM call error (retry): {e}")
174
- raise
175
 
176
  def get_conversation_history(self, user_id: str) -> List[Dict[str, str]]:
177
  return conversations.get(user_id, [])
178
 
179
  def update_conversation_history(self, user_id: str, messages: List[Dict[str, str]]):
180
- recent = messages[-20:]
181
- conversations[user_id] = recent
182
 
183
  def clear_conversation_history(self, user_id: str):
184
- if user_id in conversations:
185
- del conversations[user_id]
186
- if user_id in pending_chunks:
187
- del pending_chunks[user_id]
188
 
189
  def answer_question(self, user_id: str, user_text: str) -> str:
190
  if user_text.strip().lower() == "/clear":
@@ -197,6 +171,7 @@ class ChatPipeline:
197
  messages = [{"role": "system", "content": SYSTEM_PROMPT}]
198
  messages.extend(history)
199
  messages.append({"role": "user", "content": user_text})
 
200
  if "沒有找到" not in search_results:
201
  messages.append({"role": "system", "content": f"網路搜尋結果:{search_results}"})
202
 
@@ -209,15 +184,16 @@ class ChatPipeline:
209
 
210
  chunks = split_text_for_line(response)
211
  if len(chunks) > 5:
212
- summary_prompt = [{"role": "system", "content": "請將以下內容生成一個簡潔但完整的中文摘要,保留關鍵事實和細節,長度控制在2000字元內。"}]
213
- summary_prompt.append({"role": "user", "content": response})
214
- summary = self._llm_call(summary_prompt)
215
- summary = summary.replace('*', '')
216
- return summary + "\n(完整回應過長,已提供摘要。如需細節,請分次詢問。)"
 
217
 
218
  return response
219
 
220
- # ---------- FastAPI 應用程式設置 ----------
221
  @asynccontextmanager
222
  async def lifespan(app: FastAPI):
223
  global chat_pipeline
@@ -225,19 +201,18 @@ async def lifespan(app: FastAPI):
225
  yield
226
 
227
  app = FastAPI(lifespan=lifespan)
228
- chat_pipeline = None # 會在 lifespan 中初始化
229
 
230
- # ----------------- LINE Bot API v3 初始化 -----------------
231
  configuration = Configuration(access_token=CHANNEL_ACCESS_TOKEN)
232
  async_api_client = AsyncApiClient(configuration)
233
  line_bot_api = AsyncMessagingApi(async_api_client)
234
  parser = WebhookParser(CHANNEL_SECRET)
235
 
236
- # ---------- LINE Webhook 處理 ----------
237
  @app.post("/webhook")
238
  async def line_webhook(request: Request):
239
- signature = request.headers['X-Line-Signature']
240
  body = await request.body()
 
241
  try:
242
  events = parser.parse(body.decode(), signature)
243
  except InvalidSignatureError:
@@ -253,8 +228,9 @@ async def line_webhook(request: Request):
253
 
254
  if not user_text:
255
  continue
256
-
257
  try:
 
258
  if user_text.lower() == "繼續" and user_id in pending_chunks:
259
  remaining = pending_chunks[user_id]
260
  if not remaining:
@@ -264,54 +240,44 @@ async def line_webhook(request: Request):
264
  chunks_to_send = remaining[:send_count]
265
  messages_to_send = [TextMessage(text=chunk) for chunk in chunks_to_send]
266
  if len(remaining) > send_count:
267
- messages_to_send[-1].text += "\n\n內容過長,請回覆 '繼續' 以查看下一部分。"
268
  pending_chunks[user_id] = remaining[send_count:]
269
  else:
270
  messages_to_send[-1].text += "\n\n內容已全部發送。"
271
- del pending_chunks[user_id]
272
- await line_bot_api.reply_message(
273
- ReplyMessageRequest(
274
- reply_token=reply_token,
275
- messages=messages_to_send
276
- )
277
- )
278
  continue
279
-
 
280
  ai_response = chat_pipeline.answer_question(user_id, user_text)
281
  chunks = split_text_for_line(ai_response)
282
-
283
  if len(chunks) <= 5:
284
  messages_to_send = [TextMessage(text=chunk) for chunk in chunks]
285
  else:
286
  chunks_to_send = chunks[:5]
287
  messages_to_send = [TextMessage(text=chunk) for chunk in chunks_to_send]
288
- messages_to_send[-1].text += "\n\n內容過長,請回覆 '繼續' 以查看下一部分。"
289
  pending_chunks[user_id] = chunks[5:]
290
-
291
- await line_bot_api.reply_message(
292
- ReplyMessageRequest(
293
- reply_token=reply_token,
294
- messages=messages_to_send
295
- )
296
- )
297
  except Exception as e:
298
  print(f"Error processing message: {e}")
299
- error_message = "抱歉,系統發生錯誤,請稍後再試。"
300
  await line_bot_api.reply_message(
301
  ReplyMessageRequest(
302
  reply_token=reply_token,
303
- messages=[TextMessage(text=error_message)]
304
  )
305
  )
306
-
307
  return {"status": "ok"}
308
 
309
- # 健康檢查端點
310
  @app.get("/health")
311
  async def health_check():
312
  return {"status": "ok"}
313
 
314
- # 根路由
315
  @app.get("/")
316
  async def root():
317
  return {"message": "LINE Bot is running"}
 
2
  # -*- coding: utf-8 -*-
3
  # ---------- 環境與快取設定 (應置於最前) ----------
4
  import os
5
+ from typing import List, Dict
 
6
  from contextlib import asynccontextmanager
7
+ from fastapi import FastAPI, Request, HTTPException
8
  import uvicorn
9
+
10
+ # ----------------- LINE Bot SDK v3 -----------------
11
  from linebot.v3.messaging import (
12
  AsyncApiClient,
13
  AsyncMessagingApi,
 
17
  )
18
  from linebot.v3.webhook import WebhookParser
19
  from linebot.v3.exceptions import InvalidSignatureError
20
+
21
  from openai import OpenAI
22
+ from tavily import TavilyClient
23
+ from sentence_transformers import SentenceTransformer, util
24
+ from tenacity import retry, stop_after_attempt, wait_exponential
25
 
26
+ # ==== CONFIG ====
27
  def _require_env(var: str) -> str:
28
  v = os.getenv(var)
29
  if not v:
30
  raise RuntimeError(f"FATAL: Missing required environment variable: {var}")
31
  return v
32
 
 
33
# LINE Messaging API credentials — required; startup fails without them.
CHANNEL_SECRET = _require_env("CHANNEL_SECRET")
CHANNEL_ACCESS_TOKEN = _require_env("CHANNEL_ACCESS_TOKEN")

# Tavily web-search API key — required.
TAVILY_API_KEY = _require_env("TAVILY_API_KEY")

# OpenAI-compatible gateway settings; base URL overridable via LLM_BASE_URL,
# API key is mandatory (read from OPENROUTER_API_KEY).
LLM_API_CONFIG = {
    "base_url": os.getenv("LLM_BASE_URL", "https://litellm-ekkks8gsocw.dgx-coolify.apmic.ai/"),
    "api_key": _require_env("OPENROUTER_API_KEY"),
}

# Generation parameters; every value can be overridden via environment variables.
LLM_MODEL_CONFIG = {
    "model": os.getenv("LLM_MODEL", "gemini-3-pro"),
    "max_tokens": int(os.getenv("MAX_TOKENS", 2000)),
    "temperature": float(os.getenv("TEMPERATURE", 0.3)),
    "seed": int(os.getenv("LLM_SEED", 42)),  # fixed seed for more reproducible replies
}
48
 
 
49
  SYSTEM_PROMPT = """你是一個友好的AI助手,請用簡單、親切的文字回覆用戶的問題。
50
  回答複雜問題時,先給概念,再給詳細解釋。
51
  使用條列式(如 - 或 1. 2. 3.)整理內容,讓它適合手機閱讀。
 
55
  聖經問題從希伯來文角度回答,確認來源可靠性。
56
  回應盡量結構化,清晰。"""
57
 
58
+ # ---------- 記憶體儲存 ----------
59
+ conversations: Dict[str, List[Dict[str, str]]] = {}
60
+ pending_chunks: Dict[str, List[str]] = {}
61
 
62
+ # ---------- 長訊息分割 ----------
 
 
 
63
  def split_text_for_line(text: str, max_length: int = 4900) -> List[str]:
64
  if len(text) <= max_length:
65
  return [text]
 
75
  text = text[split_pos:].lstrip()
76
  return chunks
77
 
78
+ # ---------- token 粗估 ----------
79
def estimate_tokens(messages: List[Dict[str, str]]) -> int:
    """Rough token estimate for a chat transcript.

    Counts whitespace-separated words in each message's ``content`` and
    scales by 1.3; used only as a cheap input-size guard, not exact counting.
    """
    return int(sum(len(msg["content"].split()) * 1.3 for msg in messages))
84
 
85
+ # ---------- 網路搜尋(已修復相似度 bug) ----------
86
def perform_web_search(query: str, max_results: int = 5) -> str:
    """Run a Tavily web search for *query* and return a formatted summary string.

    Results are re-ranked by cosine similarity between the query and each
    result's content (using the embedder preloaded on the module-level
    ``chat_pipeline``), filtered at a 0.3 similarity threshold, and the top 5
    are rendered into a text block. Any failure is caught and reported as a
    user-facing error string instead of being raised.
    """
    print(f"開始網路搜尋:查詢詞 = '{query}'")
    try:
        client = TavilyClient(api_key=TAVILY_API_KEY)
        # include_answer=True asks Tavily for its own AI-generated summary.
        response = client.search(query, max_results=max_results, include_answer=True)

        if not response.get('results'):
            return "沒有找到相關的網路搜尋結果。"

        # Reuse the embedding model preloaded by ChatPipeline.__init__
        # (avoids re-loading the model on every search).
        embedder = chat_pipeline.embedder
        query_emb = embedder.encode(query)

        # Score every result by cosine similarity to the query.
        results_with_scores = []
        for result in response['results']:
            content_emb = embedder.encode(result['content'])
            score = util.cos_sim(query_emb, content_emb)[0][0].item()
            results_with_scores.append((score, result))

        results_with_scores.sort(key=lambda x: x[0], reverse=True)

        # 0.3 relevance cutoff — presumably hand-tuned; verify against real traffic.
        relevant_with_scores = [item for item in results_with_scores if item[0] > 0.3]

        if not relevant_with_scores:
            return "沒有找到高度相關的網路搜尋結果。"

        search_summary = "以下是相關的網路搜尋結果摘要(已按相似度排序):\n"
        search_summary += f"AI總結:{response.get('answer', '無總結可用')}\n\n"

        # Format at most the 5 best (score, result) pairs.
        for i, (score, result) in enumerate(relevant_with_scores[:5], 1):
            print(f"結果 {i}: 標題='{result['title']}',相似度={score:.2f},來源={result['url']}")
            search_summary += f"{i}. [{score:.2f}] {result['title']}\n {result['content'][:300]}...\n 來源: {result['url']}\n\n"

        return search_summary

    except Exception as e:
        # Broad catch is deliberate: a search failure must not crash the bot;
        # the caller receives a polite error string instead.
        print(f"網路搜尋錯誤:{e}")
        return "搜尋時發生錯誤,請稍後再試。"
 
 
 
123
 
124
+ # ---------- ChatPipeline ----------
125
  class ChatPipeline:
126
    def __init__(self):
        """Build the pipeline: preload the embedding model and the LLM client."""
        # Preloading the sentence-transformer once here makes each later
        # web-search re-rank much faster (no per-call model load).
        self.embedder = SentenceTransformer('all-MiniLM-L6-v2')

        # OpenAI-compatible client pointed at the configured gateway.
        # The extra headers look like the attribution headers suggested for
        # OpenRouter-style gateways — confirm against the gateway's docs.
        self.llm_client = OpenAI(
            api_key=LLM_API_CONFIG["api_key"],
            base_url=LLM_API_CONFIG["base_url"],
            default_headers={
                "HTTP-Referer": os.getenv("SITE_URL", "https://your-line-bot.example.com"),
                "X-Title": os.getenv("SITE_NAME", "My LINE Bot"),
            }
        )
136
 
137
    @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
    def _llm_call(self, messages: List[Dict[str, str]]) -> str:
        """Send *messages* to the chat-completion endpoint and return the reply text.

        Retried up to 3 times with exponential backoff (tenacity) — note the
        retry also re-fires on the ValueError below.

        Raises:
            ValueError: before any API call, when the rough token estimate
                exceeds 50000.
        """
        token_est = estimate_tokens(messages)
        if token_est > 50000:
            raise ValueError("輸入過長")

        response = self.llm_client.chat.completions.create(
            model=LLM_MODEL_CONFIG["model"],
            messages=messages,
            max_tokens=LLM_MODEL_CONFIG["max_tokens"],
            temperature=LLM_MODEL_CONFIG["temperature"],
            seed=LLM_MODEL_CONFIG["seed"],
            timeout=30.0,  # hard cap per attempt; retries handle transient failures
        )
        # Guard against a null content field in the API response.
        return response.choices[0].message.content or ""
 
 
 
 
 
 
152
 
153
  def get_conversation_history(self, user_id: str) -> List[Dict[str, str]]:
154
  return conversations.get(user_id, [])
155
 
156
  def update_conversation_history(self, user_id: str, messages: List[Dict[str, str]]):
157
+ conversations[user_id] = messages[-20:]
 
158
 
159
  def clear_conversation_history(self, user_id: str):
160
+ conversations.pop(user_id, None)
161
+ pending_chunks.pop(user_id, None)
 
 
162
 
163
  def answer_question(self, user_id: str, user_text: str) -> str:
164
  if user_text.strip().lower() == "/clear":
 
171
  messages = [{"role": "system", "content": SYSTEM_PROMPT}]
172
  messages.extend(history)
173
  messages.append({"role": "user", "content": user_text})
174
+
175
  if "沒有找到" not in search_results:
176
  messages.append({"role": "system", "content": f"網路搜尋結果:{search_results}"})
177
 
 
184
 
185
  chunks = split_text_for_line(response)
186
  if len(chunks) > 5:
187
+ summary_prompt = [
188
+ {"role": "system", "content": "請將以下內容生成一個簡潔但完整的中文摘要,保留關鍵事實和細節,長度控制在2000字元內。"},
189
+ {"role": "user", "content": response}
190
+ ]
191
+ summary = self._llm_call(summary_prompt).replace('*', '')
192
+ return summary + "\n\n(完整回應過長,已提供摘要。如需細節,請分次詢問或回覆「繼續」)"
193
 
194
  return response
195
 
196
+ # ---------- FastAPI ----------
197
  @asynccontextmanager
198
  async def lifespan(app: FastAPI):
199
  global chat_pipeline
 
201
  yield
202
 
203
  app = FastAPI(lifespan=lifespan)
204
+ chat_pipeline = None
205
 
 
206
  configuration = Configuration(access_token=CHANNEL_ACCESS_TOKEN)
207
  async_api_client = AsyncApiClient(configuration)
208
  line_bot_api = AsyncMessagingApi(async_api_client)
209
  parser = WebhookParser(CHANNEL_SECRET)
210
 
 
211
  @app.post("/webhook")
212
  async def line_webhook(request: Request):
213
+ signature = request.headers.get('X-Line-Signature', '')
214
  body = await request.body()
215
+
216
  try:
217
  events = parser.parse(body.decode(), signature)
218
  except InvalidSignatureError:
 
228
 
229
  if not user_text:
230
  continue
231
+
232
  try:
233
+ # 處理「繼續」
234
  if user_text.lower() == "繼續" and user_id in pending_chunks:
235
  remaining = pending_chunks[user_id]
236
  if not remaining:
 
240
  chunks_to_send = remaining[:send_count]
241
  messages_to_send = [TextMessage(text=chunk) for chunk in chunks_to_send]
242
  if len(remaining) > send_count:
243
+ messages_to_send[-1].text += "\n\n內容過長,請回覆「繼續」查看下一部分。"
244
  pending_chunks[user_id] = remaining[send_count:]
245
  else:
246
  messages_to_send[-1].text += "\n\n內容已全部發送。"
247
+ pending_chunks.pop(user_id, None)
248
+
249
+ await line_bot_api.reply_message(ReplyMessageRequest(reply_token=reply_token, messages=messages_to_send))
 
 
 
 
250
  continue
251
+
252
+ # 正常回應
253
  ai_response = chat_pipeline.answer_question(user_id, user_text)
254
  chunks = split_text_for_line(ai_response)
255
+
256
  if len(chunks) <= 5:
257
  messages_to_send = [TextMessage(text=chunk) for chunk in chunks]
258
  else:
259
  chunks_to_send = chunks[:5]
260
  messages_to_send = [TextMessage(text=chunk) for chunk in chunks_to_send]
261
+ messages_to_send[-1].text += "\n\n內容過長,請回覆「繼續」查看下一部分。"
262
  pending_chunks[user_id] = chunks[5:]
263
+
264
+ await line_bot_api.reply_message(ReplyMessageRequest(reply_token=reply_token, messages=messages_to_send))
265
+
 
 
 
 
266
  except Exception as e:
267
  print(f"Error processing message: {e}")
 
268
  await line_bot_api.reply_message(
269
  ReplyMessageRequest(
270
  reply_token=reply_token,
271
+ messages=[TextMessage(text="抱歉,系統發生錯誤,請稍後再試。")]
272
  )
273
  )
274
+
275
  return {"status": "ok"}
276
 
 
277
@app.get("/health")
async def health_check():
    """Liveness probe endpoint for the hosting platform."""
    return dict(status="ok")
280
 
 
281
@app.get("/")
async def root():
    """Landing route confirming the bot service is running."""
    return dict(message="LINE Bot is running")