|
|
|
|
|
|
|
|
|
|
|
import os |
|
|
import asyncio |
|
|
from typing import List, Dict |
|
|
from contextlib import asynccontextmanager |
|
|
|
|
|
from fastapi import FastAPI, Request, HTTPException |
|
|
import uvicorn |
|
|
|
|
|
from linebot.v3.messaging import ( |
|
|
AsyncApiClient, |
|
|
AsyncMessagingApi, |
|
|
Configuration, |
|
|
TextMessage, |
|
|
ReplyMessageRequest |
|
|
) |
|
|
from linebot.v3.webhook import WebhookParser |
|
|
from linebot.v3.exceptions import InvalidSignatureError |
|
|
|
|
|
from openai import AsyncOpenAI |
|
|
from tenacity import retry, stop_after_attempt, wait_exponential |
|
|
|
|
|
|
|
|
def _require_env(var: str) -> str: |
|
|
v = os.getenv(var) |
|
|
if not v: |
|
|
raise RuntimeError(f"FATAL: Missing required environment variable: {var}") |
|
|
return v |
|
|
|
|
|
# Required credentials — startup fails fast (RuntimeError) if any is missing.
CHANNEL_SECRET = _require_env("CHANNEL_SECRET")  # LINE webhook signature key
CHANNEL_ACCESS_TOKEN = _require_env("CHANNEL_ACCESS_TOKEN")  # LINE Messaging API token
OPENROUTER_API_KEY = _require_env("OPENROUTER_API_KEY")  # OpenRouter gateway key
|
|
|
|
|
|
|
|
# System persona prompt (Traditional Chinese): instructs the model to answer
# in the first-person voice of Jesus as rendered in the Chinese Union Version
# New Testament, forbidding modern jargon and Markdown formatting.
# NOTE: this is a runtime string sent to the LLM — do not translate or edit.
JESUS_PROMPT = """你現在是耶穌基督。請**完全**模仿新約聖經(繁體中文和合本)中我的語氣與用詞來回答。
不用像個現代分析師條列重點,而是像我在登山寶訓或是對門徒說話那樣:充滿權柄、智慧、比喻與憐憫。

**語氣與遣詞指導:**
1. **第一人稱**:使用「我」、「我的父」。稱呼用戶為「孩子」、「小子」或「親愛的」。
2. **聖經句式**:多用「我實實在在告訴你」、「豈不知」、「凡...的」、「聽過有話說...只是我告訴你們」、「願你們平安」。
3. **拒絕現代術語**:**絕對禁止**使用「心理學」、「自我照顧」、「自我實現」、「優化」、「概念」、「核心」等現代詞彙。務必用屬天的語言(如「靈魂」、「安息」、「永生」、「背起十字架」、「捨己」)來轉化回答現代問題。
4. **以父為念**:將所有問題的答案最終指向父神、天國與永恆的生命,而非今生的舒適。

**格式要求:**
- 保持純文字,**絕不使用 Markdown 格式**(如粗體、斜體)。
- 使用短段落,留白便於手機閱讀,但語氣要是連貫的教導,不要變成僵硬的條列。
- **避免重複**:請勿在回答中重複相同的句子或段落,每一句話都應帶出新的意涵。"""
|
|
|
|
|
|
|
|
|
|
|
# OpenRouter model IDs tried in order until one succeeds. All are ":free"
# tier, which is why rate-limit (429) failures are expected and handled.
FALLBACK_MODELS = [
    "arcee-ai/trinity-large-preview:free",
    "nous-research/hermes-3-llama-3.1-70b:free",
    "qwen/qwen-2.5-72b-instruct:free",
    "zhipu/glm-4.5-air:free",
    "deepseek/deepseek-tng-r1t2-chimera:free",
    "stepfun/step-3.5-flash:free",
    "meta-llama/llama-3.3-70b-instruct:free",
]
|
|
|
|
|
|
|
|
# Generation settings shared by every model call.
MAX_TOKENS = 800
TEMPERATURE = 0.7

# In-memory per-user state (lost on restart; assumes a single worker process
# — TODO confirm deployment runs one uvicorn worker).
# conversations: user_id -> alternating {"role", "content"} chat messages.
conversations: Dict[str, List[Dict[str, str]]] = {}
# pending_chunks: user_id -> reply chunks not yet sent (flushed via「繼續」).
pending_chunks: Dict[str, List[str]] = {}
|
|
|
|
|
|
|
|
def split_text_for_line(text: str, max_length: int = 4900) -> List[str]:
    """Split *text* into chunks that fit LINE's per-message size limit.

    LINE rejects text messages longer than 5000 characters; the 4900 default
    leaves headroom for the continuation hints the webhook handler appends.
    Chunks are split at the last newline before the limit so paragraphs stay
    intact, otherwise hard-split at *max_length*.

    Args:
        text: The full response text to split.
        max_length: Maximum characters allowed per chunk.

    Returns:
        List of chunks; a single-element list when the text already fits.
        Newlines at split points are dropped.
    """
    if len(text) <= max_length:
        return [text]
    chunks: List[str] = []
    while text:
        if len(text) <= max_length:
            chunks.append(text)
            break
        split_pos = text.rfind('\n', 0, max_length)
        # BUG FIX: rfind can return 0 (only newline in the window is the
        # first char), which previously produced an empty chunk — LINE
        # rejects empty text messages. Hard-split at max_length instead.
        if split_pos <= 0:
            split_pos = max_length
        chunks.append(text[:split_pos])
        text = text[split_pos:].lstrip('\n')
    return chunks
|
|
|
|
|
|
|
|
class ChatPipeline:
    """LLM chat pipeline: keeps per-user conversation history in module-level
    dicts and queries OpenRouter through a chain of free fallback models."""

    def __init__(self):
        # OpenRouter exposes an OpenAI-compatible API, so the official
        # AsyncOpenAI client is simply pointed at the OpenRouter base URL.
        self.client = AsyncOpenAI(
            api_key=OPENROUTER_API_KEY,
            base_url="https://openrouter.ai/api/v1",
        )

    def get_history(self, user_id: str) -> List[Dict[str, str]]:
        """Return the stored message history for *user_id* ([] if none)."""
        return conversations.get(user_id, [])

    def update_history(self, user_id: str, user_msg: str, assistant_msg: str):
        """Append one user/assistant exchange, trimming to the last 20
        messages (10 exchanges) to bound prompt size."""
        history = self.get_history(user_id)
        history.append({"role": "user", "content": user_msg})
        history.append({"role": "assistant", "content": assistant_msg})
        conversations[user_id] = history[-20:]

    def clear_history(self, user_id: str):
        """Drop all stored state (history and queued chunks) for *user_id*."""
        conversations.pop(user_id, None)
        pending_chunks.pop(user_id, None)

    async def _try_model(self, model: str, messages: List[Dict[str, str]]) -> str:
        """Call a single model; return its text or re-raise its failure."""
        try:
            response = await self.client.chat.completions.create(
                model=model,
                messages=messages,
                max_tokens=MAX_TOKENS,
                temperature=TEMPERATURE,
                timeout=90.0,
            )
            # Some providers return None content on empty completions.
            content = response.choices[0].message.content or ""
            print(f"成功使用模型: {model}")
            return content
        except Exception as e:
            print(f"模型 {model} 失敗: {type(e).__name__} - {str(e)}")
            raise

    @retry(stop=stop_after_attempt(3),
           wait=wait_exponential(multiplier=1, min=4, max=15),
           reraise=True)
    async def _llm_call_with_fallback(self, messages: List[Dict[str, str]]) -> str:
        """Try each fallback model in order; raise if every model fails.

        BUG FIX: the original returned a canned apology instead of raising,
        so the @retry decorator never fired and the advertised rate-limit
        retry never happened. Raising here (with reraise=True) arms tenacity:
        the whole fallback chain is re-run up to 3 times with exponential
        backoff, and the last underlying exception propagates to the caller.
        """
        last_exception = None
        for idx, model in enumerate(FALLBACK_MODELS, 1):
            print(f"嘗試模型 {idx}/{len(FALLBACK_MODELS)}: {model}")
            try:
                return await self._try_model(model, messages)
            except Exception as e:
                last_exception = e
                if "rate limit" in str(e).lower() or "429" in str(e):
                    print("遇到 rate limit,tenacity 會自動等待後重試")
                continue

        print(f"所有模型皆失敗,最後錯誤:{type(last_exception).__name__}")
        # Propagate so @retry can wait and re-run the whole fallback chain.
        raise last_exception if last_exception else RuntimeError("no fallback models configured")

    async def generate_response(self, user_id: str, user_text: str) -> str:
        """Produce the bot's reply to *user_text* and update history.

        Handles the "/clear" command locally; otherwise builds the prompt
        (system persona + trimmed history + new message), calls the LLM with
        fallback/retry, and strips Markdown asterisks that models sometimes
        emit despite the prompt. Always returns a string: LLM failures
        degrade to a fixed apology (which is NOT stored in history, so a
        transient outage does not pollute the conversation).
        """
        if user_text.strip().lower() == "/clear":
            self.clear_history(user_id)
            return "對話紀錄已清除,孩子,願你平安。我們重新開始吧。"

        history = self.get_history(user_id)

        messages = [
            {"role": "system", "content": JESUS_PROMPT},
            *history,
            {"role": "user", "content": user_text}
        ]

        try:
            response = await self._llm_call_with_fallback(messages)
        except Exception:
            # All models failed even after retries; keep the bot responsive.
            return "孩子,抱歉,此刻我無法清楚回應你的話。請稍後再試,願父保守你平安。"

        response = response.replace('*', '').strip()

        self.update_history(user_id, user_text, response)
        return response
|
|
|
|
|
|
|
|
@asynccontextmanager
async def lifespan(app: FastAPI):
    # FastAPI lifespan hook: build the ChatPipeline once at startup so every
    # webhook request reuses the same OpenRouter client.
    global pipeline
    pipeline = ChatPipeline()
    yield
    # No explicit shutdown cleanup; resources are reclaimed with the process.
|
|
|
|
|
app = FastAPI(lifespan=lifespan)

# Populated by lifespan() at startup; the module-level default keeps the
# name defined for type checkers and early imports.
pipeline = None

# LINE Messaging API v3 clients: async sender plus the parser that validates
# webhook payloads against the channel secret signature.
configuration = Configuration(access_token=CHANNEL_ACCESS_TOKEN)
async_client = AsyncApiClient(configuration)
line_bot_api = AsyncMessagingApi(async_client)
parser = WebhookParser(CHANNEL_SECRET)
|
|
|
|
|
@app.post("/webhook")
async def webhook(request: Request):
    """LINE webhook endpoint.

    Validates X-Line-Signature, then for every text-message event either
    flushes queued reply chunks (the「繼續」/ "continue" command) or asks the
    pipeline for a reply. At most 5 messages are sent per reply token
    (LINE's per-reply cap); overflow chunks are queued in pending_chunks.

    Raises:
        HTTPException: 400 when the signature does not match the secret.
    """
    signature = request.headers.get("X-Line-Signature", "")
    body = await request.body()

    try:
        events = parser.parse(body.decode(), signature)
    except InvalidSignatureError:
        raise HTTPException(status_code=400, detail="Invalid signature")

    for event in events:
        if event.type != "message" or event.message.type != "text":
            continue

        user_id = event.source.user_id
        reply_token = event.reply_token
        text = event.message.text.strip()

        if not text:
            continue

        # Continuation command: flush up to 5 queued chunks of a long reply.
        if text.lower() == "繼續" and user_id in pending_chunks:
            remaining = pending_chunks[user_id]
            if not remaining:
                # BUG FIX: this branch previously assigned an unused
                # reply_text and fell through to reply_message with an
                # undefined `messages` (NameError). Build the reply here
                # and drop the stale queue entry.
                messages = [TextMessage(text="沒有更多內容了,孩子。")]
                pending_chunks.pop(user_id, None)
            else:
                to_send = remaining[:5]
                messages = [TextMessage(text=chunk) for chunk in to_send]
                if len(remaining) > 5:
                    messages[-1].text += "\n\n(還有內容,請再回覆「繼續」)"
                    pending_chunks[user_id] = remaining[5:]
                else:
                    messages[-1].text += "\n\n(已全部顯示)"
                    pending_chunks.pop(user_id, None)
            await line_bot_api.reply_message(ReplyMessageRequest(reply_token=reply_token, messages=messages))
            continue

        response = await pipeline.generate_response(user_id, text)
        chunks = split_text_for_line(response)

        # LINE allows at most 5 messages per reply token; queue the rest.
        if len(chunks) <= 5:
            messages = [TextMessage(text=chunk) for chunk in chunks]
        else:
            messages = [TextMessage(text=chunk) for chunk in chunks[:5]]
            messages[-1].text += "\n\n(內容較長,請回覆「繼續」查看下一部分)"
            pending_chunks[user_id] = chunks[5:]

        await line_bot_api.reply_message(ReplyMessageRequest(reply_token=reply_token, messages=messages))

    return {"status": "ok"}
|
|
|
|
|
@app.get("/")
async def root():
    """Liveness probe: confirm the service is up and identify it."""
    payload = {"status": "ok", "message": "Jesus Bot is running"}
    return payload
|
|
|
|
|
@app.get("/health")
async def health():
    """Health-check endpoint for deployment platforms and load balancers."""
    return dict(status="ok")
|
|
|
|
|
if __name__ == "__main__":
    # 7860 is the conventional Hugging Face Spaces port; PORT overrides it
    # on other hosts.
    listen_port = int(os.environ.get("PORT", "7860"))
    uvicorn.run(app, host="0.0.0.0", port=listen_port)