Song committed · Commit cedd05f · 1 Parent(s): 6b34f8b

hi

Files changed:
- Dockerfile +33 -0
- app.py +161 -0
- requirements.txt +4 -0
Dockerfile
ADDED
@@ -0,0 +1,33 @@
+FROM python:3.11-slim
+
+# ---- System deps ----
+RUN apt-get update && apt-get upgrade -y && apt-get install -y --no-install-recommends \
+    build-essential \
+    git \
+    curl \
+    && rm -rf /var/lib/apt/lists/*
+
+# ---- Workdir ----
+WORKDIR /app
+
+# ---- Copy requirements & install ----
+COPY requirements.txt /app/requirements.txt
+RUN pip install --no-cache-dir -U pip \
+    && pip install --no-cache-dir -r /app/requirements.txt
+
+# ---- Runtime cache to /tmp (writeable) ----
+ENV XDG_CACHE_HOME=/tmp/.cache
+
+# ---- Copy app ----
+COPY . /app
+
+# ---- Healthcheck ----
+HEALTHCHECK --interval=30s --timeout=10s --retries=3 \
+    CMD curl -f http://localhost:${PORT:-7860}/health || exit 1
+
+# ---- Port & CMD ----
+EXPOSE 7860
+ENV PORT=7860 \
+    PYTHONUNBUFFERED=1
+
+CMD ["sh", "-c", "uvicorn app:app --host 0.0.0.0 --port ${PORT:-7860} --log-level info"]
app.py
ADDED
@@ -0,0 +1,161 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ---------- Environment and cache settings (must come first) ----------
+import os
+import json
+import time
+import logging
+from typing import List, Dict, Any
+from contextlib import asynccontextmanager
+from fastapi import FastAPI, Request, HTTPException, status
+import uvicorn
+from linebot import LineBotApi
+from linebot.models import TextSendMessage
+from openai import OpenAI
+import hmac
+import base64
+import hashlib
+
+# ==== CONFIG (loaded from environment variables, with defaults) ====
+def _require_env(var: str) -> str:
+    v = os.getenv(var)
+    if not v:
+        raise RuntimeError(f"FATAL: Missing required environment variable: {var}")
+    return v
+
+CHANNEL_SECRET = _require_env("CHANNEL_SECRET")
+CHANNEL_ACCESS_TOKEN = _require_env("CHANNEL_ACCESS_TOKEN")
+
+LLM_API_CONFIG = {
+    "base_url": "https://litellm-ekkks8gsocw.dgx-coolify.apmic.ai/",
+    "api_key": "sk-eT_04m428oAPUD5kUmIhVA",
+}
+LLM_MODEL_CONFIG = {
+    "model": "gpt-oss-120b",
+    "max_tokens": int(os.getenv("MAX_TOKENS", 1024)),
+    "temperature": float(os.getenv("TEMPERATURE", 0.3)),
+    "seed": int(os.getenv("LLM_SEED", 42)),
+}
+
+SYSTEM_PROMPT = "你是一個友好的AI助手,請用簡單、親切的中文回覆用戶的問題。"
+
+# ---------- Logging setup ----------
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+log = logging.getLogger(__name__)
+
+# ---------- In-memory conversation history ----------
+conversations: Dict[str, List[Dict[str, str]]] = {}  # {user_id: messages_list}
+
+# ---------- Helper to split long messages for the LINE API ----------
+def split_text_for_line(text: str, max_length: int = 4800) -> List[str]:
+    if len(text) <= max_length:
+        return [text]
+    chunks = []
+    while text:
+        if len(text) <= max_length:
+            chunks.append(text)
+            break
+        split_pos = text.rfind('\n', 0, max_length)
+        if split_pos == -1:
+            split_pos = max_length
+        chunks.append(text[:split_pos])
+        text = text[split_pos:].lstrip()
+    return chunks
+
+# ---------- Chat Pipeline ----------
+class ChatPipeline:
+    def __init__(self):
+        if not LLM_API_CONFIG["api_key"] or not LLM_API_CONFIG["base_url"]:
+            raise ValueError("LLM API Key or Base URL is not configured.")
+        self.llm_client = OpenAI(api_key=LLM_API_CONFIG["api_key"], base_url=LLM_API_CONFIG["base_url"])
+
+    def _llm_call(self, messages: List[Dict[str, str]]) -> str:
+        log.info("LLM 呼叫開始.")
+        start_time = time.time()
+        response = self.llm_client.chat.completions.create(
+            model=LLM_MODEL_CONFIG["model"],
+            messages=messages,
+            max_tokens=LLM_MODEL_CONFIG["max_tokens"],
+            temperature=LLM_MODEL_CONFIG["temperature"],
+            seed=LLM_MODEL_CONFIG["seed"],
+        )
+        content = response.choices[0].message.content or ""
+        log.info(f"LLM 呼叫完成,耗時: {time.time() - start_time:.2f} 秒。內容長度: {len(content)}.")
+        return content
+
+    def get_conversation_history(self, user_id: str) -> List[Dict[str, str]]:
+        return conversations.get(user_id, [])
+
+    def update_conversation_history(self, user_id: str, messages: List[Dict[str, str]]):
+        # Keep only the 10 most recent messages
+        if len(messages) > 10:
+            messages = messages[-10:]
+        conversations[user_id] = messages
+
+    def clear_conversation_history(self, user_id: str):
+        if user_id in conversations:
+            del conversations[user_id]
+
+    def answer_question(self, user_id: str, user_text: str) -> str:
+        if user_text.strip() == "/clear":
+            self.clear_conversation_history(user_id)
+            return "對話紀錄已清除!現在開始新的對話。"
+
+        history = self.get_conversation_history(user_id)
+        messages = [{"role": "system", "content": SYSTEM_PROMPT}]
+        messages.extend(history)
+        messages.append({"role": "user", "content": user_text})
+        response = self._llm_call(messages)
+        # Update history
+        history.append({"role": "user", "content": user_text})
+        history.append({"role": "assistant", "content": response})
+        self.update_conversation_history(user_id, history)
+        return response
+
+# ---------- FastAPI application setup ----------
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    log.info("應用程式啟動...")
+    global chat_pipeline
+    chat_pipeline = ChatPipeline()
+    yield
+    log.info("應用程式關閉...")
+
+app = FastAPI(lifespan=lifespan)
+chat_pipeline = None
+
+# LINE Bot API
+line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)
+
+# ---------- LINE webhook handling ----------
+@app.post("/webhook")
+async def line_webhook(request: Request):
+    signature = request.headers.get('X-Line-Signature', '')
+    body = await request.body()
+    hash_ = hmac.new(CHANNEL_SECRET.encode('utf-8'), body, hashlib.sha256).digest()
+    expected_signature = base64.b64encode(hash_).decode()
+    if not hmac.compare_digest(signature, expected_signature):
+        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid signature")
+
+    body_str = body.decode('utf-8')
+    payload = json.loads(body_str)
+    for event in payload.get('events', []):
+        user_id = event['source']['userId']
+        reply_token = event.get('replyToken')
+        if event['type'] == 'message' and event['message']['type'] == 'text':
+            user_text = event['message']['text'].strip()
+            if not user_text:
+                continue
+            try:
+                ai_response = chat_pipeline.answer_question(user_id, user_text)
+                chunks = split_text_for_line(ai_response)
+                messages_to_send = [TextSendMessage(text=chunk) for chunk in chunks]
+                line_bot_api.reply_message(reply_token, messages_to_send[:5])  # LINE allows at most 5 messages per reply
+            except Exception as e:
+                log.error(f"Error processing request: {e}", exc_info=True)
+                error_message = "抱歉,系統發生錯誤,請稍後再試。"
+                line_bot_api.reply_message(reply_token, [TextSendMessage(text=error_message)])
+    return {"status": "ok"}
+
+if __name__ == "__main__":
+    uvicorn.run(app, host="0.0.0.0", port=7860)
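One inconsistency between the two new files is worth flagging: the Dockerfile's HEALTHCHECK probes GET /health, but app.py only registers the /webhook route, so the container's healthcheck would always fail. A minimal sketch of the missing endpoint, written the way it might be added to app.py (the path comes from the Dockerfile; the route itself is not part of this commit):

# Hypothetical /health route for the Dockerfile HEALTHCHECK (not in this commit).
from fastapi import FastAPI

app = FastAPI()  # in app.py this would be the existing `app` instance

@app.get("/health")
async def health():
    # Liveness only: return 200 with a tiny JSON body so `curl -f` succeeds.
    return {"status": "ok"}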
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+fastapi
+uvicorn
+line-bot-sdk
+openai
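With these four packages installed and CHANNEL_SECRET / CHANNEL_ACCESS_TOKEN exported, the signature check in line_webhook can be exercised without real LINE traffic by signing a request body the same way the handler verifies it (HMAC-SHA256 over the raw body with CHANNEL_SECRET, then base64). A minimal sketch using only the standard library; the userId, replyToken, and message text are made-up test values:

# Sketch: compute the X-Line-Signature header for a hand-built webhook payload,
# mirroring the hmac/base64 verification in line_webhook. All values are fake.
import base64
import hashlib
import hmac
import json
import os

secret = os.environ["CHANNEL_SECRET"]
payload = {
    "events": [{
        "type": "message",
        "replyToken": "test-reply-token",      # fake value for local testing
        "source": {"userId": "U-test-user"},   # fake value
        "message": {"type": "text", "text": "hello"},
    }]
}
body = json.dumps(payload).encode("utf-8")
digest = hmac.new(secret.encode("utf-8"), body, hashlib.sha256).digest()
print("X-Line-Signature:", base64.b64encode(digest).decode())
print("Body:", body.decode("utf-8"))

The printed header and body can then be sent to a locally running instance with any HTTP client; only the signature path is guaranteed to succeed, since the fake replyToken will be rejected by the LINE API.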