Spaces:
Sleeping
Sleeping
Update config.py
Browse files
config.py
CHANGED
|
@@ -1,47 +1,10 @@
|
|
| 1 |
-
# config.py
import os
import tempfile
from datetime import datetime

# ---
# [Fix] Use HF_HOME as the single root for every Hugging Face file.
# This tells huggingface_hub (used for login) and transformers (used for
# model downloads) to keep everything under a writable /tmp directory,
# which resolves "Permission denied" errors on the Space.
HF_HOME_DIR = "/tmp/huggingface"
os.environ["HF_HOME"] = HF_HOME_DIR
os.makedirs(HF_HOME_DIR, exist_ok=True)

# Matplotlib cache directory (must also live somewhere writable).
os.environ.setdefault("MPLCONFIGDIR", "/tmp/matplotlib")

# --- LINE Bot credentials ---
CHANNEL_ACCESS_TOKEN = os.getenv("CHANNEL_ACCESS_TOKEN")
CHANNEL_SECRET = os.getenv("CHANNEL_SECRET")

# --- Hugging Face Space URL ---
# Prefer an explicit SPACEURL override; otherwise derive the public
# *.hf.space host from the SPACE_ID ("author/name") that HF injects.
HF_SPACE_URL = os.getenv("SPACEURL")
if not HF_SPACE_URL:
    sid = os.getenv("SPACE_ID")
    if sid and "/" in sid:
        author, name = sid.split("/", 1)
        # hf.space hosts use "-" where the repo id has "_".
        # NOTE(review): confirm whether mixed-case owner/space names must
        # also be lowercased for the subdomain — not handled here.
        host = f"{author}-{name}".replace("_", "-")
        HF_SPACE_URL = f"https://{host}.hf.space"
    else:
        HF_SPACE_URL = ""

# --- Static file directory ---
STATIC_DIR = os.getenv("STATIC_DIR", os.path.join(tempfile.gettempdir(), "static"))
os.makedirs(STATIC_DIR, exist_ok=True)

# --- API endpoints ---
CWA_ALARM_API = "https://app-2.cwa.gov.tw/api/v1/earthquake/alarm/list"
USGS_API_BASE_URL = "https://earthquake.usgs.gov/fdsnws/event/1/query"

# --- AI model settings (Gemma) ---
HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN")
LLM_MODEL = os.getenv("LLM_MODEL", "google/gemma-2b-it")
LLM_MAX_NEW_TOKENS = int(os.getenv("LLM_MAX_NEW_TOKENS", "256"))
LLM_TOP_K = int(os.getenv("LLM_TOP_K", "50"))
LLM_TEMPERATURE = float(os.getenv("LLM_TEMPERATURE", "0.7"))

#
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# in config.py

# --- AI model settings ---
HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN")

# [Change] Swap back to a smaller model that can run on CPU.
LLM_MODEL = os.getenv("LLM_MODEL", "ckiplab/gpt2-base-chinese")

# A smaller budget is fine for the CPU model.
LLM_MAX_NEW_TOKENS = int(os.getenv("LLM_MAX_NEW_TOKENS", "120"))
# ...