howard9963 committed on
Commit
7ba10aa
·
verified ·
1 Parent(s): 8bf232f

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -8
app.py CHANGED
@@ -2,25 +2,31 @@ import os
2
  from fastapi import FastAPI, Header, HTTPException, Body
3
  from transformers import AutoTokenizer
4
 
5
- # Hugging Face Space 的 Secret 環境變數取得 API_KEY
6
- EXPECTED_API_KEY = os.environ.get("apikey", "")
7
 
8
- # 初始化 CKIP 模型 (使用 Hugging Face 提供的預訓練模型)
9
  tokenizer = AutoTokenizer.from_pretrained("ckiplab/bert-base-chinese-ws")
10
 
11
  app = FastAPI(title="CKIP Word Segmentation API")
12
 
 
13
  @app.get("/")
14
- def root():
15
- return {"message": "CKIP Tokenizer API is running!"}
16
 
 
17
  @app.post("/tokenize")
18
- async def tokenize(input_text: TextInput, x_api_key: str = Header(default=None)):
19
- # 驗證 API Key
 
 
 
20
  if not EXPECTED_API_KEY:
21
  raise HTTPException(status_code=500, detail="Server missing API_KEY config")
 
22
  if x_api_key != EXPECTED_API_KEY:
23
- raise HTTPException(status_code=401, detail="Unauthorized: Invalid API Key")
24
 
25
  tokens = tokenizer.tokenize(text)
26
  return {"tokens": tokens}
 
from fastapi import FastAPI, Header, HTTPException, Body
from transformers import AutoTokenizer

# The secret is injected by the Space as an environment variable;
# None here means the deployment was not configured.
EXPECTED_API_KEY = os.environ.get("apikey")

# Load the pre-trained CKIP BERT word-segmentation tokenizer.
tokenizer = AutoTokenizer.from_pretrained("ckiplab/bert-base-chinese-ws")

app = FastAPI(title="CKIP Word Segmentation API")
12
 
13
# GET endpoint: lightweight liveness probe.
@app.get("/")
def health_check():
    """Report that the service is up."""
    payload = {"status": "ok", "message": "API is running"}
    return payload
17
 
18
# POST endpoint: Chinese word segmentation.
@app.post("/tokenize")
async def tokenize(
    text: str = Body(..., embed=True),
    x_api_key: str = Header(None)
):
    """Segment `text` into tokens with the CKIP BERT tokenizer.

    Expects a JSON body {"text": "..."} and an `x-api-key` request header.

    Raises:
        HTTPException 500: the server has no API key configured.
        HTTPException 401: the supplied API key is missing or wrong.
    """
    import secrets  # local import: only needed for the key comparison below

    # Verify the server-side secret exists before trusting any request.
    if not EXPECTED_API_KEY:
        raise HTTPException(status_code=500, detail="Server missing API_KEY config")
    # Constant-time comparison avoids leaking the key via timing; a
    # missing header (x_api_key is None) is rejected explicitly.
    if x_api_key is None or not secrets.compare_digest(x_api_key, EXPECTED_API_KEY):
        raise HTTPException(status_code=401, detail="Invalid API Key")

    tokens = tokenizer.tokenize(text)
    return {"tokens": tokens}