Update app.py
app.py CHANGED
@@ -1,18 +1,54 @@
 from fastapi import FastAPI
+from pydantic import BaseModel
 from transformers import pipeline
+import torch

-app = FastAPI()
+app = FastAPI(title="Dual-Model Combined Text Moderation API")

-#
-    return {"message": "Text Moderation API is running."}
+# 1. Initialize both models
+print("Loading model A (multilingual, general-purpose)...")
+model_a = pipeline("text-classification", model="textdetox/bert-multilingual-toxicity-classifier")

+print("Loading model B (Chinese-focused)...")
+model_b = pipeline("text-classification", model="thu-coai/roberta-base-cold")

+# 2. Request schema
+class TextInput(BaseModel):
+    text: str
+
+@app.post("/check")
+async def combined_check(input_data: TextInput):
+    text = input_data.text
+    if not text.strip():
+        return {"error": "Empty text"}
+
+    # Run model A
+    res_a = model_a(text)[0]
+    # For this model, LABEL_1 usually denotes the toxic class
+    score_a = res_a['score'] if res_a['label'] == 'LABEL_1' else 1 - res_a['score']
+
+    # Run model B
+    res_b = model_b(text)[0]
+    # For this model, LABEL_1 denotes the offensive class
+    score_b = res_b['score'] if res_b['label'] == 'LABEL_1' else 1 - res_b['score']
+
+    # 3. Combined scoring: "highest score wins", softened by an average-score term
+    # Formula: Final Score = (Max_Score * 0.7) + (Avg_Score * 0.3)
+    final_risk_score = (max(score_a, score_b) * 0.7) + (((score_a + score_b) / 2) * 0.3)
+
+    # 4. Verdict
+    status = "pass"
+    if final_risk_score > 0.75:
+        status = "block"
+    elif final_risk_score > 0.45:
+        status = "needs manual review"
+
+    return {
+        "text": text,
+        "combined_risk_score": round(final_risk_score, 4),
+        "status": status,
+        "detail": {
+            "general_toxicity_model": round(score_a, 4),
+            "chinese_offensive_model": round(score_b, 4)
+        }
+    }
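Both score conversions hinge on the assumption, stated only in the comments, that LABEL_1 is the toxic/offensive class for each checkpoint. A quick way to sanity-check that mapping before trusting it is to run each pipeline on a harmless probe sentence and inspect the returned labels (a sketch; the probe text is arbitrary):

# Sanity-check the label vocabulary of both pipelines; each call
# returns a list like [{'label': 'LABEL_0', 'score': 0.99}].
probe = "Hello there"
print(model_a(probe))  # expect the benign class with a high score
print(model_b(probe))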
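The 0.7/0.3 weighting means that one model flagging the text strongly dominates the verdict even when the other model disagrees. A worked example with illustrative scores (not real model output):

# Illustrative values: model A flags the text strongly, model B barely reacts.
score_a, score_b = 0.90, 0.30
max_score = max(score_a, score_b)            # 0.90
avg_score = (score_a + score_b) / 2          # 0.60
final = max_score * 0.7 + avg_score * 0.3    # 0.63 + 0.18 = 0.81
# 0.81 > 0.75, so this text lands in "block": a single confident
# detection outweighs the other model's low score.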
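A minimal client sketch for the /check endpoint, assuming the app is served locally on port 7860 (a common Spaces default; the URL is an assumption, not part of this diff):

import requests

API_URL = "http://localhost:7860"  # hypothetical; point this at the running Space

resp = requests.post(f"{API_URL}/check", json={"text": "some user-submitted comment"})
print(resp.json())
# Expected shape, per the handler above:
# {"text": "...", "combined_risk_score": 0.0123, "status": "pass",
#  "detail": {"general_toxicity_model": 0.0123, "chinese_offensive_model": 0.0123}}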