StarrySkyWorld committed on
Commit 1e87b56 · verified · 1 Parent(s): 4af09ee

Update app.py

Files changed (1)
  1. app.py +50 -41
app.py CHANGED
@@ -1,54 +1,63 @@
  from fastapi import FastAPI
  from pydantic import BaseModel
  from transformers import pipeline
- import torch
  
- app = FastAPI(title="Dual-Model Combined Text Audit API")
  
- # 1. Initialize the two models
- print("Loading model A (multilingual, general-purpose)...")
- model_a = pipeline("text-classification", model="textdetox/bert-multilingual-toxicity-classifier")
  
- print("Loading model B (Chinese-specialized)...")
- model_b = pipeline("text-classification", model="thu-coai/roberta-base-cold")
-
- class TextInput(BaseModel):
      text: str
  
- @app.post("/check")
- async def combined_check(input_data: TextInput):
-     text = input_data.text
      if not text.strip():
-         return {"error": "Empty text"}
-
-     # Run model A
-     res_a = model_a(text)[0]
-     # For this model, LABEL_1 usually denotes Toxic
-     score_a = res_a['score'] if res_a['label'] == 'LABEL_1' else 1 - res_a['score']
-
-     # Run model B
-     res_b = model_b(text)[0]
-     # For this model, LABEL_1 denotes Offensive
-     score_b = res_b['score'] if res_b['label'] == 'LABEL_1' else 1 - res_b['score']
-
-     # 3. Combined scoring logic
-     # Apply a "highest score" rule plus an "average score" compensation term
-     # Formula: Final Score = (Max_Score * 0.7) + (Avg_Score * 0.3)
-     final_risk_score = (max(score_a, score_b) * 0.7) + (((score_a + score_b) / 2) * 0.3)
-
-     # 4. Verdict
-     status = "Passed"
-     if final_risk_score > 0.75:
-         status = "Blocked"
-     elif final_risk_score > 0.45:
-         status = "Manual review recommended"
  
      return {
-         "text": text,
-         "combined_risk_score": round(final_risk_score, 4),
-         "status": status,
-         "detail": {
-             "general_toxicity_model": round(score_a, 4),
-             "chinese_offensive_model": round(score_b, 4)
          }
      }
  from fastapi import FastAPI
  from pydantic import BaseModel
  from transformers import pipeline
+ import math
  
+ app = FastAPI(title="MODERATION_API_V2")
  
+ # Initialize models
+ # Model A: multilingual, general-purpose toxicity detection
+ pipe_a = pipeline("text-classification", model="textdetox/bert-multilingual-toxicity-classifier")
+ # Model B: Chinese-specialized offensive-language detection
+ pipe_b = pipeline("text-classification", model="thu-coai/roberta-base-cold")
  
+ class CheckRequest(BaseModel):
      text: str
  
+ @app.post("/analyze")
+ async def analyze(request: CheckRequest):
+     text = request.text
      if not text.strip():
+         return {"STATUS": "ERROR", "REASON": "EMPTY_TEXT"}
+
+     # Run inference and extract a risk probability (0.0 - 1.0)
+     # For textdetox: LABEL_1 means toxic
+     res_a = pipe_a(text)[0]
+     risk_a = res_a['score'] if res_a['label'] == 'LABEL_1' else 1 - res_a['score']
+
+     # For thu-coai: LABEL_1 means offensive
+     res_b = pipe_b(text)[0]
+     risk_b = res_b['score'] if res_b['label'] == 'LABEL_1' else 1 - res_b['score']
+
+     # Combined risk score (weighted)
+     # Weighting: Max_Score (70%) + Avg_Score (30%)
+     combined_risk = (max(risk_a, risk_b) * 0.7) + (((risk_a + risk_b) / 2) * 0.3)
+
+     # Map the score to a numeric risk level (1-5)
+     # Level 1: [0.0-0.2] SAFE
+     # Level 5: [0.8-1.0] BLOCKED
+     risk_level = math.ceil(combined_risk * 5)
+     risk_level = max(1, min(5, risk_level))
+
+     # Map level to status
+     if risk_level >= 4:
+         status = "BLOCKED"
+     elif risk_level == 3:
+         status = "REVIEW"
+     else:
+         status = "PASSED"
  
      return {
+         "TEXT": text,
+         "STATUS": status,
+         "RISK_LEVEL": risk_level,
+         "CONFIDENCE_SCORE": round(combined_risk, 4),
+         "RAW_DATA": {
+             "GENERAL_MODEL": round(risk_a, 4),
+             "SPECIALIZED_MODEL": round(risk_b, 4)
          }
      }
+
+ @app.get("/health")
+ async def health():
+     return {"STATUS": "UP"}
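For reference, a minimal sketch of the scoring and level mapping introduced in this commit, with hypothetical per-model probabilities plugged in; the values 0.92 and 0.40 are illustrative only, not real model outputs.

import math

# Hypothetical risk probabilities from the two classifiers (illustrative values)
risk_a, risk_b = 0.92, 0.40

# Weighting from the commit: 70% of the max score plus 30% of the average
combined_risk = (max(risk_a, risk_b) * 0.7) + (((risk_a + risk_b) / 2) * 0.3)  # 0.92*0.7 + 0.66*0.3 ≈ 0.842

# Map to a 1-5 level and then to a status, mirroring /analyze
risk_level = max(1, min(5, math.ceil(combined_risk * 5)))  # ceil(4.21) = 5
status = "BLOCKED" if risk_level >= 4 else "REVIEW" if risk_level == 3 else "PASSED"

print(round(combined_risk, 4), risk_level, status)  # ~0.842 5 BLOCKED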
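As a quick local check of the renamed endpoints, a sketch using FastAPI's TestClient, assuming the new version above is saved as app.py and both Hugging Face models can be downloaded; the sample text and expected keys are illustrative, not part of the commit.

from fastapi.testclient import TestClient

from app import app  # assumes the updated file above is saved as app.py

client = TestClient(app)

# Health probe added in this commit
print(client.get("/health").json())  # {"STATUS": "UP"}

# Moderation call against the new /analyze endpoint
resp = client.post("/analyze", json={"text": "example text to moderate"})
print(resp.json())  # expected keys: TEXT, STATUS, RISK_LEVEL, CONFIDENCE_SCORE, RAW_DATA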