Spaces:
Sleeping
Sleeping
File size: 2,353 Bytes
22db0eb 1e3779f 0e24b29 22db0eb 1e3779f 22db0eb 1e3779f 22db0eb 1e3779f 22db0eb 3662b93 2cb5c95 e25e3be 0e24b29 e25e3be 2cb5c95 ac74601 3662b93 fde1d98 e25e3be 22db0eb 3662b93 55c0410 3662b93 55c0410 6d27a75 55c0410 3662b93 55c0410 3662b93 6d27a75 3143491 55c0410 0e24b29 3662b93 ff91fbe 3662b93 55c0410 636f520 55c0410 8055a6c 7fd3b23 22db0eb |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 |
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
# Load the fine-tuned classifier and its tokenizer from the HuggingFace Hub.
# NOTE: this runs at import time and downloads weights on first launch.
model_name = "LilithHu/new-manipulation-model"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
# Switch to evaluation mode (disables dropout etc.) — inference only.
model.eval()
# Pick the compute device: GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
# Human-readable class names, indexed by the model's output label id
# (0 = non-manipulative, 1 = manipulative); shown bilingually in the UI.
labels = ["Non-manipulative / 非操纵性", "Manipulative / 操纵性"]
# 推理函数
def classify(text):
    """Classify *text* as manipulative or non-manipulative.

    Applies a fixed decision threshold to the "manipulative" class
    probability (rather than plain argmax) and returns a Markdown
    string with the predicted label and a capped confidence figure.

    Args:
        text: Input sentence (English or Chinese).

    Returns:
        Markdown-formatted prediction, e.g.
        "Prediction / 预测:\\n<label>\\nConfidence / 置信度: 87.5%".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True).to(device)
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.softmax(outputs.logits, dim=1)[0]

    # Custom threshold: only call it "manipulative" when the model is
    # reasonably sure, instead of argmax at 0.5.
    threshold = 0.7
    pred = 1 if probs[1].item() > threshold else 0

    # Cap the displayed confidence at 95% so the UI never claims certainty.
    confidence = min(probs[pred].item(), 0.95)
    percent = round(confidence * 100, 2)

    # Bug fix: `percent` was previously computed but never shown —
    # include it in the returned Markdown.
    return (
        f"Prediction / 预测:\n{labels[pred]}\n"
        f"Confidence / 置信度: {percent}%"
    )
# Gradio UI: one multiline textbox in, a Markdown-rendered prediction out.
interface = gr.Interface(
# The inference function defined above.
fn=classify,
inputs=gr.Textbox(
lines=4,
placeholder="Enter text in English or Chinese... ",
label="📝 Input Text"
),
# classify() returns a Markdown string, rendered here.
outputs=gr.Markdown(label="📊 Prediction"),
title="🔍 Manipulative Language Detector",
description="""
🧪 Enter a sentence in English or Chinese to detect if it's manipulative.
📌 **Disclaimer**
This system is for **research and educational purposes only**.
It **does not guarantee accuracy** and **should not be used as legal or clinical evidence**.
🤖 **Model Info**
- Model: `LilithHu/new-manipulation-model`
- Base: `mDeBERTa-v3` multilingual pre-trained model
- Fine-tuned using HuggingFace Transformers on 10,000 labeled Chinese data
⚠️ **About Examples**
The examples provided below are those **cited in the paper**, including implicit moral coercion, polite masking and false positives.
🌐 **Built with Gradio and hosted on HuggingFace Spaces**
"""
)
# Start the web server (blocks until the Space is stopped).
interface.launch()
|