"""Gradio app that classifies input text as AI-generated or human-written.

Loads a fine-tuned sequence-classification model and serves a simple
single-textbox UI via Gradio.
"""

import gradio as gr
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Model name on the Hub or local folder path; fill in per deployment.
model_path = "你的模型名稱或資料夾路徑"

tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForSequenceClassification.from_pretrained(model_path)
model.eval()  # inference mode: disables dropout / batch-norm updates

# Prefer GPU when available; all inputs are moved to the same device below.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def detect_ai(text: str) -> str:
    """Classify *text* and return a human-readable probability report.

    NOTE(review): assumes label index 0 = human-written and index 1 =
    AI-generated — confirm against the model's ``config.id2label`` mapping.

    Args:
        text: Raw input text pasted by the user. Tokenization truncates to
            the model's maximum sequence length.

    Returns:
        A formatted string with the predicted label and both class
        probabilities.
    """
    inputs = tokenizer(
        text, return_tensors="pt", truncation=True, padding=True
    ).to(device)
    # No gradients needed for pure inference.
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.softmax(outputs.logits, dim=1).squeeze()
    human_prob = probs[0].item()
    ai_prob = probs[1].item()
    label = "AI 生成" if ai_prob > human_prob else "人類撰寫"
    return f"預測:{label}\nAI 機率:{ai_prob:.2%}\n人類機率:{human_prob:.2%}"


demo = gr.Interface(
    fn=detect_ai,
    inputs=gr.Textbox(label="請貼上要分析的文字", lines=10, placeholder="貼上或輸入內容..."),
    outputs="text",
    title="AI vs 人類文字偵測器",
    description="這個工具可以幫助你判斷輸入的文字是 AI 生成還是人類撰寫。",
)

# Guarded so importing this module (e.g. for tests) does not start a server;
# running the file directly behaves exactly as before.
if __name__ == "__main__":
    demo.launch()