Update app.py
Browse files
app.py
CHANGED
|
@@ -1,8 +1,9 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
-
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
| 3 |
import torch
|
| 4 |
import math
|
| 5 |
import re
|
|
|
|
| 6 |
|
| 7 |
# -------------------------------
|
| 8 |
# Sentence splitter (no nltk)
|
|
@@ -12,22 +13,19 @@ def simple_sent_tokenize(text):
|
|
| 12 |
return [s for s in sentences if s.strip()]
|
| 13 |
|
| 14 |
# -------------------------------
|
| 15 |
-
# Load
|
| 16 |
# -------------------------------
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
]
|
| 21 |
-
|
| 22 |
-
detector_tokenizers = [AutoTokenizer.from_pretrained(name) for name in detector_names]
|
| 23 |
-
detector_models = [AutoModelForSequenceClassification.from_pretrained(name) for name in detector_names]
|
| 24 |
-
|
| 25 |
-
gpt2_tokenizer = AutoTokenizer.from_pretrained("gpt2")
|
| 26 |
-
gpt2_model = GPT2LMHeadModel.from_pretrained("gpt2")
|
| 27 |
|
| 28 |
# -------------------------------
|
| 29 |
-
# Helper
|
| 30 |
# -------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
def compute_perplexity(text):
|
| 32 |
enc = gpt2_tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
|
| 33 |
input_ids = enc.input_ids
|
|
@@ -35,42 +33,54 @@ def compute_perplexity(text):
|
|
| 35 |
loss = gpt2_model(input_ids, labels=input_ids).loss
|
| 36 |
return math.exp(loss.item())
|
| 37 |
|
|
|
|
|
|
|
|
|
|
| 38 |
def sentence_score(sentence):
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
|
|
|
| 45 |
ppl = compute_perplexity(sentence)
|
| 46 |
-
ppl_score = max(0, min(1, 100/ppl))
|
| 47 |
-
return sum(probs)/len(probs)*0.7 + ppl_score*0.3
|
| 48 |
|
| 49 |
-
#
|
| 50 |
-
|
| 51 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 52 |
return "Most likely human-written.", "green"
|
| 53 |
-
elif ai_prob <
|
| 54 |
return "Possibly human-written with minimal AI assistance.", "yellowgreen"
|
| 55 |
-
elif ai_prob <
|
| 56 |
return "Possibly AI-generated or human using AI assistance.", "orange"
|
| 57 |
else:
|
| 58 |
return "Likely AI-generated or heavily AI-assisted.", "red"
|
| 59 |
|
|
|
|
|
|
|
|
|
|
| 60 |
def analyze_text(user_text):
|
| 61 |
sentences = simple_sent_tokenize(user_text)
|
| 62 |
if not sentences:
|
| 63 |
return {"error": "Please enter some text."}
|
| 64 |
|
| 65 |
sentence_probs = [sentence_score(s) for s in sentences]
|
| 66 |
-
final_ai =
|
| 67 |
final_human = 1 - final_ai
|
| 68 |
-
verdict_text, verdict_color =
|
| 69 |
|
| 70 |
-
#
|
| 71 |
sentence_details = []
|
| 72 |
for s, p in zip(sentences, sentence_probs):
|
| 73 |
-
s_verdict, s_color =
|
| 74 |
sentence_details.append({
|
| 75 |
"sentence": s,
|
| 76 |
"AI Probability": round(p*100,2),
|
|
@@ -90,7 +100,7 @@ def analyze_text(user_text):
|
|
| 90 |
# Gradio UI
|
| 91 |
# -------------------------------
|
| 92 |
with gr.Blocks() as demo:
|
| 93 |
-
gr.Markdown("# 🌐
|
| 94 |
user_input = gr.Textbox(label="Enter Text", placeholder="Paste text here...", lines=12, type="text")
|
| 95 |
run_btn = gr.Button("Run Detection")
|
| 96 |
output = gr.JSON(label="Results")
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
+
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
| 3 |
import torch
|
| 4 |
import math
|
| 5 |
import re
|
| 6 |
+
import numpy as np
|
| 7 |
|
| 8 |
# -------------------------------
|
| 9 |
# Sentence splitter (no nltk)
|
|
|
|
| 13 |
return [s for s in sentences if s.strip()]
|
| 14 |
|
| 15 |
# -------------------------------
|
| 16 |
+
# -------------------------------
# Load Pre-trained Model (Template for Fine-tuning)
# -------------------------------
# NOTE(review): "roberta-large" is a base checkpoint — with num_labels=2 the
# classification head is freshly initialized, so scores are meaningless until
# this is replaced by a genuinely fine-tuned AI-detector checkpoint.
model_name = "roberta-large"  # Replace with your fine-tuned AI detector
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
|
| 22 |
# -------------------------------
|
| 23 |
+
# -------------------------------
# GPT-2 Perplexity Helper (Optional)
# -------------------------------
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Loaded once at import time; reused by compute_perplexity for every sentence.
gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
gpt2_model = GPT2LMHeadModel.from_pretrained("gpt2")
|
| 28 |
+
|
| 29 |
def compute_perplexity(text):
    """Return the GPT-2 perplexity of *text* (exp of the mean LM loss).

    The input is truncated to 512 tokens. Lower perplexity means the text
    is more predictable to GPT-2, which this app treats as AI-suspicious.

    Raises on empty input (no tokens to score) — callers pass non-empty
    sentences only.
    """
    enc = gpt2_tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
    input_ids = enc.input_ids
    # Inference only: no_grad avoids building an autograd graph for every
    # scored sentence (saves memory and time).
    with torch.no_grad():
        loss = gpt2_model(input_ids, labels=input_ids).loss
    return math.exp(loss.item())
|
| 35 |
|
| 36 |
+
# -------------------------------
|
| 37 |
+
# Sentence-level scoring (Stacked Classifier Template)
|
| 38 |
+
# -------------------------------
|
| 39 |
def sentence_score(sentence):
    """Score one sentence for AI-likeness, returning a float in [0, 1].

    Blends the classifier's softmax probability for label index 1
    (treated as "AI") with a normalized GPT-2 perplexity feature,
    weighted 0.8 / 0.2.
    """
    # Classifier signal: probability that label 1 ("AI") applies.
    encoded = tokenizer(sentence, return_tensors="pt", truncation=True, max_length=512)
    with torch.no_grad():
        output = model(**encoded)
    ai_prob = torch.softmax(output.logits, dim=1)[0, 1].item()

    # Perplexity signal: low GPT-2 perplexity -> value closer to 1.
    perplexity = compute_perplexity(sentence)
    ppl_feature = min(1, max(0, 100 / perplexity))  # clamp to [0, 1]

    # Weighted stacking of the two signals (adjust weights after fine-tuning).
    return ai_prob * 0.8 + ppl_feature * 0.2
|
| 53 |
+
|
| 54 |
+
# -------------------------------
|
| 55 |
+
# Tuned Verdicts
|
| 56 |
+
# -------------------------------
|
| 57 |
+
def verdict_95(ai_prob):
    """Map an AI-probability in [0, 1] to a ``(verdict text, color)`` pair.

    Bands (upper bound exclusive): <0.3 green, <0.5 yellowgreen,
    <0.7 orange, otherwise red.
    """
    bands = (
        (0.3, "Most likely human-written.", "green"),
        (0.5, "Possibly human-written with minimal AI assistance.", "yellowgreen"),
        (0.7, "Possibly AI-generated or human using AI assistance.", "orange"),
    )
    for upper, text, color in bands:
        if ai_prob < upper:
            return text, color
    return "Likely AI-generated or heavily AI-assisted.", "red"
|
| 66 |
|
| 67 |
+
# -------------------------------
|
| 68 |
+
# Main Analysis Function
|
| 69 |
+
# -------------------------------
|
| 70 |
def analyze_text(user_text):
|
| 71 |
sentences = simple_sent_tokenize(user_text)
|
| 72 |
if not sentences:
|
| 73 |
return {"error": "Please enter some text."}
|
| 74 |
|
| 75 |
sentence_probs = [sentence_score(s) for s in sentences]
|
| 76 |
+
final_ai = np.mean(sentence_probs)
|
| 77 |
final_human = 1 - final_ai
|
| 78 |
+
verdict_text, verdict_color = verdict_95(final_ai)
|
| 79 |
|
| 80 |
+
# Sentence-level colored verdicts
|
| 81 |
sentence_details = []
|
| 82 |
for s, p in zip(sentences, sentence_probs):
|
| 83 |
+
s_verdict, s_color = verdict_95(p)
|
| 84 |
sentence_details.append({
|
| 85 |
"sentence": s,
|
| 86 |
"AI Probability": round(p*100,2),
|
|
|
|
| 100 |
# Gradio UI
|
| 101 |
# -------------------------------
|
| 102 |
with gr.Blocks() as demo:
|
| 103 |
+
gr.Markdown("# 🌐 High-Accuracy Universal AI vs Human Text Detector")
|
| 104 |
user_input = gr.Textbox(label="Enter Text", placeholder="Paste text here...", lines=12, type="text")
|
| 105 |
run_btn = gr.Button("Run Detection")
|
| 106 |
output = gr.JSON(label="Results")
|