Update app.py
app.py CHANGED

@@ -1,129 +1,120 @@
- import torch
- import torch.nn.functional as F
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
  import re
- import
  import gradio as gr

  # -----------------------------
- #
  # -----------------------------
- MODEL_NAME = "fakespot-ai/roberta-base-ai-text-detection-v1"
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- dtype = torch.bfloat16 if (device.type == "cuda" and torch.cuda.is_bf16_supported()) else torch.float32
- model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, dtype=dtype).to(device).eval()

- # -----------------------------
- THRESHOLD = 0.70  # used only for per-sentence "AI/Human" tags & color

-         return ""
-     t = re.sub(r"\s*\n+\s*", " ", t)                # normalize newlines
-     t = t.replace("...", "⟨ELLIPSIS⟩")              # ellipses
-     t = re.sub(r"(?<=\d)\.(?=\d)", "⟨DECIMAL⟩", t)  # decimals like 3.14
-     t = ABBR_REGEX.sub(r"\1⟨ABBRDOT⟩", t)           # abbreviations' dot
-     return t
-
- def _restore(text: str) -> str:
-     return (text
-             .replace("⟨ABBRDOT⟩", ".")
-             .replace("⟨DECIMAL⟩", ".")
-             .replace("⟨ELLIPSIS⟩", "..."))
-
- def sentence_split(text: str):
-     t = _protect(text)
-     if not t:
-         return []
-     # split on [.?!] followed by whitespace and likely sentence start or end
-     parts = re.split(r"([.?!])\s+(?=(?:[\"“”‘’']?\s*[A-Z(])|$)", t)
-     sentences, buf = [], ""
-     for i, chunk in enumerate(parts):
-         if i % 2 == 0:
-             buf += chunk
-         else:
-             buf += chunk
-             sentences.append(buf.strip()); buf = ""
-     if buf.strip():
-         sentences.append(buf.strip())
-     return [_restore(s).strip() for s in sentences if s.strip()]

  # -----------------------------
- #
  # -----------------------------

  # -----------------------------
- #
  # -----------------------------
- with gr.Blocks() as demo:
-     gr.Markdown("### 🕵️ AI Written Text Detector — Fakespot Model")

-     table = gr.Dataframe(headers=["#", "Sentence", "AI_Prob", "Label"], wrap=True)

  if __name__ == "__main__":
-     demo.launch()
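For reference, the `sentence_split` helper removed above keeps punctuation attached to each sentence because `re.split` returns the captured [.?!] at the odd indexes. A minimal sketch of just the visible regex (the `_protect` definition and `ABBR_REGEX` were elided from this diff, so they are not exercised here):

    import re

    # Captured punctuation lands at odd indexes; a lowercase continuation does not split
    parts = re.split(r"([.?!])\s+(?=(?:[\"“”‘’']?\s*[A-Z(])|$)",
                     "First one. Second one? third stays")
    print(parts)  # ['First one', '.', 'Second one? third stays']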
  import re
+ import os  # needed for the PORT lookup in the launch call below
+ from typing import List, Tuple
  import gradio as gr
+ from transformers import pipeline

  # -----------------------------
+ # Model & simple pre-processing
  # -----------------------------

+ MODEL_ID = "fakespot-ai/roberta-base-ai-text-detection-v1"
+ clf = pipeline("text-classification", model=MODEL_ID)

+ def clean_text(s: str) -> str:
+     # Collapse whitespace runs to single spaces
+     s = s.strip()
+     s = re.sub(r"\s+", " ", s)
+     return s
+
+ def chunk_text(text: str, max_words: int = 300) -> List[str]:
+     # Split long inputs into ~300-word chunks the model can handle
+     words = text.split()
+     if len(words) <= max_words:
+         return [" ".join(words)]
+     chunks = []
+     for i in range(0, len(words), max_words):
+         chunks.append(" ".join(words[i : i + max_words]))
+     return chunks
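A quick sanity check of the new chunker (a minimal sketch using the `chunk_text` added above):

    # 650 words at the default max_words=300 should yield word counts of 300, 300, 50
    text = " ".join(["word"] * 650)
    assert [len(c.split()) for c in chunk_text(text)] == [300, 300, 50]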

  # -----------------------------
+ # Core inference
  # -----------------------------
+
+ def detect_ai(text: str) -> Tuple[str, float, str]:
+     """
+     Returns (label, score_float, explanation).
+     """
+     if not text or not text.strip():
+         return "—", 0.0, "Please paste some text to analyze."
+
+     chunks = [clean_text(c) for c in chunk_text(text, max_words=300)]
+     # Run the model on all chunks in one batch for speed; truncation keeps
+     # any over-long chunk within the model's max sequence length
+     preds = clf(chunks, truncation=True)
+
+     # Aggregate: the model returns a label and score per chunk;
+     # map each to an AI probability (AI=1, Human=0) and average
+     ai_probs = []
+     for p in preds:
+         # Some models return 'AI'/'Human' labels; normalize case first
+         label = p.get("label", "").upper()
+         score = float(p.get("score", 0.0))
+         ai_prob = score if label.startswith("AI") else (1.0 - score)
+         ai_probs.append(ai_prob)
+
+     mean_ai = sum(ai_probs) / len(ai_probs)
+     label = "AI" if mean_ai >= 0.5 else "Human"
+
+     # Lightweight heuristic explanation (no extra LLM needed)
+     explanation = build_explanation(text, mean_ai, len(chunks))
+
+     return label, float(mean_ai), explanation
+
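To make the aggregation concrete, a worked example with made-up scores (not real model output):

    # One chunk labeled AI at 0.9, one labeled HUMAN at 0.8:
    # AI probabilities become [0.9, 1.0 - 0.8] = [0.9, 0.2], mean 0.55, so "AI"
    preds = [{"label": "AI", "score": 0.9}, {"label": "HUMAN", "score": 0.8}]
    ai_probs = [p["score"] if p["label"].startswith("AI") else 1.0 - p["score"] for p in preds]
    assert abs(sum(ai_probs) / len(ai_probs) - 0.55) < 1e-9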
+ def build_explanation(text: str, ai_prob: float, n_chunks: int) -> str:
+     # Surface simple stylometric cues: sentence length and lexical variety
+     words = re.findall(r"\w+", text)
+     sentences = re.split(r"[.!?]+", text)
+     words = [w for w in words if w.strip()]
+     sentences = [s for s in sentences if s.strip()]
+
+     avg_len = (sum(len(s.split()) for s in sentences) / max(1, len(sentences))) if sentences else 0
+     vocab = set(w.lower() for w in words)
+     ttr = len(vocab) / max(1, len(words))  # type-token ratio
+
+     cues = []
+     if ai_prob >= 0.75:
+         cues.append("very strong statistical signal matching AI-generated patterns")
+     elif ai_prob >= 0.6:
+         cues.append("moderate signal matching AI-generated patterns")
+     elif ai_prob <= 0.25:
+         cues.append("very low likelihood of AI; text patterns align with human writing")
+     else:
+         cues.append("mixed indicators, borderline case")
+
+     if avg_len > 25:
+         cues.append("longer-than-usual sentences")
+     elif avg_len < 10:
+         cues.append("very short, choppy sentences")
+
+     if ttr < 0.35:
+         cues.append("lower lexical variety")
+     elif ttr > 0.6:
+         cues.append("high lexical variety")
+
+     cues.append(f"analyzed in {n_chunks} chunk(s)")
+
+     return (
+         f"Overall this text is estimated to be {ai_prob:.2%} likely AI-generated. "
+         "Notable cues: " + "; ".join(cues) + ". "
+         "Remember: detectors can be wrong—use results as a hint, not proof."
+     )

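The type-token ratio used above is simply unique words over total words; for example:

    words = "the cat sat on the mat".split()
    ttr = len(set(words)) / len(words)  # 5 unique / 6 total ≈ 0.83, i.e. "high lexical variety"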
  # -----------------------------
+ # Gradio UI
  # -----------------------------

+ with gr.Blocks(title="AI Text Detector") as demo:
+     gr.Markdown("## 🕵️ AI Text Detector (Simple)\nPaste text and get an approximate AI-likeness score.\n"
+                 "> Model: `fakespot-ai/roberta-base-ai-text-detection-v1`")
+
+     with gr.Row():
+         inp = gr.Textbox(label="Input Text", lines=14, placeholder="Paste your text here...")
+     with gr.Row():
+         label_out = gr.Label(label="Predicted Class")
+         score_out = gr.Slider(label="AI Likelihood", minimum=0.0, maximum=1.0, step=0.001, interactive=False)
+     explain = gr.Textbox(label="Explanation", lines=6)

+     def _run(t):
+         label, score, expl = detect_ai(t)
+         # Returning a dict keyed by output components lets Gradio route each value
+         return {label_out: {label: 1.0}, score_out: score, explain: expl}

+     gr.Button("Analyze").click(_run, inputs=inp, outputs=[label_out, score_out, explain])

  if __name__ == "__main__":
+     demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", 7860)))
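One caveat on the new `detect_ai`: it assumes the model's AI-class label string starts with "AI". A quick way to check the actual label names rather than assume them (requires access to the Hugging Face Hub):

    from transformers import AutoConfig

    config = AutoConfig.from_pretrained("fakespot-ai/roberta-base-ai-text-detection-v1")
    print(config.id2label)  # the model's actual output label strings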