Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,133 +1,129 @@
|
|
| 1 |
-
import
|
|
|
|
|
|
|
| 2 |
import re
|
| 3 |
-
|
| 4 |
-
|
| 5 |
import gradio as gr
|
| 6 |
-
from transformers import pipeline
|
| 7 |
|
| 8 |
# -----------------------------
|
| 9 |
-
#
|
| 10 |
# -----------------------------
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
def clean_text(s: str) -> str:
    """Trim the string and collapse every internal whitespace run to one space."""
    collapsed = re.sub(r"\s+", " ", s.strip())
    return collapsed
|
| 21 |
-
|
| 22 |
-
def chunk_text(text: str, max_words: int = 300) -> list[str]:
    """Split *text* into chunks of at most *max_words* whitespace-separated words.

    Uses the builtin ``list[str]`` generic: no ``typing`` import is visible in
    this file, so the original ``List[str]`` annotation would raise NameError.

    Args:
        text: Input text; word boundaries are any whitespace.
        max_words: Maximum words per chunk (must be positive).

    Returns:
        A list of space-joined chunks. For text that fits in one chunk
        (including empty text) a single-element list is returned, preserving
        the original behavior of returning [""] for empty input.
    """
    words = text.split()
    # Short-circuit keeps the historical contract: empty text -> [""].
    if len(words) <= max_words:
        return [" ".join(words)]
    return [
        " ".join(words[i : i + max_words])
        for i in range(0, len(words), max_words)
    ]
|
| 30 |
|
| 31 |
# -----------------------------
|
| 32 |
-
#
|
| 33 |
# -----------------------------
|
| 34 |
-
|
| 35 |
-
def detect_ai(text: str) -> tuple[str, float, str]:
    """
    Classify *text* as AI- or human-written using the module-level `clf` pipeline.

    Uses the builtin ``tuple`` generic: no ``typing`` import is visible in this
    file, so the original ``Tuple[...]`` annotation would raise NameError.

    Returns (label, score_float, explanation)
    - label: "AI" or "Human"
    - score_float: mean AI likelihood in [0,1]
    - explanation: short narrative with a few heuristic cues
    """
    # Empty / whitespace-only input: nothing to classify.
    if not text or not text.strip():
        return "—", 0.0, "Please paste some text to analyze."

    # Score each ~300-word chunk independently, then average the chunks.
    chunks = [clean_text(c) for c in chunk_text(text, max_words=300)]
    preds = clf(chunks)

    ai_probs = []
    for p in preds:
        label = str(p.get("label", "")).upper()
        score = float(p.get("score", 0.0))
        # The pipeline reports the probability of its *predicted* label;
        # fold it into "probability of AI" regardless of which label won.
        ai_prob = score if label.startswith("AI") else (1.0 - score)
        ai_probs.append(ai_prob)

    mean_ai = sum(ai_probs) / len(ai_probs)
    label = "AI" if mean_ai >= 0.5 else "Human"

    explanation = build_explanation(text, mean_ai, len(chunks))
    return label, float(mean_ai), explanation
|
| 60 |
-
|
| 61 |
-
def build_explanation(text: str, ai_prob: float, n_chunks: int) -> str:
    """Compose a short human-readable rationale for an AI-likelihood score.

    Combines the model probability with two cheap stylometric cues
    (average sentence length and type-token ratio) into one sentence.
    """
    tokens = [w for w in re.findall(r"\w+", text) if w.strip()]
    sents = [s for s in re.split(r"[.!?]+", text) if s.strip()]

    if sents:
        avg_len = sum(len(s.split()) for s in sents) / max(1, len(sents))
    else:
        avg_len = 0
    distinct = {w.lower() for w in tokens}
    ttr = len(distinct) / max(1, len(tokens))  # type-token ratio

    cues = []
    # Probability band -> headline cue.
    if ai_prob >= 0.75:
        cues.append("very strong statistical signal matching AI-generated patterns")
    elif ai_prob >= 0.6:
        cues.append("moderate signal matching AI-generated patterns")
    elif ai_prob <= 0.25:
        cues.append("very low likelihood of AI, text patterns align with human writing")
    else:
        cues.append("mixed indicators, borderline case")

    # Sentence-length cue (only fires at the extremes).
    if avg_len > 25:
        cues.append("longer-than-usual sentences")
    elif avg_len < 10:
        cues.append("very short, choppy sentences")

    # Lexical-variety cue (only fires at the extremes).
    if ttr < 0.35:
        cues.append("lower lexical variety")
    elif ttr > 0.6:
        cues.append("high lexical variety")

    cues.append(f"analyzed in {n_chunks} chunk(s)")

    summary = f"Overall this text is estimated to be {ai_prob:.2%} likely AI-generated. "
    summary += "Notable cues: " + "; ".join(cues) + ". "
    summary += "Reminder: detectors can be wrong—use results as a hint, not proof."
    return summary
|
| 101 |
|
| 102 |
# -----------------------------
|
| 103 |
-
#
|
| 104 |
# -----------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 105 |
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 115 |
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
|
|
|
|
|
|
| 119 |
|
| 120 |
-
|
|
|
|
| 121 |
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
|
|
|
| 125 |
|
| 126 |
-
|
| 127 |
|
| 128 |
if __name__ == "__main__":
    # Honour a platform-provided PORT, defaulting to Gradio's usual 7860.
    port = int(os.getenv("PORT", 7860))
    demo.queue().launch(server_name="0.0.0.0", server_port=port)
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn.functional as F
|
| 3 |
+
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
| 4 |
import re
|
| 5 |
+
import pandas as pd
|
|
|
|
| 6 |
import gradio as gr
|
|
|
|
| 7 |
|
| 8 |
# -----------------------------
# MODEL (Fakespot 2025)
# -----------------------------
# Hugging Face checkpoint: a binary sequence classifier (Human vs AI).
MODEL_NAME = "fakespot-ai/roberta-base-ai-text-detection-v1"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
# Prefer GPU when present; otherwise run on CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# bfloat16 only on CUDA devices that support it; fp32 everywhere else.
dtype = torch.bfloat16 if (device.type=="cuda" and torch.cuda.is_bf16_supported()) else torch.float32
# NOTE(review): the `dtype=` kwarg to from_pretrained is only accepted by
# recent transformers releases (older versions spell it `torch_dtype=`) —
# confirm the Space's pinned transformers version supports it.
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, dtype=dtype).to(device).eval()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
|
| 17 |
# -----------------------------
# INTERNAL THRESHOLD for sentence labels/colors
# -----------------------------
# Probability cut-off above which a sentence is tagged "AI" (and coloured
# red); `analyze` also compares the averaged score against it for the
# overall verdict, so it is not strictly per-sentence-only.
THRESHOLD = 0.70  # per-sentence "AI/Human" tag & colour cut-off; also used for the overall verdict
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
|
| 22 |
# -----------------------------
|
| 23 |
+
# SENTENCE SPLITTER (protect → split → restore; no lookbehinds)
|
| 24 |
# -----------------------------
|
| 25 |
+
ABBR = [
|
| 26 |
+
"e.g", "i.e", "mr", "mrs", "ms", "dr", "prof", "vs", "etc", "fig", "al",
|
| 27 |
+
"jr", "sr", "st", "no", "vol", "pp", "mt", "inc", "ltd", "co", "u.s", "u.k",
|
| 28 |
+
"a.m", "p.m"
|
| 29 |
+
]
|
| 30 |
+
ABBR_REGEX = re.compile(r"\b(" + "|".join(map(re.escape, ABBR)) + r")\.", flags=re.IGNORECASE)
|
| 31 |
+
|
| 32 |
+
def _protect(text: str) -> str:
|
| 33 |
+
t = text.strip()
|
| 34 |
+
if not t:
|
| 35 |
+
return ""
|
| 36 |
+
t = re.sub(r"\s*\n+\s*", " ", t) # normalize newlines
|
| 37 |
+
t = t.replace("...", "⟨ELLIPSIS⟩") # ellipses
|
| 38 |
+
t = re.sub(r"(?<=\d)\.(?=\d)", "⟨DECIMAL⟩", t) # decimals like 3.14
|
| 39 |
+
t = ABBR_REGEX.sub(r"\1⟨ABBRDOT⟩", t) # abbreviations' dot
|
| 40 |
+
return t
|
| 41 |
+
|
| 42 |
+
def _restore(text: str) -> str:
|
| 43 |
+
return (text
|
| 44 |
+
.replace("⟨ABBRDOT⟩", ".")
|
| 45 |
+
.replace("⟨DECIMAL⟩", ".")
|
| 46 |
+
.replace("⟨ELLIPSIS⟩", "..."))
|
| 47 |
+
|
| 48 |
+
def sentence_split(text: str):
    """Split *text* into sentences via protect → regex split → restore.

    Returns a list of stripped sentences; [] for blank input.
    """
    protected = _protect(text)
    if not protected:
        return []
    # Split on [.?!] followed by whitespace and a likely sentence start
    # (optional quote then capital/paren) or end of string. The capturing
    # group keeps the terminator so it can be re-attached below.
    pieces = re.split(r"([.?!])\s+(?=(?:[\"“”‘’']?\s*[A-Z(])|$)", protected)
    collected = []
    current = ""
    for index, piece in enumerate(pieces):
        current += piece
        # Odd-indexed entries are the captured terminators: a sentence ends.
        if index % 2 == 1:
            collected.append(current.strip())
            current = ""
    # Trailing text with no terminator is still a sentence.
    if current.strip():
        collected.append(current.strip())
    return [_restore(s).strip() for s in collected if s.strip()]
|
| 64 |
|
| 65 |
+
# -----------------------------
# CORE: overall AI score + highlights
# -----------------------------
def analyze(text, max_len=512):
    """Run the detector over each sentence of *text*.

    Returns a 4-tuple matching the Gradio outputs:
    (overall verdict string, overall AI percentage string,
     per-sentence HTML highlights, pandas DataFrame of rows);
    placeholder values when the input has no sentences.
    """
    sents = sentence_split(text)
    if not sents:
        return "—", "—", "<em>Paste some text to analyze.</em>", None

    # light clean (per model card vibe)
    tidy = [re.sub(r"\s+", " ", s).strip() for s in sents]

    batch = tokenizer(
        tidy, return_tensors="pt", padding=True, truncation=True, max_length=max_len
    ).to(device)

    with torch.no_grad():
        outputs = model(**batch)
        probs = F.softmax(outputs.logits, dim=-1)  # [:,0]=Human, [:,1]=AI

    ai_probs = probs[:, 1].detach().cpu().tolist()
    overall_ai = sum(ai_probs) / len(ai_probs)
    overall_pct = f"{overall_ai * 100:.1f}%"
    overall_label = "🤖 Likely AI Written" if overall_ai >= THRESHOLD else "🧒 Likely Human Written"

    # Per-sentence highlights (use THRESHOLD only for the tag/color)
    rows = []
    highlights = []
    for idx, (sentence, prob) in enumerate(zip(sents, ai_probs), start=1):
        ai_p = float(prob)
        label = "AI" if ai_p >= THRESHOLD else "Human"
        pct = f"{ai_p*100:.1f}%"
        # Traffic-light colour scale for the highlight tag.
        if ai_p < 0.30:
            color = "#11823b"  # green
        elif ai_p < 0.70:
            color = "#b8860b"  # amber
        else:
            color = "#b80d0d"  # red
        normalized = re.sub(r"\s+", " ", sentence)
        highlights.append(
            "<div style='margin:6px 0; padding:6px 8px; border-radius:6px; background:rgba(0,0,0,0.03)'>"
            f"<strong style='color:{color}'>[{pct} {label}]</strong> {normalized}</div>"
        )
        rows.append([idx, sentence, round(ai_p, 4), label])

    html = "\n".join(highlights)
    df = pd.DataFrame(rows, columns=["#", "Sentence", "AI_Prob", "Label"])
    return overall_label, overall_pct, html, df
|
| 111 |
|
| 112 |
+
# -----------------------------
# GRADIO UI (verdict + score, plus highlights)
# -----------------------------
with gr.Blocks() as demo:
    gr.Markdown("### 🕵️ AI Written Text Detector — Fakespot Model")

    # Single free-text input plus a manual trigger button.
    text_input = gr.Textbox(label="Paste text", lines=14, placeholder="Your content…")
    btn = gr.Button("Analyze")

    # Four outputs, matching analyze()'s 4-tuple return in order.
    verdict = gr.Label(label="Verdict (Overall)")
    score = gr.Label(label="AI Score (Average across sentences)")
    highlights = gr.HTML(label="Per-Sentence Highlights")
    table = gr.Dataframe(headers=["#", "Sentence", "AI_Prob", "Label"], wrap=True)

    # Wire the button to analyze(); outputs map 1:1 to its return tuple.
    btn.click(analyze, inputs=[text_input], outputs=[verdict, score, highlights, table])
|
| 127 |
|
| 128 |
if __name__ == "__main__":
    # Blocking call: serves the Gradio app on its default host/port.
    demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|