VictorM-Coder committed
Commit c40b953 · verified · 1 Parent(s): 27d1d53

Update app.py

Files changed (1):
  1. app.py +102 -111
app.py CHANGED
@@ -1,129 +1,120 @@
- import torch
- import torch.nn.functional as F
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
  import re
- import pandas as pd
  import gradio as gr

  # -----------------------------
- # MODEL (Fakespot 2025)
  # -----------------------------
- MODEL_NAME = "fakespot-ai/roberta-base-ai-text-detection-v1"
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- dtype = torch.bfloat16 if (device.type == "cuda" and torch.cuda.is_bf16_supported()) else torch.float32
- model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, dtype=dtype).to(device).eval()

- # -----------------------------
- # INTERNAL THRESHOLD for sentence labels/colors
- # -----------------------------
- THRESHOLD = 0.70  # used only for per-sentence "AI/Human" tags & color

- # -----------------------------
- # SENTENCE SPLITTER (protect → split → restore; no lookbehinds)
- # -----------------------------
- ABBR = [
-     "e.g", "i.e", "mr", "mrs", "ms", "dr", "prof", "vs", "etc", "fig", "al",
-     "jr", "sr", "st", "no", "vol", "pp", "mt", "inc", "ltd", "co", "u.s", "u.k",
-     "a.m", "p.m"
- ]
- ABBR_REGEX = re.compile(r"\b(" + "|".join(map(re.escape, ABBR)) + r")\.", flags=re.IGNORECASE)
-
- def _protect(text: str) -> str:
-     t = text.strip()
-     if not t:
-         return ""
-     t = re.sub(r"\s*\n+\s*", " ", t)                # normalize newlines
-     t = t.replace("...", "⟨ELLIPSIS⟩")              # ellipses
-     t = re.sub(r"(?<=\d)\.(?=\d)", "⟨DECIMAL⟩", t)  # decimals like 3.14
-     t = ABBR_REGEX.sub(r"\1⟨ABBRDOT⟩", t)           # abbreviations' dot
-     return t
-
- def _restore(text: str) -> str:
-     return (text
-             .replace("⟨ABBRDOT⟩", ".")
-             .replace("⟨DECIMAL⟩", ".")
-             .replace("⟨ELLIPSIS⟩", "..."))
-
- def sentence_split(text: str):
-     t = _protect(text)
-     if not t:
-         return []
-     # split on [.?!] followed by whitespace and likely sentence start or end
-     parts = re.split(r"([.?!])\s+(?=(?:[\"“”‘’']?\s*[A-Z(])|$)", t)
-     sentences, buf = [], ""
-     for i, chunk in enumerate(parts):
-         if i % 2 == 0:
-             buf += chunk
-         else:
-             buf += chunk
-             sentences.append(buf.strip()); buf = ""
-     if buf.strip():
-         sentences.append(buf.strip())
-     return [_restore(s).strip() for s in sentences if s.strip()]

  # -----------------------------
- # CORE: overall AI score + highlights
  # -----------------------------
- def analyze(text, max_len=512):
-     sents = sentence_split(text)
-     if not sents:
-         return "—", "—", "<em>Paste some text to analyze.</em>", None
-
-     # light clean (per model card vibe)
-     clean_sents = [re.sub(r"\s+", " ", s).strip() for s in sents]
-
-     inputs = tokenizer(
-         clean_sents, return_tensors="pt", padding=True, truncation=True, max_length=max_len
-     ).to(device)
-
-     with torch.no_grad():
-         logits = model(**inputs).logits
-         probs = F.softmax(logits, dim=-1)  # [:,0]=Human, [:,1]=AI
-
-     ai_probs = probs[:, 1].detach().cpu().tolist()
-     overall_ai = sum(ai_probs) / len(ai_probs)
-     overall_pct = f"{overall_ai * 100:.1f}%"
-     overall_label = "🤖 Likely AI Written" if overall_ai >= THRESHOLD else "🧒 Likely Human Written"
-
-     # Per-sentence highlights (use THRESHOLD only for the tag/color)
-     rows, highlights = [], []
-     for i, orig in enumerate(sents, start=1):
-         ai_p = float(ai_probs[i-1])
-         label = "AI" if ai_p >= THRESHOLD else "Human"
-         pct = f"{ai_p*100:.1f}%"
-         if ai_p < 0.30:
-             color = "#11823b"  # green
-         elif ai_p < 0.70:
-             color = "#b8860b"  # amber
-         else:
-             color = "#b80d0d"  # red
-         normalized = re.sub(r"\s+", " ", orig)
-         highlights.append(
-             "<div style='margin:6px 0; padding:6px 8px; border-radius:6px; background:rgba(0,0,0,0.03)'>"
-             f"<strong style='color:{color}'>[{pct} {label}]</strong> {normalized}</div>"
-         )
-         rows.append([i, orig, round(ai_p, 4), label])
-
-     html = "\n".join(highlights)
-     df = pd.DataFrame(rows, columns=["#", "Sentence", "AI_Prob", "Label"])
-     return overall_label, overall_pct, html, df

  # -----------------------------
- # GRADIO UI (verdict + score, plus highlights)
  # -----------------------------
- with gr.Blocks() as demo:
-     gr.Markdown("### 🕵️ AI Written Text Detector — Fakespot Model")

-     text_input = gr.Textbox(label="Paste text", lines=14, placeholder="Your content…")
-     btn = gr.Button("Analyze")

-     verdict = gr.Label(label="Verdict (Overall)")
-     score = gr.Label(label="AI Score (Average across sentences)")
-     highlights = gr.HTML(label="Per-Sentence Highlights")
-     table = gr.Dataframe(headers=["#", "Sentence", "AI_Prob", "Label"], wrap=True)

-     btn.click(analyze, inputs=[text_input], outputs=[verdict, score, highlights, table])

  if __name__ == "__main__":
-     demo.launch()

  import re
+ import os  # needed for os.getenv("PORT", ...) in demo.launch() below
+ from typing import List, Tuple
  import gradio as gr
+ from transformers import pipeline

  # -----------------------------
+ # Model & simple pre-processing
  # -----------------------------

+ MODEL_ID = "fakespot-ai/roberta-base-ai-text-detection-v1"
+ clf = pipeline("text-classification", model=MODEL_ID)
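+ # Optional: pipeline() defaults to CPU; pass device=0 (or device="cuda") to
+ # run on a GPU, which the removed version selected manually via torch.device.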

+ def clean_text(s: str) -> str:
+     s = s.strip()
+     s = re.sub(r"\s+", " ", s)
+     return s
+
+ def chunk_text(text: str, max_words: int = 300) -> List[str]:
+     words = text.split()
+     if len(words) <= max_words:
+         return [" ".join(words)]
+     chunks = []
+     for i in range(0, len(words), max_words):
+         chunks.append(" ".join(words[i : i + max_words]))
+     return chunks
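+ # Worked example: a 650-word input becomes three chunks of 300, 300, and 50 words.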

  # -----------------------------
+ # Core inference
  # -----------------------------
+
+ def detect_ai(text: str) -> Tuple[str, float, str]:
+     """
+     Returns (label, score_float, explanation).
+     """
+     if not text or not text.strip():
+         return "", 0.0, "Please paste some text to analyze."
+
+     chunks = [clean_text(c) for c in chunk_text(text, max_words=300)]
+     # Run the model on all chunks in one batch for speed; truncate any chunk
+     # that still exceeds the model's 512-token limit.
+     preds = clf(chunks, truncation=True)
+
+     # Aggregate: convert each chunk's (label, score) pair to an AI probability
+     # (AI -> score, Human -> 1 - score) and average across chunks.
+     ai_probs = []
+     for p in preds:
+         # Some models return 'AI'/'Human' labels; normalize
+         label = p.get("label", "").upper()
+         score = float(p.get("score", 0.0))
+         ai_prob = score if label.startswith("AI") else (1.0 - score)
+         ai_probs.append(ai_prob)
+
+     mean_ai = sum(ai_probs) / len(ai_probs)
+     label = "AI" if mean_ai >= 0.5 else "Human"
+
+     # Lightweight heuristic explanation (no extra LLM needed)
+     explanation = build_explanation(text, mean_ai, len(chunks))
+
+     return label, float(mean_ai), explanation
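+ # Worked example: chunk AI-probabilities [0.90, 0.20] average to 0.55, which
+ # crosses the 0.5 cutoff, so the overall label is "AI".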
+
+ def build_explanation(text: str, ai_prob: float, n_chunks: int) -> str:
+     words = re.findall(r"\w+", text)
+     sentences = re.split(r"[.!?]+", text)
+     words = [w for w in words if w.strip()]
+     sentences = [s for s in sentences if s.strip()]
+
+     avg_len = (sum(len(s.split()) for s in sentences) / max(1, len(sentences))) if sentences else 0
+     vocab = set(w.lower() for w in words)
+     ttr = len(vocab) / max(1, len(words))  # type-token ratio
+
+     cues = []
+     if ai_prob >= 0.75:
+         cues.append("very strong statistical signal matching AI-generated patterns")
+     elif ai_prob >= 0.6:
+         cues.append("moderate signal matching AI-generated patterns")
+     elif ai_prob <= 0.25:
+         cues.append("very low likelihood of AI, text patterns align with human writing")
+     else:
+         cues.append("mixed indicators, borderline case")
+
+     if avg_len > 25:
+         cues.append("longer-than-usual sentences")
+     elif avg_len < 10:
+         cues.append("very short, choppy sentences")
+
+     if ttr < 0.35:
+         cues.append("lower lexical variety")
+     elif ttr > 0.6:
+         cues.append("high lexical variety")
+
+     cues.append(f"analyzed in {n_chunks} chunk(s)")
+
+     return (
+         f"Overall this text is estimated to be {ai_prob:.2%} likely AI-generated. "
+         f"Notable cues: " + "; ".join(cues) + ". "
+         "Remember: detectors can be wrong—use results as a hint, not proof."
+     )
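+ # Worked example: with ai_prob=0.82, an average sentence length of 30 words,
+ # a type-token ratio of 0.30, and a single chunk, the cues read: "very strong
+ # statistical signal matching AI-generated patterns; longer-than-usual
+ # sentences; lower lexical variety; analyzed in 1 chunk(s)".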

  # -----------------------------
+ # Gradio UI
  # -----------------------------

+ with gr.Blocks(title="AI Text Detector") as demo:
+     gr.Markdown("## 🕵️ AI Text Detector (Simple)\nPaste text and get an approximate AI-likeness score.\n"
+                 "> Model: `fakespot-ai/roberta-base-ai-text-detection-v1`")
+
+     with gr.Row():
+         inp = gr.Textbox(label="Input Text", lines=14, placeholder="Paste your text here...")
+     with gr.Row():
+         label_out = gr.Label(label="Predicted Class")
+         score_out = gr.Slider(label="AI Likelihood", minimum=0.0, maximum=1.0, step=0.001, interactive=False)
+     explain = gr.Textbox(label="Explanation", lines=6)
+
+     def _run(t):
+         label, score, expl = detect_ai(t)
+         # Guard the empty-input case: detect_ai returns label == "", which
+         # would otherwise render as a blank class name in gr.Label.
+         return {label_out: {label: 1.0} if label else None, score_out: score, explain: expl}
+
+     gr.Button("Analyze").click(_run, inputs=inp, outputs=[label_out, score_out, explain])

  if __name__ == "__main__":
+     demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", 7860)))
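
A quick way to sanity-check the new code path without opening the browser UI is to call detect_ai directly. This is a minimal sketch, not part of the commit: it assumes the file above is saved as app.py with transformers, torch, and gradio installed. Importing app builds the Blocks UI and downloads the model, but the guarded demo.launch() keeps the server from starting.

# smoke_test.py: hypothetical helper script, not included in this commit
from app import detect_ai

label, score, explanation = detect_ai(
    "This is a short sample paragraph. It exists only to exercise the detector."
)
print(f"label={label}  ai_probability={score:.3f}")
print(explanation)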