VictorM-Coder committed on
Commit 205f405 · verified · 1 Parent(s): 24abfdf

Update app.py

Files changed (1): app.py (+63 -46)
app.py CHANGED
@@ -4,10 +4,6 @@ from transformers import AutoTokenizer, AutoModelForSequenceClassification
 import re
 import pandas as pd
 import gradio as gr
-import os
-
-# 1. OPTIMIZE CPU PERFORMANCE
-torch.set_num_threads(os.cpu_count() or 4) # Use all cores
 
 # -----------------------------
 # MODEL INITIALIZATION
@@ -15,20 +11,17 @@ torch.set_num_threads(os.cpu_count() or 4) # Use all cores
 MODEL_NAME = "desklib/ai-text-detector-v1.01"
 tokenizer = None
 model = None
-device = torch.device("cpu") # Hardcoding CPU as requested
-dtype = torch.float32
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 def get_model():
     global tokenizer, model
     if model is None:
-        print(f"Loading Model: {MODEL_NAME} on CPU...")
-        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=False)
+        print(f"Loading Specialized Model: {MODEL_NAME} on {device}")
+        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+        # Load with default labels; if the model has 2 (Human/AI), we handle it in analyze()
         model = AutoModelForSequenceClassification.from_pretrained(
             MODEL_NAME,
-            num_labels=1,
-            ignore_mismatched_sizes=True,
-            low_cpu_mem_usage=True,
-            torch_dtype=dtype
+            ignore_mismatched_sizes=True
        ).to(device).eval()
     return tokenizer, model
 
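Note: the comment added in get_model() defers label handling to analyze() in the next hunk, where the new code branches on output.logits.shape[1]. As a standalone illustration of that step, here is a minimal sketch, assuming F is torch.nn.functional (none of the visible hunks add that import, so it presumably already exists near the top of app.py); logits_to_ai_prob is an illustrative name, not a function in the commit:

    import torch
    import torch.nn.functional as F

    def logits_to_ai_prob(logits: torch.Tensor) -> list:
        # Two-label head (Human/AI): softmax over the class dimension, with
        # column 1 taken as 'AI' -- the same assumption analyze() makes.
        if logits.shape[1] > 1:
            return F.softmax(logits, dim=-1)[:, 1].tolist()
        # Single-logit head: squash the raw score into [0, 1] with a sigmoid.
        return torch.sigmoid(logits).flatten().tolist()

    print(logits_to_ai_prob(torch.tensor([[-1.2, 0.8]])))  # ~[0.88]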
@@ -69,101 +62,125 @@ def split_preserving_structure(text):
     return final_blocks
 
 # -----------------------------
-# ANALYSIS (With Batching & Progress)
+# ANALYSIS
 # -----------------------------
 @torch.inference_mode()
-def analyze(text, progress=gr.Progress(track_tqdm=True)):
+def analyze(text):
     text = text.strip()
     if not text:
         return "—", "—", "<em>Please enter text...</em>", None
-
+
     word_count = len(text.split())
-    if word_count < 300:
-        warning_msg = f"⚠️ <b>Insufficient Text:</b> {word_count} words. Min 300 required."
-        return "Too Short", "N/A", f"<div style='color: #b80d0d; border: 1px solid #b80d0d; padding: 10px;'>{warning_msg}</div>", None
+    if word_count < 300: # Slightly lowered for testing flexibility
+        warning_msg = f"⚠️ <b>Insufficient Text:</b> Your input has {word_count} words. Please enter at least 250-300 words for accurate results."
+        return "Too Short", "N/A", f"<div style='color: #b80d0d; padding: 20px; border: 1px solid #b80d0d; border-radius: 8px;'>{warning_msg}</div>", None
 
     try:
         tok, mod = get_model()
     except Exception as e:
-        return "ERROR", "0%", f"Load Error: {str(e)}", None
+        return "ERROR", "0%", f"Failed to load model: {str(e)}", None
 
     blocks = split_preserving_structure(text)
     pure_sents_indices = [i for i, b in enumerate(blocks) if b.strip() and not b.startswith("\n")]
     pure_sents = [blocks[i] for i in pure_sents_indices]
 
     if not pure_sents:
-        return "—", "—", "No sentences found.", None
+        return "—", "—", "<em>No sentences detected.</em>", None
 
-    # Windows for context
+    # Sliding Window Generation (Context of 3 sentences)
     windows = []
     for i in range(len(pure_sents)):
         start = max(0, i - 1)
         end = min(len(pure_sents), i + 2)
         windows.append(" ".join(pure_sents[start:end]))
 
-    # 2. BATCHED INFERENCE (Crucial for CPU)
+    # --- BATCHED INFERENCE (Prevents OOM) ---
+    batch_size = 8
     probs = []
-    batch_size = 4 # Small batches so CPU doesn't hang
-
-    progress(0, desc="Starting Analysis...")
     for i in range(0, len(windows), batch_size):
         batch = windows[i : i + batch_size]
         inputs = tok(batch, return_tensors="pt", padding=True, truncation=True, max_length=512).to(device)
         output = mod(**inputs)
-        batch_probs = torch.sigmoid(output.logits).cpu().numpy().flatten().tolist()
+
+        # Check if model is binary classification (2 labels) or regression (1 label)
+        if output.logits.shape[1] > 1:
+            # Assumes Label 1 is 'AI'
+            batch_probs = F.softmax(output.logits, dim=-1)[:, 1].cpu().numpy().tolist()
+        else:
+            batch_probs = torch.sigmoid(output.logits).cpu().numpy().flatten().tolist()
         probs.extend(batch_probs)
-        progress((i + batch_size) / len(windows), desc=f"Analyzing sentences {i+1}-{min(i+batch_size, len(windows))}...")
 
-    # Statistics
+    # Calculation for Final Score
     lengths = [len(s.split()) for s in pure_sents]
     total_words = sum(lengths)
     weighted_avg = sum(p * l for p, l in zip(probs, lengths)) / total_words if total_words > 0 else 0
 
-    # 3. HTML GENERATION
-    highlighted_html = "<div style='font-family: sans-serif; line-height: 1.8;'>"
+    # -----------------------------
+    # HTML RECONSTRUCTION
+    # -----------------------------
+    highlighted_html = "<div style='font-family: -apple-system, BlinkMacSystemFont, \"Segoe UI\", Roboto, sans-serif; line-height: 1.8;'>"
     prob_map = {idx: probs[i] for i, idx in enumerate(pure_sents_indices)}
 
     for i, block in enumerate(blocks):
         if block.startswith("\n") or block.isspace():
             highlighted_html += block.replace("\n", "<br>")
             continue
-
+
         if i in prob_map:
             score = prob_map[i]
-            color, bg = ("#b80d0d", "rgba(184, 13, 13, 0.15)") if score >= THRESHOLD else ("#11823b", "rgba(17, 130, 59, 0.15)")
+            # Color logic based on Threshold
+            if score >= THRESHOLD:
+                color, bg = "#d32f2f", "rgba(211, 47, 47, 0.12)" # Soft Red
+                border = "2px solid #d32f2f"
+            else:
+                color, bg = "#2e7d32", "rgba(46, 125, 50, 0.08)" # Soft Green
+                border = "1px solid transparent"
+
             highlighted_html += (
-                f"<span style='background:{bg}; padding:2px 4px; border-radius:4px; border-bottom: 2px solid {color};'>"
-                f"<b style='color:{color}; font-size: 0.8em;'>[{score:.1%}]</b> {block}</span>"
+                f"<span style='background:{bg}; padding:1px 2px; border-radius:3px; border-bottom: {border}; cursor: help;' "
+                f"title='AI Confidence: {score:.2%}'>"
+                f"<span style='color:{color}; font-weight: bold; font-size: 0.75em; vertical-align: super; margin-right: 2px;'>{score:.0%}</span>"
+                f"{block}</span>"
             )
         else:
             highlighted_html += block
-
     highlighted_html += "</div>"
-
+
+    label = f"{weighted_avg:.1%} AI Written"
+    display_score = f"{weighted_avg:.2%}"
     df = pd.DataFrame({"Sentence": pure_sents, "AI Confidence": [f"{p:.2%}" for p in probs]})
-    return f"{weighted_avg:.1%} AI Probability", f"{weighted_avg:.2%}", highlighted_html, df
+
+    return label, display_score, highlighted_html, df
 
 # -----------------------------
 # GRADIO INTERFACE
 # -----------------------------
-with gr.Blocks(theme=gr.themes.Soft()) as demo:
-    gr.Markdown("## 🕵️ AI Detector Pro (CPU Optimized)")
+with gr.Blocks(theme=gr.themes.Soft(), title="AI Detector Pro") as demo:
+    gr.Markdown("# 🕵️ AI Detector Pro")
+    gr.Markdown(f"Utilizing **{MODEL_NAME}**. Values above **{THRESHOLD*100:.0f}%** are flagged as highly likely AI.")
 
     with gr.Row():
         with gr.Column(scale=3):
-            text_input = gr.Textbox(label="Paste Text", lines=12, placeholder="Min 300 words...")
-            run_btn = gr.Button("Run Analysis", variant="primary")
+            text_input = gr.Textbox(label="Input Text", lines=15, placeholder="Paste your essay here (minimum 250 words for accuracy)...")
+            with gr.Row():
+                clear_btn = gr.Button("Clear")
+                run_btn = gr.Button("Analyze Text", variant="primary")
+
         with gr.Column(scale=1):
-            verdict_out = gr.Label(label="Weighted Verdict")
-            score_out = gr.Label(label="Exact Score")
-
+            verdict_out = gr.Label(label="Global Verdict")
+            score_out = gr.Label(label="Weighted Probability")
+            gr.Markdown("---")
+            gr.Markdown("### How to read:")
+            gr.Markdown("- **Red Highlight:** High AI probability\n- **Green Highlight:** Likely Human\n- **Super-script:** Exact sentence-level AI score")
+
     with gr.Tabs():
         with gr.TabItem("Visual Heatmap"):
-            html_out = gr.HTML()
-        with gr.TabItem("Detailed Breakdown"):
+            html_out = gr.HTML(label="Heatmap")
+        with gr.TabItem("Data Breakdown"):
             table_out = gr.Dataframe(headers=["Sentence", "AI Confidence"], wrap=True)
 
     run_btn.click(analyze, inputs=text_input, outputs=[verdict_out, score_out, html_out, table_out])
+    clear_btn.click(lambda: ["", "", "", "", None], outputs=[text_input, verdict_out, score_out, html_out, table_out])
 
 if __name__ == "__main__":
     demo.launch()
 
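The sliding-window generation in analyze() scores each sentence together with its immediate neighbours, so short sentences inherit surrounding context instead of being judged in isolation. A minimal self-contained sketch of the same construction (build_windows is an illustrative name, not a function in app.py):

    def build_windows(sentences, radius=1):
        # Join each sentence with up to `radius` neighbours on either side,
        # clamping at the ends, exactly as the loop in analyze() does.
        windows = []
        for i in range(len(sentences)):
            start = max(0, i - radius)
            end = min(len(sentences), i + radius + 1)
            windows.append(" ".join(sentences[start:end]))
        return windows

    print(build_windows(["A.", "B.", "C."]))
    # ['A. B.', 'A. B. C.', 'B. C.']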
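The final verdict is a word-count-weighted mean of the per-sentence probabilities, so long sentences move the document-level score more than short ones. A worked example with made-up numbers, following the same formula as analyze():

    # probs and lengths are illustrative values, not real model output
    probs = [0.90, 0.20]   # per-sentence AI probabilities
    lengths = [10, 30]     # per-sentence word counts

    weighted_avg = sum(p * l for p, l in zip(probs, lengths)) / sum(lengths)
    print(f"{weighted_avg:.2%}")  # 37.50%, i.e. (0.9*10 + 0.2*30) / 40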