mahmoudsaber0 committed
Commit db41da3 · verified · 1 Parent(s): 29cd84d

Update app.py

Files changed (1)
  1. app.py +81 -138
app.py CHANGED
@@ -1,81 +1,42 @@
- import gradio as gr
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
  import torch
- import re
  import matplotlib.pyplot as plt
- from tokenizers.normalizers import Sequence, Replace, Strip
- from tokenizers import Regex
-
- # Device setup
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
- # --- Model and Tokenizer Setup ---
- model1_path = "modernbert.bin"
- model2_path = "https://huggingface.co/mihalykiss/modernbert_2/resolve/main/Model_groups_3class_seed12"
- model3_path = "https://huggingface.co/mihalykiss/modernbert_2/resolve/main/Model_groups_3class_seed22"
-
- tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")
-
- # Load Model 1 (local)
- model_1 = AutoModelForSequenceClassification.from_pretrained("answerdotai/ModernBERT-base", num_labels=41)
- model_1.load_state_dict(torch.load(model1_path, map_location=device))
- model_1.to(device).eval()
-
- # Load Model 2 (URL)
- model_2 = AutoModelForSequenceClassification.from_pretrained("answerdotai/ModernBERT-base", num_labels=41)
- model_2.load_state_dict(torch.hub.load_state_dict_from_url(model2_path, map_location=device))
- model_2.to(device).eval()
-
- # Load Model 3 (URL)
- model_3 = AutoModelForSequenceClassification.from_pretrained("answerdotai/ModernBERT-base", num_labels=41)
- model_3.load_state_dict(torch.hub.load_state_dict_from_url(model3_path, map_location=device))
- model_3.to(device).eval()
-
- # --- Label Mapping ---
- label_mapping = {
-     0: '13B', 1: '30B', 2: '65B', 3: '7B', 4: 'GLM130B', 5: 'bloom_7b',
-     6: 'bloomz', 7: 'cohere', 8: 'davinci', 9: 'dolly', 10: 'dolly-v2-12b',
-     11: 'flan_t5_base', 12: 'flan_t5_large', 13: 'flan_t5_small',
-     14: 'flan_t5_xl', 15: 'flan_t5_xxl', 16: 'gemma-7b-it', 17: 'gemma2-9b-it',
-     18: 'gpt-3.5-turbo', 19: 'gpt-35', 20: 'gpt4', 21: 'gpt4o',
-     22: 'gpt_j', 23: 'gpt_neox', 24: 'human', 25: 'llama3-70b', 26: 'llama3-8b',
-     27: 'mixtral-8x7b', 28: 'opt_1.3b', 29: 'opt_125m', 30: 'opt_13b',
-     31: 'opt_2.7b', 32: 'opt_30b', 33: 'opt_350m', 34: 'opt_6.7b',
-     35: 'opt_iml_30b', 36: 'opt_iml_max_1.3b', 37: 't0_11b', 38: 't0_3b',
-     39: 'text-davinci-002', 40: 'text-davinci-003'
- }
-
- # --- Text Cleaning ---
- def clean_text(text: str) -> str:
-     text = re.sub(r'\s{2,}', ' ', text)
-     text = re.sub(r'\s+([,.;:?!])', r'\1', text)
-     return text

- newline_to_space = Replace(Regex(r'\s*\n\s*'), " ")
- tokenizer.backend_tokenizer.normalizer = Sequence([
-     tokenizer.backend_tokenizer.normalizer,
-     newline_to_space,
-     Strip()
- ])

- # --- Classification Function (Per Paragraph) ---
  def classify_text(text):
-     """
-     Classifies each paragraph separately and provides per-paragraph scores
-     + overall result.
-     """
      cleaned_text = clean_text(text)
      if not cleaned_text.strip():
          return "", None

-     # Split text into paragraphs (2+ newlines)
-     paragraphs = [p.strip() for p in re.split(r'\n{2,}', cleaned_text) if p.strip()]
-     chunk_scores = []
      all_probabilities = []

-     for i, paragraph in enumerate(paragraphs):
-         inputs = tokenizer(paragraph, return_tensors="pt", truncation=True, padding=True).to(device)

          with torch.no_grad():
              logits_1 = model_1(**inputs).logits
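
Both sides share the lines elided between this hunk and the next (old lines 82-94, new lines 43-55), where the other two models are run and the three outputs are combined before `ai_probs_clone[24]` is zeroed. A minimal sketch of that unchanged middle, not part of the diff and assuming a softmax-average ensemble; the names `inputs`, `logits_1`, `all_probabilities`, `human_prob`, and `ai_probs_clone` are taken from the surrounding context lines:

            logits_2 = model_2(**inputs).logits
            logits_3 = model_3(**inputs).logits

        # Assumed combination: average the three softmax distributions.
        probs = (
            torch.softmax(logits_1, dim=1)
            + torch.softmax(logits_2, dim=1)
            + torch.softmax(logits_3, dim=1)
        ) / 3
        probs = probs[0]
        all_probabilities.append(probs)

        human_prob = probs[24].item()   # index 24: 'human' (old) / 'Human' (new)
        ai_probs_clone = probs.clone()  # clone so the human entry can be zeroed

Whatever the exact elided code is, it must append one probability vector per paragraph to `all_probabilities`, since the plot step later calls `torch.mean(torch.stack(all_probabilities), dim=0)`.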
@@ -95,41 +56,48 @@ def classify_text(text):
          ai_probs_clone[24] = 0
          ai_total_prob = ai_probs_clone.sum().item()

-         total_decision_prob = human_prob + ai_total_prob
-         human_percentage = (human_prob / total_decision_prob) * 100
-         ai_percentage = (ai_total_prob / total_decision_prob) * 100
-         ai_argmax_index = torch.argmax(ai_probs_clone).item()
-         ai_argmax_model = label_mapping[ai_argmax_index]
-
-         chunk_scores.append({
-             "paragraph": paragraph[:150] + ("..." if len(paragraph) > 150 else ""),
-             "human": human_percentage,
-             "ai": ai_percentage,
-             "model": ai_argmax_model
          })

-     # --- Overall Average ---
-     avg_human = sum(c["human"] for c in chunk_scores) / len(chunk_scores)
-     avg_ai = sum(c["ai"] for c in chunk_scores) / len(chunk_scores)

      if avg_human > avg_ai:
-         result_message = f"**Overall Result:** <span class='highlight-human'>{avg_human:.2f}% Human-written</span>"
      else:
-         top_model = max(chunk_scores, key=lambda c: c['ai'])['model']
-         result_message = f"**Overall Result:** <span class='highlight-ai'>{avg_ai:.2f}% AI-generated (likely {top_model})</span>"
-
-     # --- Paragraph Table ---
-     paragraph_table = "\n\n**Paragraph Analysis:**\n"
-     for idx, c in enumerate(chunk_scores, 1):
-         color = "#4CAF50" if c['human'] > c['ai'] else "#FF5733"
-         paragraph_table += (
-             f"<div style='margin-bottom:10px; border-left:4px solid {color}; padding-left:10px;'>"
-             f"<b>Paragraph {idx}</b>: {c['human']:.2f}% Human | {c['ai']:.2f}% AI → <i>{c['model']}</i><br>"
-             f"<small>{c['paragraph']}</small>"
-             f"</div>\n"
-         )
-
-     # --- Plot (Top 5 Models Overall) ---
      mean_probs = torch.mean(torch.stack(all_probabilities), dim=0)
      top_5_probs, top_5_indices = torch.topk(mean_probs, 5)
      top_5_probs = top_5_probs.cpu().numpy()
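
The next gap (old lines 136-146, new lines 104-114) holds the unchanged code that turns the top-5 values into the `fig`/`ax` used below. A plausible sketch, not part of the diff; the figure size, bar color, and titles are assumptions:

    top_5_labels = [label_mapping[idx] for idx in top_5_indices.cpu().numpy()]

    fig, ax = plt.subplots(figsize=(8, 4))
    ax.barh(top_5_labels[::-1], top_5_probs[::-1], color="#4CAF50")
    ax.set_xlabel("Mean probability")
    ax.set_title("Top 5 Models")
    for y, value in enumerate(top_5_probs[::-1]):
        ax.text(value, y, f" {value:.3f}", va="center")

Reversing the arrays puts the highest-probability model at the top of the horizontal bar chart.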
@@ -147,46 +115,21 @@ def classify_text(text):
      ax.set_xlim(0, max(top_5_probs) * 1.18)
      plt.tight_layout()

-     return result_message + "\n\n" + paragraph_table, fig
-

- # --- UI ---
- title = "AI Text Detector"
- description = """
- This tool uses <b>ModernBERT</b> to detect AI-generated text.
- Each paragraph is analyzed separately to show which parts are likely AI-generated.
  """
- bottom_text = "**Developed by SzegedAI – Extended by Saber**"
-
- AI_texts = [
-     "Artificial intelligence (AI) is reshaping industries by automating tasks, enhancing decision-making, and driving innovation. From predictive analytics in finance to autonomous vehicles in transportation, AI technologies are becoming integral to daily operations. The future of AI lies not only in technological advancement but also in ensuring ethical use, transparency, and accountability."
- ]
-
- Human_texts = [
-     "Mathematics has always been a cornerstone of scientific discovery. It provides a precise language for describing natural phenomena, from the orbit of planets to the behavior of subatomic particles. The beauty of mathematics lies in its universality—its principles hold true regardless of context or culture."
- ]
-
- iface = gr.Blocks(css="""
- @import url('https://fonts.googleapis.com/css2?family=Roboto+Mono:wght@400;700&display=swap');
- #text_input_box { border-radius: 10px; border: 2px solid #4CAF50; font-size: 18px; padding: 15px; margin-bottom: 20px; width: 60%; box-sizing: border-box; margin: auto; }
- #result_output_box { border-radius: 10px; border: 2px solid #4CAF50; font-size: 16px; padding: 15px; margin-top: 20px; width: 80%; box-sizing: border-box; margin: auto; }
- body { font-family: 'Roboto Mono', sans-serif !important; padding: 20px; display: block; justify-content: center; align-items: center; overflow-y: auto; }
- .gradio-container { border: 1px solid #4CAF50; border-radius: 15px; padding: 30px; box-shadow: 0px 0px 10px rgba(0,255,0,0.6); max-width: 900px; margin: auto; }
- .highlight-human { color: #4CAF50; font-weight: bold; }
- .highlight-ai { color: #FF5733; font-weight: bold; }
- """)
-
- with iface:
-     gr.Markdown(f"# {title}")
-     gr.Markdown(description)
-     text_input = gr.Textbox(label="", placeholder="Paste your article here...", elem_id="text_input_box", lines=10)
-     result_output = gr.HTML("", elem_id="result_output_box")
-     plot_output = gr.Plot(label="Model Probability Distribution")
-     text_input.change(classify_text, inputs=text_input, outputs=[result_output, plot_output])
-     with gr.Tab("AI Examples"):
-         gr.Examples(AI_texts, inputs=text_input)
-     with gr.Tab("Human Examples"):
-         gr.Examples(Human_texts, inputs=text_input)
-     gr.Markdown(bottom_text)
-
- iface.launch(share=True)
 
 
 
The new version of app.py:

  import torch
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
  import matplotlib.pyplot as plt
+ import re
+ import gradio as gr
+
+ # --- Load models ---
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ model_1 = AutoModelForSequenceClassification.from_pretrained("roberta-base-openai-detector").to(device)
+ model_2 = AutoModelForSequenceClassification.from_pretrained("roberta-large-openai-detector").to(device)
+ model_3 = AutoModelForSequenceClassification.from_pretrained("Hello-SimpleAI/chatgpt-detector-roberta").to(device)
+ tokenizer = AutoTokenizer.from_pretrained("roberta-base-openai-detector")
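
One caveat on these checkpoints: as published on the Hub, roberta-base-openai-detector, roberta-large-openai-detector, and Hello-SimpleAI/chatgpt-detector-roberta are two-label classifiers, while the code below assumes a 25-way label space and indexes class 24. A defensive startup check, my addition rather than part of the commit, would surface any mismatch immediately:

for m in (model_1, model_2, model_3):
    # Assumed invariant: every head matches the 25-entry label_mapping.
    if m.config.num_labels != 25:
        raise ValueError(
            f"{m.name_or_path}: expected 25 labels, got {m.config.num_labels}; "
            "classify_text indexes class 24 ('Human')"
        )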
+ # --- Label Mapping (example) ---
+ label_mapping = {i: f"model_{i}" for i in range(25)}
+ label_mapping[24] = "Human"
+
+ def clean_text(text):
+     text = re.sub(r'\s+', ' ', text).strip()
+     return text

  def classify_text(text):
      cleaned_text = clean_text(text)
      if not cleaned_text.strip():
          return "", None

+     # Split into paragraphs (two newlines)
+     paragraphs = re.split(r'\n{2,}', cleaned_text)
+     if len(paragraphs) == 1 and len(cleaned_text.split()) > 300:
+         # Fallback: split by ~300 words
+         words = cleaned_text.split()
+         paragraphs = [' '.join(words[i:i + 300]) for i in range(0, len(words), 300)]
+
+     paragraph_scores = []
      all_probabilities = []

+     for i, para in enumerate(paragraphs):
+         inputs = tokenizer(para, return_tensors="pt", truncation=True, padding=True).to(device)

          with torch.no_grad():
              logits_1 = model_1(**inputs).logits
 
@@ -95,41 +56,48 @@ def classify_text(text):
          ai_probs_clone[24] = 0
          ai_total_prob = ai_probs_clone.sum().item()

+         total = human_prob + ai_total_prob
+         human_pct = (human_prob / total) * 100
+         ai_pct = (ai_total_prob / total) * 100
+         ai_index = torch.argmax(ai_probs_clone).item()
+         ai_model = label_mapping[ai_index]
+
+         short_preview = (para[:180] + "...") if len(para) > 180 else para
+
+         paragraph_scores.append({
+             "id": i + 1,
+             "human": human_pct,
+             "ai": ai_pct,
+             "model": ai_model,
+             "preview": short_preview
          })

+     # --- Averages ---
+     avg_human = sum(p["human"] for p in paragraph_scores) / len(paragraph_scores)
+     avg_ai = sum(p["ai"] for p in paragraph_scores) / len(paragraph_scores)

      if avg_human > avg_ai:
+         result_message = f"<b>Overall Result:</b> <span class='highlight-human'>{avg_human:.2f}% Human-written</span>"
      else:
+         top_model = max(paragraph_scores, key=lambda p: p['ai'])['model']
+         result_message = f"<b>Overall Result:</b> <span class='highlight-ai'>{avg_ai:.2f}% AI-generated (likely {top_model})</span>"
+
+     # --- Paragraph Analysis HTML ---
+     html_output = f"<div style='font-family: Arial, sans-serif; line-height:1.6;'>{result_message}<br><br>"
+     html_output += "<h3>Paragraph Analysis:</h3>"
+
+     for p in paragraph_scores:
+         color = "#28a745" if p["human"] > p["ai"] else "#FF5733"
+         html_output += f"""
+         <div style='margin-bottom:10px; border-left:5px solid {color}; padding-left:10px; background:#f9f9f9; border-radius:6px;'>
+             <b>Paragraph {p["id"]}</b>: {p["human"]:.2f}% Human | {p["ai"]:.2f}% AI → <i>{p["model"]}</i><br>
+             <small>{p["preview"]}</small>
+         </div>
+         """
+
+     html_output += "</div>"
+
+     # --- Top 5 Plot ---
      mean_probs = torch.mean(torch.stack(all_probabilities), dim=0)
      top_5_probs, top_5_indices = torch.topk(mean_probs, 5)
      top_5_probs = top_5_probs.cpu().numpy()
@@ -147,46 +115,21 @@
      ax.set_xlim(0, max(top_5_probs) * 1.18)
      plt.tight_layout()

+     return html_output, fig

+ # --- Gradio UI ---
+ css = """
+ .highlight-ai { color: #FF5733; font-weight: bold; }
+ .highlight-human { color: #28a745; font-weight: bold; }
  """
+
+ with gr.Blocks(css=css, theme="soft") as demo:
+     gr.Markdown("# 🧠 AI/Human Text Detector")
+     text_input = gr.Textbox(label="Paste your text here", lines=12, placeholder="Paste your article or essay...")
+     output_html = gr.HTML(label="Analysis Results")
+     output_plot = gr.Plot(label="Top 5 Models")
+     analyze_btn = gr.Button("🔍 Analyze Text", variant="primary")
+
+     analyze_btn.click(classify_text, inputs=text_input, outputs=[output_html, output_plot])
+
+ demo.launch()
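
For a quick sanity check outside the UI, `classify_text` can be called directly (with `demo.launch()` guarded or commented out first). Note that the new `clean_text` collapses all whitespace, newlines included, so the `\n{2,}` paragraph split always sees a single paragraph and only inputs over 300 words fall through to the word-based chunking. A minimal smoke test, reusing a sentence from the example texts the old version shipped with:

if __name__ == "__main__":
    sample = (
        "Mathematics has always been a cornerstone of scientific discovery. "
        "It provides a precise language for describing natural phenomena."
    )
    html, fig = classify_text(sample)
    print(html[:300])               # overall verdict plus per-paragraph breakdown
    fig.savefig("top5_models.png")  # the top-5 bar chart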