mahmoudsaber0 committed
Commit 29cd84d · verified · 1 Parent(s): d51afa3

Update app.py

Files changed (1)
  1. app.py +93 -99
app.py CHANGED
@@ -2,12 +2,11 @@ import gradio as gr
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
  import torch
  import re
- from tokenizers import normalizers
- from tokenizers.normalizers import Sequence, Replace, Strip, NFKC
- from tokenizers import Regex
  import matplotlib.pyplot as plt

- # Set device to GPU if available, otherwise CPU
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

  # --- Model and Tokenizer Setup ---
@@ -17,23 +16,23 @@ model3_path = "https://huggingface.co/mihalykiss/modernbert_2/resolve/main/Model

  tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")

- # Load Model 1 from local path
  model_1 = AutoModelForSequenceClassification.from_pretrained("answerdotai/ModernBERT-base", num_labels=41)
  model_1.load_state_dict(torch.load(model1_path, map_location=device))
  model_1.to(device).eval()

- # Load Model 2 from URL
  model_2 = AutoModelForSequenceClassification.from_pretrained("answerdotai/ModernBERT-base", num_labels=41)
  model_2.load_state_dict(torch.hub.load_state_dict_from_url(model2_path, map_location=device))
  model_2.to(device).eval()

- # Load Model 3 from URL
  model_3 = AutoModelForSequenceClassification.from_pretrained("answerdotai/ModernBERT-base", num_labels=41)
  model_3.load_state_dict(torch.hub.load_state_dict_from_url(model3_path, map_location=device))
  model_3.to(device).eval()


- # --- Label Mapping and Text Cleaning ---
  label_mapping = {
      0: '13B', 1: '30B', 2: '65B', 3: '7B', 4: 'GLM130B', 5: 'bloom_7b',
      6: 'bloomz', 7: 'cohere', 8: 'davinci', 9: 'dolly', 10: 'dolly-v2-12b',
@@ -47,152 +46,147 @@ label_mapping = {
      39: 'text-davinci-002', 40: 'text-davinci-003'
  }

  def clean_text(text: str) -> str:
      text = re.sub(r'\s{2,}', ' ', text)
      text = re.sub(r'\s+([,.;:?!])', r'\1', text)
      return text

  newline_to_space = Replace(Regex(r'\s*\n\s*'), " ")
- join_hyphen_break = Replace(Regex(r'(\w+)[--]\s*\n\s*(\w+)'), r"\1\2")
  tokenizer.backend_tokenizer.normalizer = Sequence([
      tokenizer.backend_tokenizer.normalizer,
-     join_hyphen_break,
      newline_to_space,
      Strip()
  ])

-
  def classify_text(text):
      """
-     Classifies the text and generates a plot of the top 5 AI model predictions.
-     Returns both the result message and the plot figure.
      """
      cleaned_text = clean_text(text)
-     # If input is empty, clear the outputs
      if not cleaned_text.strip():
          return "", None

-     # Tokenize input and move to the appropriate device
-     inputs = tokenizer(cleaned_text, return_tensors="pt", truncation=True, padding=True).to(device)
-
-     # Perform inference with the three models
-     with torch.no_grad():
-         logits_1 = model_1(**inputs).logits
-         logits_2 = model_2(**inputs).logits
-         logits_3 = model_3(**inputs).logits
-
-     softmax_1 = torch.softmax(logits_1, dim=1)
-     softmax_2 = torch.softmax(logits_2, dim=1)
-     softmax_3 = torch.softmax(logits_3, dim=1)
-
-     averaged_probabilities = (softmax_1 + softmax_2 + softmax_3) / 3
-     probabilities = averaged_probabilities[0]
-
-     human_prob = probabilities[24].item()
-     ai_probs_clone = probabilities.clone()
-     ai_probs_clone[24] = 0
-     ai_total_prob = ai_probs_clone.sum().item()
-
-     total_decision_prob = human_prob + ai_total_prob
-     human_percentage = (human_prob / total_decision_prob) * 100
-     ai_percentage = (ai_total_prob / total_decision_prob) * 100
-
-     ai_argmax_index = torch.argmax(ai_probs_clone).item()
-     ai_argmax_model = label_mapping[ai_argmax_index]
-
-     if human_percentage > ai_percentage:
-         result_message = (
-             f"**The text is** <span class='highlight-human'>**{human_percentage:.2f}%** likely <b>Human written</b>.</span>"
-         )
      else:
-         result_message = (
-             f"**The text is** <span class='highlight-ai'>**{ai_percentage:.2f}%** likely <b>AI generated</b>.</span>\n\n"
-             f"**Identified LLM: {ai_argmax_model}**"
          )

-
-     ai_probs_for_plot = probabilities.clone()
-     top_5_probs, top_5_indices = torch.topk(ai_probs_for_plot, 5)
-
      top_5_probs = top_5_probs.cpu().numpy()
      top_5_labels = [label_mapping[i.item()] for i in top_5_indices]

      fig, ax = plt.subplots(figsize=(10, 5))
      bars = ax.barh(top_5_labels, top_5_probs, color='#4CAF50', alpha=0.8)
      ax.set_xlabel('Probability', fontsize=12)
-     ax.set_title('Top 5 Predictions', fontsize=14, fontweight='bold')
      ax.invert_yaxis()
      ax.grid(axis='x', linestyle='--', alpha=0.6)
-
-
      for bar in bars:
          width = bar.get_width()
-         label_x_pos = width + 0.01
-         ax.text(label_x_pos, bar.get_y() + bar.get_height() / 2, f'{width:.2%}', va='center')
-
-     ax.set_xlim(0, max(top_5_probs) * 1.18)
      plt.tight_layout()

-
-     return result_message, fig
-


  title = "AI Text Detector"
-
  description = """
- This tool uses the <b>ModernBERT</b> model to identify whether a given text was written by a human or generated by artificial intelligence (AI). It works with a soft voting ensemble using <b>three</b> models, combining their outputs to improve the accuracy.<br>
- <div style="line-height: 1.8;">
- ✅ <b>Human Verification:</b> Human-written content is clearly marked.<br>
- 🔍 <b>Model Detection:</b> Can identify content from over 40 AI models.<br>
- 📈 <b>Accuracy:</b> Works best with longer texts.<br>
- 📄 <b>Read more:</b> Our method is detailed in our paper:
- <a href="https://aclanthology.org/2025.genaidetect-1.15/" target="_blank" style="color: #007bff; text-decoration: none;"><b>LINK</b></a>.
- </div>
- <br>
- Paste your text below to analyze its origin.
  """
- bottom_text = "**Developed by SzegedAI**"

  AI_texts = [
- "Camels are remarkable desert animals known for their unique adaptations to harsh, arid environments. Native to the Middle East, North Africa, and parts of Asia, camels have been essential to human life for centuries, serving as a mode of transportation, a source of food, and even a symbol of endurance and survival. There are two primary species of camels: the dromedary camel, which has a single hump and is commonly found in the Middle East and North Africa, and the Bactrian camel, which has two humps and is native to Central Asia. Their humps store fat, not water, as commonly believed, allowing them to survive long periods without food by metabolizing the stored fat for energy. Camels are highly adapted to desert life. They can go for weeks without water, and when they do drink, they can consume up to 40 gallons in one sitting. Their thick eyelashes, sealable nostrils, and wide, padded feet protect them from sand and help them walk easily on loose desert terrain.",
- "Wines are a fascinating reflection of culture, history, and craftsmanship. They embody a rich diversity shaped by the land, climate, and traditions where they are produced. From the bold reds of Bordeaux to the crisp whites of New Zealand, each bottle tells a unique story. What makes wine so special is its ability to connect people. Whether shared at a family dinner, a celebratory event, or a quiet evening with friends, wine enhances experiences and brings people together. The variety of flavors and aromas, influenced by grape type, fermentation techniques, and aging processes, make wine tasting a complex yet rewarding journey for the senses.",
- "I find artificial intelligence (AI) to be one of the most transformative and fascinating technologies of our time. Its potential spans a wide range of applications, from automating mundane tasks to revolutionizing industries like healthcare, education, and entertainment. AI has already made significant contributions in fields like language processing, image recognition, and decision-making systems, enabling innovations that were once purely science fiction. However, as powerful as AI can be, it also brings challenges and responsibilities. Ethical considerations, such as bias in data, transparency, and the potential for misuse, need to be carefully addressed to ensure fairness and accountability. The rise of generative AI has also sparked debates about creativity, originality, and intellectual property, making it essential to strike a balance between technological advancement and respecting human contributions."
  ]

  Human_texts = [
- "The present book is intended as a text in basic mathematics. As such, it can have multiple use: for a one-year course in the high schools during the third or fourth year (if possible the third, so that calculus can be taken during the fourth year); for a complementary reference in earlier high school grades (elementary algebra and geometry are covered); for a one-semester course at the college level, to review or to get a firm foundation in the basic mathematics necessary to go ahead in calculus, linear algebra, or other topics. Years ago, the colleges used to give courses in college algebra” and other subjects which should have been covered in high school. More recently, such courses have been thought unnecessary, but some experiences I have had show that they are just as necessary as ever. What is happening is that thecolleges are getting a wide variety of students from high schools, ranging from exceedingly well-prepared ones who have had a good first course in calculus, down to very poorly prepared ones.",
- "Fats are rich in energy, build body cells, support brain development of infants, help body processes, and facilitate the absorption and use of fat-soluble vitamins A, D, E, and K. The major component of lipids is glycerol and fatty acids. According to chemical properties, fatty acids can be divided into saturated and unsaturated fatty acids. Generally lipids containing saturated fatty acids are solid at room temperature and include animal fats (butter, lard, tallow, ghee) and tropical oils (palm,coconut, palm kernel). Saturated fats increase the risk of heart disease.",
- "To make BERT handle a variety of down-stream tasks, our input representation is able to unambiguously represent both a single sentence and a pair of sentences (e.g., h Question, Answeri) in one token sequence. Throughout this work, a “sentence” can be an arbitrary span of contiguous text, rather than an actual linguistic sentence. A “sequence” refers to the input token sequence to BERT, which may be a single sentence or two sentences packed together. We use WordPiece embeddings (Wu et al., 2016) with a 30,000 token vocabulary. The first token of every sequence is always a special classification token ([CLS]). The final hidden state corresponding to this token is used as the aggregate sequence representation for classification tasks. Sentence pairs are packed together into a single sequence."]

  iface = gr.Blocks(css="""
  @import url('https://fonts.googleapis.com/css2?family=Roboto+Mono:wght@400;700&display=swap');
  #text_input_box { border-radius: 10px; border: 2px solid #4CAF50; font-size: 18px; padding: 15px; margin-bottom: 20px; width: 60%; box-sizing: border-box; margin: auto; }
- .form.svelte-633qhp { background: none; border: none; box-shadow: none; }
- #result_output_box { border-radius: 10px; border: 2px solid #4CAF50; font-size: 18px; padding: 15px; margin-top: 20px; width: 40%; box-sizing: border-box; text-align: center; margin: auto; }
- @media (max-width: 768px) { #result_output_box { width: 100%; } #text_input_box{ width: 100%; } }
- body { font-family: 'Roboto Mono', sans-serif !important; padding: 20px; display: block; justify-content: center; align-items: center; height: 100vh; overflow-y: auto; }
- .gradio-container { border: 1px solid #4CAF50; border-radius: 15px; padding: 30px; box-shadow: 0px 0px 10px rgba(0,255,0,0.6); max-width: 800px; margin: auto; overflow-y: auto; }
- h1 { text-align: center; font-size: 32px; font-weight: bold; margin-bottom: 30px; }
- .highlight-human { color: #4CAF50; font-weight: bold; background: rgba(76, 175, 80, 0.2); padding: 5px; border-radius: 8px; }
- .highlight-ai { color: #FF5733; font-weight: bold; background: rgba(255, 87, 51, 0.2); padding: 5px; border-radius: 8px; }
- #bottom_text { text-align: center; margin-top: 50px; font-weight: bold; font-size: 20px; }
- .block.svelte-11xb1hd{ background: none !important; }
  """)

  with iface:
      gr.Markdown(f"# {title}")
      gr.Markdown(description)
-     text_input = gr.Textbox(label="", placeholder="Type or paste your content here...", elem_id="text_input_box", lines=5)
-     result_output = gr.Markdown("", elem_id="result_output_box")
      plot_output = gr.Plot(label="Model Probability Distribution")
-
      text_input.change(classify_text, inputs=text_input, outputs=[result_output, plot_output])
-
-     with gr.Tab("AI text examples"):
          gr.Examples(AI_texts, inputs=text_input)
-     with gr.Tab("Human text examples"):
          gr.Examples(Human_texts, inputs=text_input)
-     gr.Markdown(bottom_text, elem_id="bottom_text")

- iface.launch(share=True)
 
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
  import torch
  import re
  import matplotlib.pyplot as plt
+ from tokenizers.normalizers import Sequence, Replace, Strip
+ from tokenizers import Regex

+ # Device setup
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

  # --- Model and Tokenizer Setup ---
 

  tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")

+ # Load Model 1 (local)
  model_1 = AutoModelForSequenceClassification.from_pretrained("answerdotai/ModernBERT-base", num_labels=41)
  model_1.load_state_dict(torch.load(model1_path, map_location=device))
  model_1.to(device).eval()

+ # Load Model 2 (URL)
  model_2 = AutoModelForSequenceClassification.from_pretrained("answerdotai/ModernBERT-base", num_labels=41)
  model_2.load_state_dict(torch.hub.load_state_dict_from_url(model2_path, map_location=device))
  model_2.to(device).eval()

+ # Load Model 3 (URL)
  model_3 = AutoModelForSequenceClassification.from_pretrained("answerdotai/ModernBERT-base", num_labels=41)
  model_3.load_state_dict(torch.hub.load_state_dict_from_url(model3_path, map_location=device))
  model_3.to(device).eval()


+ # --- Label Mapping ---
  label_mapping = {
      0: '13B', 1: '30B', 2: '65B', 3: '7B', 4: 'GLM130B', 5: 'bloom_7b',
      6: 'bloomz', 7: 'cohere', 8: 'davinci', 9: 'dolly', 10: 'dolly-v2-12b',
 
      39: 'text-davinci-002', 40: 'text-davinci-003'
  }

+ # --- Text Cleaning ---
  def clean_text(text: str) -> str:
      text = re.sub(r'\s{2,}', ' ', text)
      text = re.sub(r'\s+([,.;:?!])', r'\1', text)
      return text

  newline_to_space = Replace(Regex(r'\s*\n\s*'), " ")
  tokenizer.backend_tokenizer.normalizer = Sequence([
      tokenizer.backend_tokenizer.normalizer,
      newline_to_space,
      Strip()
  ])

+ # --- Classification Function (Per Paragraph) ---
  def classify_text(text):
      """
+     Classifies each paragraph separately and provides per-paragraph scores
+     + overall result.
      """
      cleaned_text = clean_text(text)
      if not cleaned_text.strip():
          return "", None

+     # Split text into paragraphs (2+ newlines)
+     paragraphs = [p.strip() for p in re.split(r'\n{2,}', cleaned_text) if p.strip()]
+     chunk_scores = []
+     all_probabilities = []
+
+     for i, paragraph in enumerate(paragraphs):
+         inputs = tokenizer(paragraph, return_tensors="pt", truncation=True, padding=True).to(device)
+
+         with torch.no_grad():
+             logits_1 = model_1(**inputs).logits
+             logits_2 = model_2(**inputs).logits
+             logits_3 = model_3(**inputs).logits
+
+         softmax_1 = torch.softmax(logits_1, dim=1)
+         softmax_2 = torch.softmax(logits_2, dim=1)
+         softmax_3 = torch.softmax(logits_3, dim=1)
+
+         averaged_probabilities = (softmax_1 + softmax_2 + softmax_3) / 3
+         probabilities = averaged_probabilities[0]
+         all_probabilities.append(probabilities.cpu())
+
+         human_prob = probabilities[24].item()
+         ai_probs_clone = probabilities.clone()
+         ai_probs_clone[24] = 0
+         ai_total_prob = ai_probs_clone.sum().item()
+
+         total_decision_prob = human_prob + ai_total_prob
+         human_percentage = (human_prob / total_decision_prob) * 100
+         ai_percentage = (ai_total_prob / total_decision_prob) * 100
+         ai_argmax_index = torch.argmax(ai_probs_clone).item()
+         ai_argmax_model = label_mapping[ai_argmax_index]
+
+         chunk_scores.append({
+             "paragraph": paragraph[:150] + ("..." if len(paragraph) > 150 else ""),
+             "human": human_percentage,
+             "ai": ai_percentage,
+             "model": ai_argmax_model
+         })
+
+     # --- Overall Average ---
+     avg_human = sum(c["human"] for c in chunk_scores) / len(chunk_scores)
+     avg_ai = sum(c["ai"] for c in chunk_scores) / len(chunk_scores)
+
+     if avg_human > avg_ai:
+         result_message = f"**Overall Result:** <span class='highlight-human'>{avg_human:.2f}% Human-written</span>"
      else:
+         top_model = max(chunk_scores, key=lambda c: c['ai'])['model']
+         result_message = f"**Overall Result:** <span class='highlight-ai'>{avg_ai:.2f}% AI-generated (likely {top_model})</span>"
+
+     # --- Paragraph Table ---
+     paragraph_table = "\n\n**Paragraph Analysis:**\n"
+     for idx, c in enumerate(chunk_scores, 1):
+         color = "#4CAF50" if c['human'] > c['ai'] else "#FF5733"
+         paragraph_table += (
+             f"<div style='margin-bottom:10px; border-left:4px solid {color}; padding-left:10px;'>"
+             f"<b>Paragraph {idx}</b>: {c['human']:.2f}% Human | {c['ai']:.2f}% AI → <i>{c['model']}</i><br>"
+             f"<small>{c['paragraph']}</small>"
+             f"</div>\n"
          )

+     # --- Plot (Top 5 Models Overall) ---
+     mean_probs = torch.mean(torch.stack(all_probabilities), dim=0)
+     top_5_probs, top_5_indices = torch.topk(mean_probs, 5)
      top_5_probs = top_5_probs.cpu().numpy()
      top_5_labels = [label_mapping[i.item()] for i in top_5_indices]

      fig, ax = plt.subplots(figsize=(10, 5))
      bars = ax.barh(top_5_labels, top_5_probs, color='#4CAF50', alpha=0.8)
      ax.set_xlabel('Probability', fontsize=12)
+     ax.set_title('Top 5 Predictions (Averaged)', fontsize=14, fontweight='bold')
      ax.invert_yaxis()
      ax.grid(axis='x', linestyle='--', alpha=0.6)
      for bar in bars:
          width = bar.get_width()
+         ax.text(width + 0.01, bar.get_y() + bar.get_height() / 2, f'{width:.2%}', va='center')
+     ax.set_xlim(0, max(top_5_probs) * 1.18)
      plt.tight_layout()

+     return result_message + "\n\n" + paragraph_table, fig


+ # --- UI ---
  title = "AI Text Detector"
  description = """
+ This tool uses <b>ModernBERT</b> to detect AI-generated text.
+ Each paragraph is analyzed separately to show which parts are likely AI-generated.
  """
+ bottom_text = "**Developed by SzegedAI – Extended by Saber**"

  AI_texts = [
+ "Artificial intelligence (AI) is reshaping industries by automating tasks, enhancing decision-making, and driving innovation. From predictive analytics in finance to autonomous vehicles in transportation, AI technologies are becoming integral to daily operations. The future of AI lies not only in technological advancement but also in ensuring ethical use, transparency, and accountability."
 
 
  ]

  Human_texts = [
+ "Mathematics has always been a cornerstone of scientific discovery. It provides a precise language for describing natural phenomena, from the orbit of planets to the behavior of subatomic particles. The beauty of mathematics lies in its universality—its principles hold true regardless of context or culture."
+ ]
 
  iface = gr.Blocks(css="""
  @import url('https://fonts.googleapis.com/css2?family=Roboto+Mono:wght@400;700&display=swap');
  #text_input_box { border-radius: 10px; border: 2px solid #4CAF50; font-size: 18px; padding: 15px; margin-bottom: 20px; width: 60%; box-sizing: border-box; margin: auto; }
+ #result_output_box { border-radius: 10px; border: 2px solid #4CAF50; font-size: 16px; padding: 15px; margin-top: 20px; width: 80%; box-sizing: border-box; margin: auto; }
+ body { font-family: 'Roboto Mono', sans-serif !important; padding: 20px; display: block; justify-content: center; align-items: center; overflow-y: auto; }
+ .gradio-container { border: 1px solid #4CAF50; border-radius: 15px; padding: 30px; box-shadow: 0px 0px 10px rgba(0,255,0,0.6); max-width: 900px; margin: auto; }
+ .highlight-human { color: #4CAF50; font-weight: bold; }
+ .highlight-ai { color: #FF5733; font-weight: bold; }
  """)

  with iface:
      gr.Markdown(f"# {title}")
      gr.Markdown(description)
+     text_input = gr.Textbox(label="", placeholder="Paste your article here...", elem_id="text_input_box", lines=10)
+     result_output = gr.HTML("", elem_id="result_output_box")
      plot_output = gr.Plot(label="Model Probability Distribution")
      text_input.change(classify_text, inputs=text_input, outputs=[result_output, plot_output])
+     with gr.Tab("AI Examples"):
          gr.Examples(AI_texts, inputs=text_input)
+     with gr.Tab("Human Examples"):
          gr.Examples(Human_texts, inputs=text_input)
+     gr.Markdown(bottom_text)

+ iface.launch(share=True)