import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import re
import matplotlib.pyplot as plt
from tokenizers.normalizers import Sequence, Replace, Strip
from tokenizers import Regex

# Device setup
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# --- Model and Tokenizer Setup ---
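# Model 1 loads from a local checkpoint ('modernbert.bin' next to this script);
# models 2 and 3 are downloaded from the Hugging Face Hub at startup.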
model1_path = "modernbert.bin"
model2_path = "https://huggingface.co/mihalykiss/modernbert_2/resolve/main/Model_groups_3class_seed12"
model3_path = "https://huggingface.co/mihalykiss/modernbert_2/resolve/main/Model_groups_3class_seed22"

tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")

# Load the three ensemble members: the same ModernBERT backbone with a
# 41-class head, each with a different set of fine-tuned weights.
def load_model(weights_path, from_url=False):
    model = AutoModelForSequenceClassification.from_pretrained("answerdotai/ModernBERT-base", num_labels=41)
    if from_url:
        state_dict = torch.hub.load_state_dict_from_url(weights_path, map_location=device)
    else:
        state_dict = torch.load(weights_path, map_location=device)
    model.load_state_dict(state_dict)
    return model.to(device).eval()

model_1 = load_model(model1_path)                 # local file
model_2 = load_model(model2_path, from_url=True)  # downloaded
model_3 = load_model(model3_path, from_url=True)  # downloaded


# --- Label Mapping ---
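# 41 training classes: 40 text-generator labels plus 'human' (index 24).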
label_mapping = {
    0: '13B', 1: '30B', 2: '65B', 3: '7B', 4: 'GLM130B', 5: 'bloom_7b',
    6: 'bloomz', 7: 'cohere', 8: 'davinci', 9: 'dolly', 10: 'dolly-v2-12b',
    11: 'flan_t5_base', 12: 'flan_t5_large', 13: 'flan_t5_small',
    14: 'flan_t5_xl', 15: 'flan_t5_xxl', 16: 'gemma-7b-it', 17: 'gemma2-9b-it',
    18: 'gpt-3.5-turbo', 19: 'gpt-35', 20: 'gpt4', 21: 'gpt4o',
    22: 'gpt_j', 23: 'gpt_neox', 24: 'human', 25: 'llama3-70b', 26: 'llama3-8b',
    27: 'mixtral-8x7b', 28: 'opt_1.3b', 29: 'opt_125m', 30: 'opt_13b',
    31: 'opt_2.7b', 32: 'opt_30b', 33: 'opt_350m', 34: 'opt_6.7b',
    35: 'opt_iml_30b', 36: 'opt_iml_max_1.3b', 37: 't0_11b', 38: 't0_3b',
    39: 'text-davinci-002', 40: 'text-davinci-003'
}

# --- Text Cleaning ---
def clean_text(text: str) -> str:
    text = re.sub(r'\s{2,}', ' ', text)
    text = re.sub(r'\s+([,.;:?!])', r'\1', text)
    return text

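# Append a newline-to-space replace to the tokenizer's normalizer so single
# line breaks inside a paragraph are tokenized as ordinary spaces.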
newline_to_space = Replace(Regex(r'\s*\n\s*'), " ")
tokenizer.backend_tokenizer.normalizer = Sequence([
    tokenizer.backend_tokenizer.normalizer,
    newline_to_space,
    Strip()
])

# --- Classification Function (Per Paragraph) ---
def classify_text(text):
    """
    Classifies each paragraph separately and provides per-paragraph scores
    + overall result.
    """
    if not text.strip():
        return "", None

    # Split the raw text into paragraphs (2+ newlines) *before* cleaning:
    # clean_text collapses all whitespace runs (including blank lines), so
    # cleaning first would merge the input into a single paragraph.
    paragraphs = [clean_text(p).strip() for p in re.split(r'\n{2,}', text) if p.strip()]
    chunk_scores = []
    all_probabilities = []

    for paragraph in paragraphs:
        inputs = tokenizer(paragraph, return_tensors="pt", truncation=True, padding=True).to(device)

        with torch.no_grad():
            logits_1 = model_1(**inputs).logits
            logits_2 = model_2(**inputs).logits
            logits_3 = model_3(**inputs).logits

            softmax_1 = torch.softmax(logits_1, dim=1)
            softmax_2 = torch.softmax(logits_2, dim=1)
            softmax_3 = torch.softmax(logits_3, dim=1)

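            # Soft-voting ensemble: average the three models' probability distributions.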
            averaged_probabilities = (softmax_1 + softmax_2 + softmax_3) / 3
            probabilities = averaged_probabilities[0]
            all_probabilities.append(probabilities.cpu())

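        # Class 24 is 'human'; the AI score is the probability mass across
        # all other (generator) classes.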
        human_prob = probabilities[24].item()
        ai_probs_clone = probabilities.clone()
        ai_probs_clone[24] = 0
        ai_total_prob = ai_probs_clone.sum().item()

        total_decision_prob = human_prob + ai_total_prob
        human_percentage = (human_prob / total_decision_prob) * 100
        ai_percentage = (ai_total_prob / total_decision_prob) * 100
        ai_argmax_index = torch.argmax(ai_probs_clone).item()
        ai_argmax_model = label_mapping[ai_argmax_index]

        chunk_scores.append({
            "paragraph": paragraph[:150] + ("..." if len(paragraph) > 150 else ""),
            "human": human_percentage,
            "ai": ai_percentage,
            "model": ai_argmax_model
        })

    # --- Overall Average ---
    avg_human = sum(c["human"] for c in chunk_scores) / len(chunk_scores)
    avg_ai = sum(c["ai"] for c in chunk_scores) / len(chunk_scores)

    # The result is rendered in a gr.HTML component, so use HTML tags, not Markdown.
    if avg_human > avg_ai:
        result_message = f"<b>Overall Result:</b> <span class='highlight-human'>{avg_human:.2f}% Human-written</span>"
    else:
        top_model = max(chunk_scores, key=lambda c: c['ai'])['model']
        result_message = f"<b>Overall Result:</b> <span class='highlight-ai'>{avg_ai:.2f}% AI-generated (likely {top_model})</span>"

    # --- Paragraph Table ---
    paragraph_table = "<br><br><b>Paragraph Analysis:</b><br>\n"
    for idx, c in enumerate(chunk_scores, 1):
        color = "#4CAF50" if c['human'] > c['ai'] else "#FF5733"
        paragraph_table += (
            f"<div style='margin-bottom:10px; border-left:4px solid {color}; padding-left:10px;'>"
            f"<b>Paragraph {idx}</b>: {c['human']:.2f}% Human | {c['ai']:.2f}% AI → <i>{c['model']}</i><br>"
            f"<small>{c['paragraph']}</small>"
            f"</div>\n"
        )

    # --- Plot (Top 5 Models Overall) ---
    mean_probs = torch.mean(torch.stack(all_probabilities), dim=0)
    top_5_probs, top_5_indices = torch.topk(mean_probs, 5)
    top_5_probs = top_5_probs.cpu().numpy()
    top_5_labels = [label_mapping[i.item()] for i in top_5_indices]

    fig, ax = plt.subplots(figsize=(10, 5))
    bars = ax.barh(top_5_labels, top_5_probs, color='#4CAF50', alpha=0.8)
    ax.set_xlabel('Probability', fontsize=12)
    ax.set_title('Top 5 Predictions (Averaged)', fontsize=14, fontweight='bold')
    ax.invert_yaxis()
    ax.grid(axis='x', linestyle='--', alpha=0.6)
    for bar in bars:
        width = bar.get_width()
        ax.text(width + 0.01, bar.get_y() + bar.get_height() / 2, f'{width:.2%}', va='center')
    ax.set_xlim(0, max(top_5_probs) * 1.18)
    plt.tight_layout()

    return result_message + "\n\n" + paragraph_table, fig

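# Minimal programmatic usage sketch (hypothetical input; the UI below invokes
# classify_text through the Gradio change event instead):
#   message, fig = classify_text("First paragraph.\n\nSecond paragraph.")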

# --- UI ---
title = "AI Text Detector"
description = """
This tool uses an ensemble of three fine-tuned <b>ModernBERT</b> models to detect AI-generated text.  
Each paragraph is analyzed separately to show which parts are likely AI-generated.
"""
bottom_text = "**Developed by SzegedAI – Extended by Saber**"

AI_texts = [
"Artificial intelligence (AI) is reshaping industries by automating tasks, enhancing decision-making, and driving innovation. From predictive analytics in finance to autonomous vehicles in transportation, AI technologies are becoming integral to daily operations. The future of AI lies not only in technological advancement but also in ensuring ethical use, transparency, and accountability."
]

Human_texts = [
"Mathematics has always been a cornerstone of scientific discovery. It provides a precise language for describing natural phenomena, from the orbit of planets to the behavior of subatomic particles. The beauty of mathematics lies in its universality—its principles hold true regardless of context or culture."
]

iface = gr.Blocks(css="""
    @import url('https://fonts.googleapis.com/css2?family=Roboto+Mono:wght@400;700&display=swap');
    #text_input_box { border-radius: 10px; border: 2px solid #4CAF50; font-size: 18px; padding: 15px; width: 60%; box-sizing: border-box; margin: 0 auto 20px; }
    #result_output_box { border-radius: 10px; border: 2px solid #4CAF50; font-size: 16px; padding: 15px; width: 80%; box-sizing: border-box; margin: 20px auto 0; }
    body { font-family: 'Roboto Mono', monospace !important; padding: 20px; overflow-y: auto; }
    .gradio-container { border: 1px solid #4CAF50; border-radius: 15px; padding: 30px; box-shadow: 0px 0px 10px rgba(0,255,0,0.6); max-width: 900px; margin: auto; }
    .highlight-human { color: #4CAF50; font-weight: bold; }
    .highlight-ai { color: #FF5733; font-weight: bold; }
""")

with iface:
    gr.Markdown(f"# {title}")
    gr.Markdown(description)
    text_input = gr.Textbox(label="", placeholder="Paste your article here...", elem_id="text_input_box", lines=10)
    result_output = gr.HTML("", elem_id="result_output_box")
    plot_output = gr.Plot(label="Model Probability Distribution")
    text_input.change(classify_text, inputs=text_input, outputs=[result_output, plot_output])
    with gr.Tab("AI Examples"):
        gr.Examples(AI_texts, inputs=text_input)
    with gr.Tab("Human Examples"):
        gr.Examples(Human_texts, inputs=text_input)
    gr.Markdown(bottom_text)

iface.launch(share=True)