Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -12,12 +12,15 @@ def split_sentences(paragraph):
|
|
| 12 |
"""Split a paragraph into sentences."""
|
| 13 |
return re.split(r'(?<=[.!?]) +', paragraph.strip())
|
| 14 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 15 |
def detect_ai(text):
|
| 16 |
-
# Split into paragraphs
|
| 17 |
paragraphs = re.split(r"\n\s*\n", text.strip())
|
| 18 |
|
| 19 |
results = []
|
| 20 |
-
|
| 21 |
|
| 22 |
highlighted = ""
|
| 23 |
for para in paragraphs:
|
|
@@ -25,50 +28,57 @@ def detect_ai(text):
|
|
| 25 |
continue
|
| 26 |
|
| 27 |
sentences = split_sentences(para)
|
|
|
|
| 28 |
highlighted_para = ""
|
| 29 |
|
| 30 |
-
for
|
| 31 |
-
if not
|
| 32 |
continue
|
| 33 |
|
| 34 |
-
inputs = tokenizer(
|
| 35 |
with torch.no_grad():
|
| 36 |
outputs = model(**inputs)
|
| 37 |
probs = torch.softmax(outputs.logits, dim=1)
|
| 38 |
|
| 39 |
ai_score = float(probs[0][1]) # AI likelihood
|
| 40 |
human_score = 1 - ai_score # Human likelihood
|
| 41 |
-
all_scores.append(human_score)
|
| 42 |
|
| 43 |
-
#
|
| 44 |
-
if
|
| 45 |
label = "🔴 AI"
|
| 46 |
color = "rgb(255,120,120)" # red
|
|
|
|
| 47 |
else:
|
| 48 |
label = "🟢 Human"
|
| 49 |
color = "rgb(120,255,120)" # green
|
|
|
|
| 50 |
|
| 51 |
highlighted_para += (
|
| 52 |
-
f"<
|
| 53 |
-
f"
|
| 54 |
-
f"{
|
| 55 |
)
|
| 56 |
|
| 57 |
-
highlighted += f"<div style='margin-bottom:
|
| 58 |
-
|
| 59 |
-
# Compute overall
|
| 60 |
-
if
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 64 |
else:
|
| 65 |
-
|
| 66 |
|
| 67 |
-
return highlighted, {"
|
| 68 |
|
| 69 |
with gr.Blocks() as demo:
|
| 70 |
-
gr.Markdown("## 🤖 AI Detector (
|
| 71 |
-
gr.Markdown("
|
| 72 |
input_text = gr.Textbox(lines=12, placeholder="Paste your essay or report here...")
|
| 73 |
output_html = gr.HTML()
|
| 74 |
output_json = gr.JSON()
|
|
|
|
| 12 |
"""Split a paragraph into sentences."""
|
| 13 |
return re.split(r'(?<=[.!?]) +', paragraph.strip())
|
| 14 |
|
| 15 |
+
def group_sentences(sentences, size=2):
    """Join consecutive sentences into groups of *size* (the last group may be smaller)."""
    grouped = []
    start = 0
    while start < len(sentences):
        grouped.append(" ".join(sentences[start:start + size]))
        start += size
    return grouped
|
| 18 |
+
|
| 19 |
def detect_ai(text, threshold=0.2):
    """Score 2-sentence chunks of *text* as AI- or human-written.

    The text is split into paragraphs (blank-line separated), each paragraph
    into sentences, and the sentences into chunks of two. Every chunk is run
    through the module-level ``tokenizer``/``model`` pair; a chunk whose AI
    probability exceeds *threshold* is flagged as AI.

    Args:
        text: Raw input text to analyse.
        threshold: AI-probability cutoff above which a chunk is flagged as
            AI-written. Defaults to 0.2 (20%), matching the UI description.

    Returns:
        A ``(html, summary)`` tuple: ``html`` is a highlighted HTML rendering
        of the chunks, ``summary`` is a dict with the overall verdict string
        and the number of chunks checked.
    """
    import html  # stdlib; used to escape user text before HTML interpolation

    paragraphs = re.split(r"\n\s*\n", text.strip())

    all_ai_flags = []  # store 1 = AI, 0 = Human
    highlighted = ""

    for para in paragraphs:
        # Skip whitespace-only paragraphs produced by the split.
        if not para.strip():
            continue

        sentences = split_sentences(para)
        chunks = group_sentences(sentences, size=2)
        highlighted_para = ""

        for chunk in chunks:
            if not chunk.strip():
                continue

            # Truncate to the model's context window.
            inputs = tokenizer(chunk, return_tensors="pt", truncation=True, max_length=512)
            with torch.no_grad():
                outputs = model(**inputs)
            probs = torch.softmax(outputs.logits, dim=1)

            ai_score = float(probs[0][1])  # AI likelihood
            human_score = 1 - ai_score     # Human likelihood

            # Threshold check (AI > threshold -> AI)
            if ai_score > threshold:
                label = "🔴 AI"
                color = "rgb(255,120,120)"  # red
                all_ai_flags.append(1)
            else:
                label = "🟢 Human"
                color = "rgb(120,255,120)"  # green
                all_ai_flags.append(0)

            # Escape the user text so markup in it cannot break (or inject
            # into) the rendered HTML.
            highlighted_para += (
                f"<div style='background-color:{color}; padding:4px; margin-bottom:4px; border-radius:4px'>"
                f"<b>{label}</b> — Human {round(human_score*100,1)}% | AI {round(ai_score*100,1)}%<br>"
                f"{html.escape(chunk)}</div>"
            )

        highlighted += f"<div style='margin-bottom:12px'>{highlighted_para}</div>"

    # Compute overall result from the per-chunk flags.
    if all_ai_flags:
        ai_ratio = sum(all_ai_flags) / len(all_ai_flags)
        if ai_ratio == 1:
            overall = "🔴 Overall: 100% AI"
        elif ai_ratio == 0:
            overall = "🟢 Overall: 100% Human"
        else:
            overall = f"⚖️ Overall AI Probability: {round(ai_ratio*100,2)}%"
        highlighted += f"<p><b>{overall}</b></p>"
    else:
        overall = "No text detected"

    return highlighted, {"overall": overall, "chunks_checked": len(all_ai_flags)}
|
| 78 |
|
| 79 |
with gr.Blocks() as demo:
|
| 80 |
+
gr.Markdown("## 🤖 AI Detector (2-sentence chunks)")
|
| 81 |
+
gr.Markdown("Groups of 2 sentences are checked. If AI >20%, the group is flagged as AI.")
|
| 82 |
input_text = gr.Textbox(lines=12, placeholder="Paste your essay or report here...")
|
| 83 |
output_html = gr.HTML()
|
| 84 |
output_json = gr.JSON()
|