# ai-detector / app.py
# Source: HuggingFace Space by codelion, commit 873ae84 (verified) —
# "Flat dataset: one record per ID, feedback updates in place".
"""HuggingFace Space for AI text detection using adaptive-classifier."""
import json
import re
import urllib.parse
import urllib.request
from datetime import datetime
from html.parser import HTMLParser
from uuid import uuid4
import gradio as gr
from PIL import Image, ImageDraw, ImageFont
from adaptive_classifier import AdaptiveClassifier
# ---------------------------------------------------------------------------
# Model
# ---------------------------------------------------------------------------
print("Loading model...")
# use_onnx=False: run the plain (non-ONNX) backend of the pretrained detector.
classifier = AdaptiveClassifier.from_pretrained(
    "adaptive-classifier/ai-detector", use_onnx=False
)
print("Model loaded!")
# ---------------------------------------------------------------------------
# Persistent dataset via CommitScheduler
# ---------------------------------------------------------------------------
# HF dataset repo holding one JSONL record per prediction (flat layout).
DATASET_REPO = "adaptive-classifier/ai-detector-data"
_predictions = {}  # In-memory cache: prediction id -> record dict
_hf_api = None  # Lazily created HfApi client; see _get_api()
def _get_api():
    """Return the shared HfApi client, creating it on first use."""
    global _hf_api
    if _hf_api is not None:
        return _hf_api
    # Imported lazily so the app starts even before the hub client is needed.
    from huggingface_hub import HfApi
    _hf_api = HfApi()
    return _hf_api
def _load_dataset() -> list[dict]:
"""Download the full JSONL dataset from HF."""
try:
api = _get_api()
path = api.hf_hub_download(DATASET_REPO, "data/predictions.jsonl", repo_type="dataset")
records = []
for line in open(path).read().strip().split("\n"):
if line:
records.append(json.loads(line))
return records
except Exception:
return []
def _save_dataset(records: list[dict], message: str = "Update dataset"):
    """Serialize *records* as JSONL and upload the file to the HF dataset repo."""
    import io
    serialized = [json.dumps(rec) for rec in records]
    payload = ("\n".join(serialized) + "\n").encode()
    _get_api().upload_file(
        path_or_fileobj=io.BytesIO(payload),
        path_in_repo="data/predictions.jsonl",
        repo_id=DATASET_REPO,
        repo_type="dataset",
        commit_message=message,
    )
def save_prediction(pred_id: str, text: str, url: str, label: str, confidence: float):
    """Cache a prediction in memory and append it to the HF dataset (best-effort)."""
    entry = {
        "id": pred_id,
        "text": text,
        "url": url,
        "prediction": label,
        "confidence": confidence,
        "feedback": None,
        "timestamp": datetime.now().isoformat(),
    }
    _predictions[pred_id] = entry
    try:
        # Read-modify-write of the whole JSONL file.
        all_records = _load_dataset()
        all_records.append(entry)
        _save_dataset(all_records, f"Add prediction {pred_id}")
    except Exception as exc:
        print(f"Warning: failed to push prediction: {exc}")
def lookup_prediction(pred_id: str) -> dict | None:
    """Find a prediction record by id: in-memory cache first, then the HF dataset."""
    cached = _predictions.get(pred_id)
    if cached is not None:
        return cached
    for record in _load_dataset():
        if record.get("id") == pred_id:
            # Memoize so the next lookup skips the dataset download.
            _predictions[pred_id] = record
            return record
    return None
def save_feedback(pred_id: str, feedback: str):
    """Attach user feedback to an existing prediction, in memory and on HF."""
    cached = _predictions.get(pred_id)
    if cached is not None:
        cached["feedback"] = feedback
    try:
        all_records = _load_dataset()
        for record in all_records:
            if record.get("id") == pred_id:
                # Update in place and push the whole file back.
                record["feedback"] = feedback
                _save_dataset(all_records, f"Add feedback for {pred_id}")
                break
    except Exception as exc:
        print(f"Warning: failed to push feedback: {exc}")
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
# Public URL of this Space; used to build shareable ?id= links.
SPACE_URL = "https://adaptive-classifier-ai-detector.hf.space"
# Browser-like request headers so sites don't reject the fetch as a bot.
HEADERS = {
    "User-Agent": (
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/131.0.0.0 Safari/537.36"
    ),
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "en-US,en;q=0.9",
}
# Dark-theme CSS injected into gr.Blocks: fonts, container width, tab styling,
# input/result cards, detect/feedback buttons, and hiding the share-card toolbar.
CSS = """
@import url('https://fonts.googleapis.com/css2?family=DM+Mono:wght@300;400;500&family=Outfit:wght@300;400;500;600;700&display=swap');
:root {
--bg-deep: #0a0e17;
--bg-surface: #111827;
--bg-card: #1a2234;
--bg-input: #0f1729;
--border-subtle: #1e2d45;
--border-accent: #2563eb;
--text-primary: #e2e8f0;
--text-secondary: #8892a6;
--text-muted: #4a5568;
--accent-blue: #3b82f6;
--accent-cyan: #06b6d4;
--accent-human: #10b981;
--accent-ai: #f59e0b;
--glow-blue: rgba(59, 130, 246, 0.15);
}
.gradio-container {
background: var(--bg-deep) !important;
font-family: 'Outfit', sans-serif !important;
max-width: 820px !important;
margin: 0 auto !important;
}
.main, .contain { background: transparent !important; }
footer { display: none !important; }
.header-block { text-align: center; padding: 2rem 1rem 1rem; }
.header-block h1 {
font-family: 'DM Mono', monospace !important;
font-size: 1.6rem !important; font-weight: 500 !important;
color: var(--text-primary) !important; letter-spacing: 0.04em;
margin-bottom: 0.4rem !important;
}
.header-block p {
font-size: 0.85rem !important; color: var(--text-secondary) !important;
line-height: 1.5 !important; max-width: 560px; margin: 0 auto !important;
}
.header-block a {
color: var(--accent-cyan) !important; text-decoration: none !important;
border-bottom: 1px solid rgba(6, 182, 212, 0.3);
}
.header-block a:hover { border-bottom-color: var(--accent-cyan); }
.tabs { background: transparent !important; border: none !important; }
.tab-nav {
background: transparent !important; border: none !important;
justify-content: center !important; gap: 0.25rem !important;
padding: 0.5rem 0 !important;
}
.tab-nav button {
font-family: 'DM Mono', monospace !important;
font-size: 0.8rem !important; font-weight: 400 !important;
letter-spacing: 0.06em; text-transform: uppercase;
color: var(--text-muted) !important; background: transparent !important;
border: 1px solid var(--border-subtle) !important;
border-radius: 6px !important; padding: 0.5rem 1.5rem !important;
transition: all 0.2s ease !important;
}
.tab-nav button:hover { color: var(--text-secondary) !important; border-color: var(--text-muted) !important; }
.tab-nav button.selected {
color: var(--accent-cyan) !important;
background: rgba(6, 182, 212, 0.08) !important;
border-color: var(--accent-cyan) !important;
}
.tabitem { background: transparent !important; border: none !important; padding: 0 !important; min-height: 520px !important; }
.input-card {
background: var(--bg-card) !important; border: 1px solid var(--border-subtle) !important;
border-radius: 10px !important; padding: 1.25rem !important; margin-top: 0.75rem !important;
}
textarea, input[type="text"] {
font-family: 'DM Mono', monospace !important; font-size: 0.85rem !important;
line-height: 1.65 !important; color: var(--text-primary) !important;
background: var(--bg-input) !important; border: 1px solid var(--border-subtle) !important;
border-radius: 8px !important; padding: 0.85rem 1rem !important;
transition: border-color 0.2s ease !important;
}
textarea:focus, input[type="text"]:focus {
border-color: var(--accent-blue) !important;
box-shadow: 0 0 0 3px var(--glow-blue) !important; outline: none !important;
}
label span {
font-family: 'DM Mono', monospace !important; font-size: 0.7rem !important;
text-transform: uppercase !important; letter-spacing: 0.08em !important;
color: var(--text-muted) !important;
}
.detect-btn {
font-family: 'DM Mono', monospace !important; font-size: 0.8rem !important;
font-weight: 500 !important; letter-spacing: 0.06em !important;
text-transform: uppercase !important;
background: linear-gradient(135deg, var(--accent-blue), var(--accent-cyan)) !important;
color: #fff !important; border: none !important; border-radius: 8px !important;
padding: 0.7rem 2rem !important; cursor: pointer !important;
transition: all 0.25s ease !important;
box-shadow: 0 2px 12px rgba(59, 130, 246, 0.25) !important;
}
.detect-btn:hover { box-shadow: 0 4px 20px rgba(59, 130, 246, 0.4) !important; transform: translateY(-1px) !important; }
.result-card {
background: var(--bg-card) !important; border: 1px solid var(--border-subtle) !important;
border-radius: 10px !important; padding: 1.25rem !important; margin-top: 0.5rem !important;
}
.result-card .output-class { font-family: 'DM Mono', monospace !important; }
.output-label { background: transparent !important; }
.output-label .label-name { font-family: 'DM Mono', monospace !important; font-size: 1.1rem !important; }
.examples-heading {
font-family: 'DM Mono', monospace !important; font-size: 0.7rem !important;
text-transform: uppercase !important; letter-spacing: 0.08em !important;
color: var(--text-muted) !important; margin-top: 1.25rem !important; margin-bottom: 0.5rem !important;
}
.gallery { gap: 0.5rem !important; }
.gallery .gallery-item {
background: var(--bg-input) !important; border: 1px solid var(--border-subtle) !important;
border-radius: 8px !important; padding: 0.75rem !important; transition: border-color 0.2s ease !important;
}
.gallery .gallery-item:hover { border-color: var(--text-muted) !important; }
.preview-box textarea { color: var(--text-secondary) !important; font-size: 0.78rem !important; opacity: 0.85; }
.gr-group, .gr-block, .gr-box, .gr-panel { background: transparent !important; border: none !important; }
.gr-padded { padding: 0 !important; }
.info-strip { text-align: center; padding: 1.25rem 1rem; }
.info-strip p { font-family: 'DM Mono', monospace !important; font-size: 0.68rem !important; color: var(--text-muted) !important; letter-spacing: 0.03em; }
.info-strip a { color: var(--text-secondary) !important; text-decoration: none !important; border-bottom: 1px dotted var(--text-muted); }
#share-card .image-toolbar, #text-share-card .image-toolbar,
#share-card .icon-buttons, #text-share-card .icon-buttons,
#share-card button[aria-label], #text-share-card button[aria-label] { display: none !important; }
.feedback-btn {
font-family: 'DM Mono', monospace !important; font-size: 0.75rem !important;
border-radius: 6px !important; padding: 0.4rem 1rem !important;
cursor: pointer !important; transition: all 0.2s ease !important;
}
.feedback-msg {
font-family: 'DM Mono', monospace !important; font-size: 0.75rem !important;
color: var(--accent-cyan) !important; padding: 0.4rem 0 !important;
}
"""
# One-click sample inputs for the "Try an example" galleries in the UI.
# Human-written news-report sample.
HUMAN_EXAMPLE = (
    "Nottinghamshire Healthcare NHS Trust is proposing to close Broomhill House "
    "in Gedling and another unit at Heather Close in Mansfield. The trust said "
    "patient feedback showed most preferred to be cared for in their own homes "
    "rather than a hospital setting. Staff and patients at both sites have been "
    "informed of the consultation. The proposals would see 38 inpatient beds "
    "replaced by more intensive community support. Mark Stocks, the trust medical "
    "director, said the community-based approach meant more patients could be "
    "helped. He said a full public consultation would take place before any "
    "final decisions were made."
)
# AI-generated explainer sample.
AI_EXAMPLE = (
    "Understanding Intramuscular Injections: A Vital Medical Delivery Method. "
    "When we think about receiving medication, most people immediately picture "
    "swallowing pills or receiving shots in the arm. That said, intramuscular "
    "injections represent a crucial and nuanced approach to drug delivery that "
    "deserves a closer look. These injections deliver medication directly into "
    "muscle tissue, allowing for efficient absorption into the bloodstream. "
    "The technique requires careful consideration of injection site selection, "
    "needle gauge, and proper anatomical knowledge to ensure both safety and "
    "efficacy for the patient."
)
# ---------------------------------------------------------------------------
# HTML extraction
# ---------------------------------------------------------------------------
class _TextExtractor(HTMLParser):
def __init__(self):
super().__init__()
self._parts, self._skip = [], False
self._skip_tags = {"script", "style", "nav", "header", "footer", "noscript"}
def handle_starttag(self, tag, attrs):
if tag in self._skip_tags:
self._skip = True
if tag in ("p", "br", "div", "h1", "h2", "h3", "h4", "li", "tr"):
self._parts.append("\n")
def handle_endtag(self, tag):
if tag in self._skip_tags:
self._skip = False
def handle_data(self, data):
if not self._skip:
self._parts.append(data)
def get_text(self):
return re.sub(r"\s+", " ", " ".join(self._parts)).strip()
def fetch_url(url: str) -> str:
    """Download *url* and return its extracted visible text ("" for blank input)."""
    cleaned = (url or "").strip()
    if not cleaned:
        return ""
    # Default to HTTPS when no scheme was given.
    if not cleaned.startswith(("http://", "https://")):
        cleaned = "https://" + cleaned
    request = urllib.request.Request(cleaned, headers=HEADERS)
    with urllib.request.urlopen(request, timeout=15) as response:
        page = response.read().decode("utf-8", errors="ignore")
    extractor = _TextExtractor()
    extractor.feed(page)
    return extractor.get_text()
# ---------------------------------------------------------------------------
# Result card image
# ---------------------------------------------------------------------------
def make_result_card(source: str, label: str, confidence: float) -> Image.Image:
    """Render an 800x418 shareable PNG card summarizing one detection result.

    Args:
        source: the analyzed URL, or a short text preview.
        label: predicted class; "ai" (case-insensitive) is treated as
            AI-generated, anything else as human-written.
        confidence: top-class score, expected in [0, 1] — drives the bar width.
    """
    W, H = 800, 418
    bg, card_bg = "#0a0e17", "#1a2234"
    human_color, ai_color = "#10b981", "#f59e0b"
    text_color, muted_color = "#e2e8f0", "#8892a6"
    img = Image.new("RGB", (W, H), bg)
    draw = ImageDraw.Draw(img)
    try:
        # DejaVu paths — presumably present in the Space's base image; falls
        # back to PIL's default bitmap font otherwise.
        fl = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", 32)
        fm = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 20)
        fs = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 14)
        fmono = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", 16)
    except OSError:
        fl = fm = fs = fmono = ImageFont.load_default()
    draw.rounded_rectangle([30, 30, W - 30, H - 30], radius=16, fill=card_bg)
    draw.text((60, 55), "AI Text Detector", fill=text_color, font=fl)
    is_ai = label.lower() == "ai"
    rc = ai_color if is_ai else human_color
    draw.text((60, 120), "Result:", fill=muted_color, font=fm)
    draw.text((60, 155), "AI-Generated" if is_ai else "Human-Written", fill=rc, font=fl)
    draw.text((60, 200), f"Confidence: {confidence * 100:.1f}%", fill=text_color, font=fm)
    # Confidence bar: dark track first, then the filled portion in the result color.
    bx, by, bw, bh = 60, 240, W - 120, 24
    draw.rounded_rectangle([bx, by, bx + bw, by + bh], radius=12, fill="#0f1729")
    fw = int(bw * confidence)
    if fw > 0:
        draw.rounded_rectangle([bx, by, bx + fw, by + bh], radius=12, fill=rc)
    is_url = source.startswith("http") or source.startswith("www")
    draw.text((60, 285), "URL analyzed:" if is_url else "Text analyzed:", fill=muted_color, font=fs)
    max_text_w = W - 120  # 60px padding each side
    if is_url:
        # URLs: single line, truncate with ...
        disp = source
        while fmono.getlength(disp) > max_text_w and len(disp) > 10:
            disp = disp[:-4] + "..."
        draw.text((60, 308), disp, fill=text_color, font=fmono)
    else:
        # Text: wrap into up to 3 lines
        words = source.split()
        lines = []
        current = ""
        for w in words:
            test = f"{current} {w}".strip()
            if fmono.getlength(test) > max_text_w:
                if current:
                    lines.append(current)
                current = w
            else:
                current = test
            if len(lines) >= 2:
                # Third (final) line reached — mark truncation and stop wrapping.
                current = current + "..."
                break
        if current:
            lines.append(current)
        for i, line in enumerate(lines[:3]):
            draw.text((60, 308 + i * 20), line, fill=text_color, font=fmono)
    draw.text((60, H - 60), "adaptive-classifier-ai-detector.hf.space", fill=muted_color, font=fs)
    return img
# ---------------------------------------------------------------------------
# Detection logic
# ---------------------------------------------------------------------------
def _error_label(msg: str) -> dict:
return {msg: 1.0}
def _classify(text: str) -> dict:
    """Run the detector on *text*; returns {label: score} or an error label."""
    word_count = len(text.strip().split()) if text else 0
    if word_count < 10:
        return _error_label("Please enter at least a few sentences (~50 words)")
    # Scores rounded for a tidy gr.Label display.
    return {label: round(score, 4) for label, score in classifier.predict(text, k=2)}
def detect_text_full(text: str):
    """Classify pasted text. Returns (result, share_link, card, pred_id)."""
    scores = _classify(text)
    if any(key.startswith("Please") for key in scores):
        # Validation failed — nothing to persist or share.
        return scores, "", None, ""
    winner = max(scores, key=scores.get)
    pred_id = uuid4().hex[:12]
    save_prediction(pred_id, text, "", winner, scores[winner])
    stripped = text.strip()
    snippet = stripped[:80] + ("..." if len(stripped) > 80 else "")
    card = make_result_card(snippet, winner, scores[winner])
    return scores, f"{SPACE_URL}/?id={pred_id}", card, pred_id
def detect_url_full(url: str):
    """Fetch and classify a web page.

    Returns (result, preview_text, share_link, card, pred_id).
    """
    if not url or not url.strip():
        return _error_label("Please enter a URL"), "", "", None, ""
    try:
        text = fetch_url(url)
    except Exception as exc:
        return _error_label(f"Could not fetch URL: {exc}"), "", "", None, ""
    # Heuristic: JS-walled pages return only "enable JavaScript" boilerplate.
    js_hints = ["javascript is not available", "enable javascript", "javascript is disabled",
                "please enable js", "requires javascript", "noscript",
                "if you are not redirected", "please click here"]
    lowered = text.lower()
    if any(hint in lowered for hint in js_hints):
        return _error_label("This site requires JavaScript (e.g. Twitter/X). Paste the text directly instead."), "", "", None, ""
    words = text.split()
    if len(words) < 10:
        return _error_label("Not enough readable text found at that URL"), text[:500], "", None, ""
    if len(words) > 2000:
        # Cap input length for the classifier.
        text = " ".join(words[:2000])
    scores = _classify(text)
    if any(key.startswith(("Please", "Not enough")) for key in scores):
        return scores, text[:500], "", None, ""
    winner = max(scores, key=scores.get)
    pred_id = uuid4().hex[:12]
    save_prediction(pred_id, text, url.strip(), winner, scores[winner])
    preview = text[:1500] + ("..." if len(text) > 1500 else "")
    link = f"{SPACE_URL}/?id={pred_id}"
    card = make_result_card(url.strip(), winner, scores[winner])
    return scores, preview, link, card, pred_id
# ---------------------------------------------------------------------------
# UI
# ---------------------------------------------------------------------------
with gr.Blocks(css=CSS, title="AI Text Detector", theme=gr.themes.Base()) as demo:
    # Page header with project links.
    gr.HTML("""
<div class="header-block">
<h1>AI Text Detector</h1>
<p>
Classify text as <strong>human-written</strong> or <strong>AI-generated</strong>.
Built with <a href="https://github.com/codelion/adaptive-classifier">adaptive-classifier</a>
and trained on the <a href="https://huggingface.co/datasets/pangram/editlens_iclr">EditLens</a> dataset.
Paste text directly or enter a URL to fetch and analyze.
</p>
</div>
""")
    with gr.Tabs() as tabs:
        # ---- TEXT TAB ----
        with gr.TabItem("Text", id="text-tab"):
            with gr.Group(elem_classes="input-card"):
                text_input = gr.Textbox(lines=7, placeholder="Paste text here to analyze...", label="Input Text", show_label=True)
                text_btn = gr.Button("Analyze", variant="primary", elem_classes="detect-btn")
            with gr.Group(elem_classes="result-card"):
                text_output = gr.Label(num_top_classes=2, label="Result")
            # Hidden textbox carries the prediction id to the feedback handlers.
            text_pred_id = gr.Textbox(visible=False, elem_id="text-pred-id")
            # Feedback
            with gr.Row(visible=False) as text_fb_row:
                text_fb_up = gr.Button("Correct", size="sm", elem_classes="feedback-btn")
                text_fb_down = gr.Button("Incorrect", size="sm", elem_classes="feedback-btn")
            text_fb_msg = gr.HTML(visible=False, elem_classes="feedback-msg")
            # Share
            text_share_link = gr.Textbox(label="Share this result", visible=False, interactive=False, elem_id="text-share-url")
            with gr.Row(visible=False) as text_share_row:
                text_copy_link_btn = gr.Button("Copy Link", size="sm", elem_classes="detect-btn")
                text_copy_img_btn = gr.Button("Copy Image", size="sm", elem_classes="detect-btn")
                text_dl_img_btn = gr.Button("Download Image", size="sm", elem_classes="detect-btn")
            text_share_card = gr.Image(label="Result card", visible=False, type="pil", elem_id="text-share-card")
def run_text(text):
    """Classify pasted text and toggle the feedback/share widgets accordingly."""
    result, link, card, pid = detect_text_full(text)
    show = bool(link)  # link is empty when validation failed
    return (
        result,
        gr.update(value=pid, visible=False),
        gr.update(visible=show),
        gr.update(visible=False),
        gr.update(value=link, visible=show),
        gr.update(visible=show),
        gr.update(value=card, visible=show),
    )
# Wire the Analyze button; also exposed as API endpoint "detect".
text_btn.click(
    fn=run_text, inputs=text_input,
    outputs=[text_output, text_pred_id, text_fb_row, text_fb_msg, text_share_link, text_share_row, text_share_card],
    api_name="detect",
)

def text_fb_positive(pid):
    # Record a "correct" vote, then replace the buttons with a thank-you note.
    if pid:
        save_feedback(pid, "correct")
    return gr.update(visible=False), gr.update(value='<span class="feedback-msg">Thanks for your feedback!</span>', visible=True)

def text_fb_negative(pid):
    # Record an "incorrect" vote, then replace the buttons with a thank-you note.
    if pid:
        save_feedback(pid, "incorrect")
    return gr.update(visible=False), gr.update(value='<span class="feedback-msg">Thanks for your feedback!</span>', visible=True)

text_fb_up.click(fn=text_fb_positive, inputs=text_pred_id, outputs=[text_fb_row, text_fb_msg])
text_fb_down.click(fn=text_fb_negative, inputs=text_pred_id, outputs=[text_fb_row, text_fb_msg])
# Client-side only (fn=None): copy the share link to the clipboard.
text_copy_link_btn.click(fn=None, inputs=text_share_link, js="(u) => { navigator.clipboard.writeText(u); }")
# Client-side only: draw the card <img> onto a canvas and copy it as PNG.
text_copy_img_btn.click(fn=None, js="""() => {
const img = document.querySelector('#text-share-card img');
if (img) { const c = document.createElement('canvas'); c.width = img.naturalWidth; c.height = img.naturalHeight;
c.getContext('2d').drawImage(img, 0, 0);
c.toBlob(b => navigator.clipboard.write([new ClipboardItem({'image/png': b})]), 'image/png'); }
}""")
# Client-side only: trigger a browser download of the card image.
text_dl_img_btn.click(fn=None, js="""() => {
const img = document.querySelector('#text-share-card img');
if (img) { const a = document.createElement('a'); a.href = img.src; a.download = 'ai-detector-result.png'; a.click(); }
}""")
gr.HTML('<div class="examples-heading">Try an example</div>')
gr.Examples(examples=[[HUMAN_EXAMPLE], [AI_EXAMPLE]], inputs=text_input, label="")
# ---- URL TAB ----
with gr.TabItem("URL", id="url-tab"):
    with gr.Group(elem_classes="input-card"):
        url_input = gr.Textbox(lines=1, placeholder="https://example.com/article", label="Web Page URL", show_label=True)
        url_btn = gr.Button("Fetch & Analyze", variant="primary", elem_classes="detect-btn")
    with gr.Group(elem_classes="result-card"):
        url_output = gr.Label(num_top_classes=2, label="Result")
    # Hidden textbox carries the prediction id to the feedback handlers.
    url_pred_id = gr.Textbox(visible=False, elem_id="url-pred-id")
    # Feedback
    with gr.Row(visible=False) as url_fb_row:
        url_fb_up = gr.Button("Correct", size="sm", elem_classes="feedback-btn")
        url_fb_down = gr.Button("Incorrect", size="sm", elem_classes="feedback-btn")
    url_fb_msg = gr.HTML(visible=False, elem_classes="feedback-msg")
    # Share
    share_link = gr.Textbox(label="Share this result", visible=False, interactive=False, elem_id="share-url")
    with gr.Row(visible=False) as share_row:
        copy_link_btn = gr.Button("Copy Link", size="sm", elem_classes="detect-btn")
        copy_img_btn = gr.Button("Copy Image", size="sm", elem_classes="detect-btn")
        dl_img_btn = gr.Button("Download Image", size="sm", elem_classes="detect-btn")
    share_card = gr.Image(label="Result card", visible=False, type="pil", elem_id="share-card")
    with gr.Group(elem_classes="input-card"):
        url_preview = gr.Textbox(label="Extracted Text", lines=5, interactive=False, elem_classes="preview-box")
def run_url(url):
    """Fetch a page, classify it, and toggle the feedback/share widgets."""
    result, preview, link, card, pid = detect_url_full(url)
    show = bool(link)  # link is empty when fetch or validation failed
    return (
        result,
        gr.update(value=pid, visible=False),
        gr.update(visible=show),
        gr.update(visible=False),
        gr.update(value=link, visible=show),
        gr.update(visible=show),
        gr.update(value=card, visible=show),
        preview,
    )
# Wire the Fetch & Analyze button; also exposed as API endpoint "detect_url".
url_btn.click(
    fn=run_url, inputs=url_input,
    outputs=[url_output, url_pred_id, url_fb_row, url_fb_msg, share_link, share_row, share_card, url_preview],
    api_name="detect_url",
)

def url_fb_positive(pid):
    # Record a "correct" vote, then replace the buttons with a thank-you note.
    if pid:
        save_feedback(pid, "correct")
    return gr.update(visible=False), gr.update(value='<span class="feedback-msg">Thanks for your feedback!</span>', visible=True)

def url_fb_negative(pid):
    # Record an "incorrect" vote, then replace the buttons with a thank-you note.
    if pid:
        save_feedback(pid, "incorrect")
    return gr.update(visible=False), gr.update(value='<span class="feedback-msg">Thanks for your feedback!</span>', visible=True)

url_fb_up.click(fn=url_fb_positive, inputs=url_pred_id, outputs=[url_fb_row, url_fb_msg])
url_fb_down.click(fn=url_fb_negative, inputs=url_pred_id, outputs=[url_fb_row, url_fb_msg])
# Client-side only (fn=None): copy the share link to the clipboard.
copy_link_btn.click(fn=None, inputs=share_link, js="(u) => { navigator.clipboard.writeText(u); }")
# Client-side only: draw the card <img> onto a canvas and copy it as PNG.
copy_img_btn.click(fn=None, js="""() => {
const img = document.querySelector('#share-card img');
if (img) { const c = document.createElement('canvas'); c.width = img.naturalWidth; c.height = img.naturalHeight;
c.getContext('2d').drawImage(img, 0, 0);
c.toBlob(b => navigator.clipboard.write([new ClipboardItem({'image/png': b})]), 'image/png'); }
}""")
# Client-side only: trigger a browser download of the card image.
dl_img_btn.click(fn=None, js="""() => {
const img = document.querySelector('#share-card img');
if (img) { const a = document.createElement('a'); a.href = img.src; a.download = 'ai-detector-result.png'; a.click(); }
}""")
gr.HTML('<div class="examples-heading">Try an example</div>')
gr.Examples(
    examples=[
        ["https://en.wikipedia.org/wiki/Constitution_of_the_United_States"],
        ["https://garryslist.org/posts/richmond-just-voted-to-reinstate-their-flock-cameras-after-crime-spiked"],
    ],
    inputs=url_input, label="",
)
# Footer strip with model and repo links.
gr.HTML("""
<div class="info-strip">
<p>
Model: <a href="https://huggingface.co/adaptive-classifier/ai-detector">adaptive-classifier/ai-detector</a>
&nbsp;&middot;&nbsp;
<a href="https://github.com/codelion/adaptive-classifier">GitHub</a>
&nbsp;&middot;&nbsp;
Best with 50+ words
</p>
</div>
""")
# Handle ?id= share links on page load
def _on_load(request: gr.Request):
    """Restore a shared result when the page is opened with ?id=<pred_id>.

    Returns one value per component in demo.load's 18-element outputs list
    (tabs, then 8 text-tab slots, then 9 url-tab slots) — order must match
    the outputs list exactly.
    """
    pred_id = request.query_params.get("id", "")
    # Defaults for all outputs
    d = gr.update()  # no-op: leave the component unchanged
    n = ""
    hide = gr.update(visible=False)
    empty = {}  # empty gr.Label result
    defaults = (d, n, empty, hide, hide, hide, n, hide, hide, n, empty, hide, hide, hide, hide, hide, hide, n)
    rec = lookup_prediction(pred_id) if pred_id else None
    if rec:
        # Rebuild a two-class score dict from the stored top prediction.
        result = {rec["prediction"]: rec["confidence"]}
        other = "human" if rec["prediction"] == "ai" else "ai"
        result[other] = round(1.0 - rec["confidence"], 4)
        # NOTE(review): "..." is appended even when the stored text is shorter
        # than 80 chars — harmless on the card, but inconsistent with
        # detect_text_full's conditional ellipsis.
        source = rec["url"] or rec["text"][:80] + "..."
        card = make_result_card(source, rec["prediction"], rec["confidence"])
        link = f"{SPACE_URL}/?id={pred_id}"
        if rec["url"]:
            # URL prediction: select the URL tab and populate its slots.
            return (
                gr.update(selected="url-tab"),
                n, empty, hide, hide, hide, n, hide, hide,
                rec["url"], result, gr.update(value=pred_id, visible=False),
                gr.update(visible=True), hide,
                gr.update(value=link, visible=True), gr.update(visible=True),
                gr.update(value=card, visible=True), rec["text"][:1500],
            )
        else:
            # Text prediction: select the Text tab and populate its slots.
            return (
                gr.update(selected="text-tab"),
                rec["text"], result, gr.update(value=pred_id, visible=False),
                gr.update(visible=True), hide,
                gr.update(value=link, visible=True), gr.update(visible=True),
                gr.update(value=card, visible=True),
                n, empty, hide, hide, hide, hide, hide, hide, n,
            )
    return defaults
# Run the share-link restore handler on every page load; the outputs order
# here defines the tuple order _on_load must return.
demo.load(
    fn=_on_load, inputs=None,
    outputs=[
        tabs,
        # text tab
        text_input, text_output, text_pred_id, text_fb_row, text_fb_msg,
        text_share_link, text_share_row, text_share_card,
        # url tab
        url_input, url_output, url_pred_id, url_fb_row, url_fb_msg,
        share_link, share_row, share_card, url_preview,
    ],
)

if __name__ == "__main__":
    # ssr_mode=False disables Gradio's server-side rendering — presumably to
    # avoid SSR issues in the Space runtime; confirm before changing.
    demo.launch(share=False, ssr_mode=False)