# AI_Detector_App / app.py
# Source: Hugging Face Space by ash12321 ("Update app.py", commit 5e8ae74, verified)
# app.py -- Gradio demo: text "credibility" check + generic image classification.
import gradio as gr
from transformers import pipeline, logging
logging.set_verbosity_error() # mute transformers INFO logs to keep the UI logs clean
# -----------------------
# NOTE: These are public models known to load on Spaces.
# - text model: small DistilBERT sentiment model (used as a safe demo for text "credibility")
# - image model: ViT image-classifier (generic). Replace later with a custom deepfake model when ready.
# -----------------------
TEXT_MODEL_ID = "distilbert-base-uncased-finetuned-sst-2-english"
IMAGE_MODEL_ID = "google/vit-base-patch16-224"
# Load pipelines (will download weights on first run)
try:
text_pipe = pipeline("text-classification", model=TEXT_MODEL_ID)
except Exception as e:
text_pipe = None
text_load_error = str(e)
else:
text_load_error = None
try:
image_pipe = pipeline("image-classification", model=IMAGE_MODEL_ID)
except Exception as e:
image_pipe = None
image_load_error = str(e)
else:
image_load_error = None
# Human-readable names for the raw model labels.  Different checkpoints emit
# different label schemes (NEGATIVE/POSITIVE vs LABEL_0/LABEL_1), so both
# spellings map onto the same friendly display strings.
TEXT_FRIENDLY = {
    "NEGATIVE": "Not credible / Fake (demo)",
    "LABEL_0": "Not credible / Fake (demo)",
    "POSITIVE": "Credible / Real (demo)",
    "LABEL_1": "Credible / Real (demo)",
}


def friendly_text_label(raw_label: str) -> str:
    """Translate a raw classifier label into its friendly display name.

    ``None`` becomes ``"Unknown"``; labels with no mapping pass through
    unchanged (lookup is case-insensitive, pass-through keeps original case).
    """
    if raw_label is None:
        return "Unknown"
    return TEXT_FRIENDLY.get(str(raw_label).upper(), raw_label)
def classify_text(text: str):
    """Run the demo credibility classifier over *text*.

    Returns ``(summary, scores)`` where ``summary`` is "<label> (NN.NN%)"
    for the top class and ``scores`` maps friendly label -> probability.
    On empty input, a failed model load, or an inference error, returns an
    explanatory message and an empty dict instead of raising.
    """
    if text is None or not str(text).strip():
        return "Please paste some text to analyze.", {}
    if text_pipe is None:
        return f"Text model failed to load: {text_load_error}", {}
    try:
        # top_k=2 gives a small confidence breakdown for both classes;
        # the pipeline yields dicts like {'label': 'POSITIVE', 'score': 0.98}.
        predictions = text_pipe(text, top_k=2)
        scores = {
            friendly_text_label(p.get("label")): float(p.get("score", 0.0))
            for p in predictions
        }
        best_label, best_score = max(scores.items(), key=lambda kv: kv[1])
        return f"{best_label} ({best_score*100:.2f}%)", scores
    except Exception as e:
        return f"Error during text classification: {e}", {}
def classify_image(image):
    """Classify *image* (a PIL image from the UI) with the generic ViT model.

    Returns ``(summary, scores)`` where ``summary`` is "<label> (NN.NN%)"
    for the top class and ``scores`` maps label -> probability for the
    top-5 predictions.  On missing input, a failed model load, or an
    inference error, returns an explanatory message and an empty dict.
    """
    if image is None:
        return "Please upload an image.", {}
    if image_pipe is None:
        return f"Image model failed to load: {image_load_error}", {}
    try:
        predictions = image_pipe(image, top_k=5)
        scores = {p["label"]: float(p["score"]) for p in predictions}
        best_label, best_score = max(scores.items(), key=lambda kv: kv[1])
        return f"{best_label} ({best_score*100:.2f}%)", scores
    except Exception as e:
        return f"Error during image classification: {e}", {}
# --- UI ---
# Two side-by-side panels (text / image), each with its own Analyze and
# Clear buttons, plus a footer that surfaces model-load errors.
with gr.Blocks(title="AI Detector (Text + Image)") as demo:
    gr.Markdown("## ๐Ÿ”Ž AI Detector\nText (credibility demo) and Image (generic classifier).")
    gr.Markdown(
        "> This app uses public models that load in Spaces. When you have your own trained deepfake model, "
        "you can swap the image model ID in `app.py` to point at your Hugging Face model."
    )
    with gr.Row():
        # Left column: text analysis.
        with gr.Column(scale=1):
            gr.Markdown("### ๐Ÿ“ Text Analysis")
            txt = gr.Textbox(lines=6, placeholder="Paste text here...", label="Input Text")
            txt_result = gr.Textbox(label="Summary")
            txt_probs = gr.Label(label="Confidence (top 2)")
            with gr.Row():
                btn_txt = gr.Button("Analyze Text")
                btn_txt.click(classify_text, inputs=txt, outputs=[txt_result, txt_probs])
                btn_txt_clear = gr.Button("Clear")
                # BUG FIX: the cleared values must follow the outputs order
                # [txt, txt_result, txt_probs] -> (input text, summary string,
                # label dict).  The original returned ("", {}, ""), feeding the
                # dict into the summary Textbox and the string into the Label.
                btn_txt_clear.click(lambda: ("", "", {}), outputs=[txt, txt_result, txt_probs])
        # Right column: image analysis.
        with gr.Column(scale=1):
            gr.Markdown("### ๐Ÿ–ผ๏ธ Image Analysis")
            img = gr.Image(type="pil", label="Upload Image")
            img_result = gr.Textbox(label="Summary")
            img_probs = gr.Label(label="Top-5 Confidence")
            with gr.Row():
                btn_img = gr.Button("Analyze Image")
                btn_img.click(classify_image, inputs=img, outputs=[img_result, img_probs])
                btn_img_clear = gr.Button("Clear")
                btn_img_clear.click(lambda: (None, "", {}), outputs=[img, img_result, img_probs])
    # Footer: show load errors, if any, so failures aren't silent.
    if text_load_error or image_load_error:
        with gr.Column():
            gr.Markdown("**Model load warnings:**")
            if text_load_error:
                gr.Markdown(f"- Text model load error: `{text_load_error}`")
            if image_load_error:
                gr.Markdown(f"- Image model load error: `{image_load_error}`")

demo.launch()