File size: 4,837 Bytes
3249109
c810eed
5e8ae74
 
c810eed
5e8ae74
 
 
 
 
 
 
c810eed
5e8ae74
 
 
 
 
 
 
 
c810eed
5e8ae74
 
 
 
 
 
 
c810eed
5e8ae74
 
 
 
 
 
 
c810eed
5e8ae74
 
 
 
 
c810eed
5e8ae74
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c810eed
5e8ae74
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3249109
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
# app.py
import gradio as gr
from transformers import pipeline, logging
logging.set_verbosity_error()  # mute transformers INFO logs to keep the UI logs clean

# -----------------------
# NOTE: These are public models known to load on Spaces.
# - text model: small DistilBERT sentiment model (used as a safe demo for text "credibility")
# - image model: ViT image-classifier (generic). Replace later with a custom deepfake model when ready.
# -----------------------
TEXT_MODEL_ID = "distilbert-base-uncased-finetuned-sst-2-english"
IMAGE_MODEL_ID = "google/vit-base-patch16-224"


def _load_pipeline(task: str, model_id: str):
    """Build a transformers pipeline, returning ``(pipe, error)``.

    Exactly one element of the pair is None: on success ``(pipeline, None)``,
    on failure ``(None, str(exception))``. Any load failure (network, missing
    model, incompatible weights) is captured as a string so the UI can surface
    it in the footer instead of crashing at import time.
    """
    try:
        # Weights are downloaded from the Hub on first run.
        return pipeline(task, model=model_id), None
    except Exception as e:  # broad by design: report any load failure, never raise
        return None, str(e)


# Load pipelines (will download weights on first run)
text_pipe, text_load_error = _load_pipeline("text-classification", TEXT_MODEL_ID)
image_pipe, image_load_error = _load_pipeline("image-classification", IMAGE_MODEL_ID)

# Friendly mapping (different text models return different label names).
# Both the SST-2 label names (NEGATIVE/POSITIVE) and the generic fallback
# names (LABEL_0/LABEL_1) are covered.
_FAKE_LABEL = "Not credible / Fake (demo)"
_REAL_LABEL = "Credible / Real (demo)"
TEXT_FRIENDLY = {
    "NEGATIVE": _FAKE_LABEL,
    "LABEL_0": _FAKE_LABEL,
    "POSITIVE": _REAL_LABEL,
    "LABEL_1": _REAL_LABEL,
}

def friendly_text_label(raw_label: str) -> str:
    """Map a raw classifier label to a human-readable name.

    The lookup is case-insensitive; an unrecognized label falls back to the
    original text unchanged, and None maps to "Unknown".
    """
    if raw_label is None:
        return "Unknown"
    return TEXT_FRIENDLY.get(str(raw_label).upper(), raw_label)

def classify_text(text: str):
    """Classify pasted text with the demo sentiment model.

    Returns a ``(summary, scores)`` pair: a one-line summary string for a
    gr.Textbox and a ``{friendly_label: score}`` dict for a gr.Label.
    All failure modes (empty input, model not loaded, inference error) are
    reported in the summary string rather than raised, so the UI never crashes.
    """
    if text is None or not str(text).strip():
        return "Please paste some text to analyze.", {}
    if text_pipe is None:
        return f"Text model failed to load: {text_load_error}", {}
    try:
        # top_k=2 gives a small confidence breakdown; each prediction is a
        # dict like {'label': 'POSITIVE', 'score': 0.98}.
        predictions = text_pipe(text, top_k=2)
        scores = {
            friendly_text_label(pred.get("label")): float(pred.get("score", 0.0))
            for pred in predictions
        }
        best_label, best_score = max(scores.items(), key=lambda item: item[1])
        return f"{best_label} ({best_score*100:.2f}%)", scores
    except Exception as e:
        return f"Error during text classification: {e}", {}

def classify_image(image):
    """Classify an uploaded image with the generic ViT model.

    Returns a ``(summary, scores)`` pair: a one-line summary string and a
    ``{label: score}`` dict of the top-5 predictions for a gr.Label.
    Failures (no image, model not loaded, inference error) come back as a
    message in the summary string, never as an exception.
    """
    if image is None:
        return "Please upload an image.", {}
    if image_pipe is None:
        return f"Image model failed to load: {image_load_error}", {}
    try:
        scores = {
            pred["label"]: float(pred["score"])
            for pred in image_pipe(image, top_k=5)
        }
        best_label, best_score = max(scores.items(), key=lambda item: item[1])
        return f"{best_label} ({best_score*100:.2f}%)", scores
    except Exception as e:
        return f"Error during image classification: {e}", {}

# --- UI ---
# Two-column layout: text analysis on the left, image analysis on the right,
# each with an Analyze and a Clear button. Model-load errors (if any) are
# surfaced in a footer so a partially-working app still renders.
with gr.Blocks(title="AI Detector (Text + Image)") as demo:
    gr.Markdown("## 🔎 AI Detector\nText (credibility demo) and Image (generic classifier).")
    gr.Markdown(
        "> This app uses public models that load in Spaces. When you have your own trained deepfake model, "
        "you can swap the image model ID in `app.py` to point at your Hugging Face model."
    )

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### 📝 Text Analysis")
            txt = gr.Textbox(lines=6, placeholder="Paste text here...", label="Input Text")
            txt_result = gr.Textbox(label="Summary")
            txt_probs = gr.Label(label="Confidence (top 2)")
            with gr.Row():
                btn_txt = gr.Button("Analyze Text")
                btn_txt.click(classify_text, inputs=txt, outputs=[txt_result, txt_probs])
                btn_txt_clear = gr.Button("Clear")
                # Return values must line up with `outputs`:
                # txt -> "", txt_result (Textbox) -> "", txt_probs (Label) -> {}.
                # (Previously returned ("", {}, ""), feeding a dict to the
                # Summary Textbox and a string to the Label.)
                btn_txt_clear.click(lambda: ("", "", {}), outputs=[txt, txt_result, txt_probs])

        with gr.Column(scale=1):
            gr.Markdown("### 🖼️ Image Analysis")
            img = gr.Image(type="pil", label="Upload Image")
            img_result = gr.Textbox(label="Summary")
            img_probs = gr.Label(label="Top-5 Confidence")
            with gr.Row():
                btn_img = gr.Button("Analyze Image")
                btn_img.click(classify_image, inputs=img, outputs=[img_result, img_probs])
                btn_img_clear = gr.Button("Clear")
                # img -> None (empty image), img_result -> "", img_probs -> {}.
                btn_img_clear.click(lambda: (None, "", {}), outputs=[img, img_result, img_probs])

    # Footer: show load errors if any
    if text_load_error or image_load_error:
        with gr.Column():
            gr.Markdown("**Model load warnings:**")
            if text_load_error:
                gr.Markdown(f"- Text model load error: `{text_load_error}`")
            if image_load_error:
                gr.Markdown(f"- Image model load error: `{image_load_error}`")

demo.launch()