"""Gradio Space: deepfake image detector.

ConvNeXt-Base backbone (timm) with a small MLP head, binary Real/Fake
classification on 224x224 face crops. Weights are downloaded from the
Hugging Face Hub on first run.
"""

import os
import urllib.request

import cv2  # noqa: F401  (kept: may be required by the Space's runtime even if unused here)
import gradio as gr
import numpy as np  # noqa: F401
import timm
import torch
import torch.nn as nn
import torch.nn.functional as F  # noqa: F401
import torchvision.transforms as transforms
from PIL import Image

# =====================
# CONFIG
# =====================
MODEL_URL = "https://huggingface.co/ARPAN2026/dfake-hcnext/resolve/main/best_model_New.pth"
MODEL_PATH = "best_model_New.pth"
IMG_SIZE = 224
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")


# =====================
# MODEL DEFINITION
# =====================
class DeepfakeModel(nn.Module):
    """ConvNeXt-Base feature extractor + custom 2-class (Real/Fake) head."""

    def __init__(self):
        super().__init__()
        # num_classes=0 strips timm's built-in classifier; we attach our own.
        self.backbone = timm.create_model("convnext_base", pretrained=False, num_classes=0)
        dim = self.backbone.num_features
        self.classifier = nn.Sequential(
            nn.LayerNorm(dim),
            nn.Linear(dim, 256),
            nn.GELU(),
            nn.Dropout(0.4),
            nn.Linear(256, 2),
        )

    def forward(self, x):
        """Return raw logits of shape (B, 2): index 0 = Real, index 1 = Fake.

        NOTE(review): class order is inferred from predict() below
        (probs[0] -> Real, probs[1] -> Fake) — confirm against training code.
        """
        f = self.backbone.forward_features(x)
        # forward_features may return a (B, C, H, W) feature map rather than a
        # pooled vector; global-average-pool the spatial dims in that case.
        if f.dim() == 4:
            f = f.flatten(2).mean(-1)
        return self.classifier(f)


# =====================
# DOWNLOAD + LOAD
# =====================
def download_model():
    """Fetch the checkpoint from the Hub unless it is already cached locally."""
    if not os.path.exists(MODEL_PATH):
        print("Downloading model weights…")
        urllib.request.urlretrieve(MODEL_URL, MODEL_PATH)
        print("Download complete.")


download_model()

model = DeepfakeModel().to(DEVICE)
# weights_only=True: the file comes from the network — never unpickle
# arbitrary objects from an untrusted checkpoint (pickle can execute code).
# A plain state_dict loads fine under this restriction.
model.load_state_dict(torch.load(MODEL_PATH, map_location=DEVICE, weights_only=True))
model.eval()
print("Model loaded successfully.")

# =====================
# TRANSFORM
# =====================
transform = transforms.Compose([
    transforms.Resize((IMG_SIZE, IMG_SIZE)),
    transforms.ToTensor(),
    transforms.Normalize([0.5] * 3, [0.5] * 3),
])


# =====================
# INFERENCE
# =====================
def predict(image: Image.Image):
    """Classify one face image.

    Args:
        image: PIL image from the Gradio component, or None if nothing
            was uploaded.

    Returns:
        (label_dict, verdict_md): class-probability mapping for gr.Label
        and a Markdown verdict string for the gr.Markdown output.
    """
    if image is None:
        return {"Error": 1.0}, "⚠️ Please upload an image."

    img_tensor = transform(image.convert("RGB")).unsqueeze(0).to(DEVICE)

    with torch.no_grad():
        logits = model(img_tensor)
        probs = torch.softmax(logits, dim=1).cpu().numpy()[0]

    real_prob = float(probs[0])
    fake_prob = float(probs[1])
    confidence = max(real_prob, fake_prob) * 100

    if fake_prob > real_prob:
        verdict = "🔴 DEEPFAKE DETECTED"
    else:
        verdict = "🟢 LIKELY REAL"
    verdict_md = f"## {verdict}\n**Confidence:** {confidence:.1f}%"

    label_dict = {
        "Real": round(real_prob, 4),
        "Fake": round(fake_prob, 4),
    }
    return label_dict, verdict_md


# =====================
# CUSTOM CSS (dark forensic theme)
# =====================
CSS = """
@import url('https://fonts.googleapis.com/css2?family=Share+Tech+Mono&family=Syne:wght@400;700;800&display=swap');

:root {
  --bg: #0a0c10;
  --surface: #111318;
  --border: #1e2330;
  --accent: #00e5ff;
  --danger: #ff3b5c;
  --safe: #00e676;
  --text: #d0d8f0;
  --muted: #5a6480;
  --radius: 8px;
}

body, .gradio-container {
  background: var(--bg) !important;
  font-family: 'Syne', sans-serif !important;
  color: var(--text) !important;
}

/* ---- header ---- */
.gr-header { background: transparent !important; }

h1.title-heading {
  font-family: 'Syne', sans-serif;
  font-weight: 800;
  font-size: 2.4rem;
  letter-spacing: -0.02em;
  background: linear-gradient(90deg, var(--accent), #7b61ff);
  -webkit-background-clip: text;
  -webkit-text-fill-color: transparent;
  margin: 0;
}

p.subtitle {
  color: var(--muted);
  font-family: 'Share Tech Mono', monospace;
  font-size: 0.85rem;
  margin-top: 4px;
  letter-spacing: 0.08em;
}

/* ---- panels ---- */
.gr-box, .gr-panel, .gr-form {
  background: var(--surface) !important;
  border: 1px solid var(--border) !important;
  border-radius: var(--radius) !important;
}

/* ---- upload zone ---- */
.gr-image, .svelte-1n8nu59 {
  border: 2px dashed var(--border) !important;
  border-radius: var(--radius) !important;
  background: #0d0f14 !important;
}

/* ---- buttons ---- */
button.primary {
  background: var(--accent) !important;
  color: #000 !important;
  font-family: 'Syne', sans-serif !important;
  font-weight: 700 !important;
  border: none !important;
  border-radius: var(--radius) !important;
  letter-spacing: 0.05em;
}

button.secondary {
  background: transparent !important;
  border: 1px solid var(--border) !important;
  color: var(--muted) !important;
  font-family: 'Syne', sans-serif !important;
  border-radius: var(--radius) !important;
}

/* ---- labels / markdown output ---- */
.gr-markdown h2 {
  font-family: 'Syne', sans-serif;
  font-size: 1.4rem;
  font-weight: 700;
  margin: 0 0 4px;
}

/* ---- confidence bars ---- */
.gr-label .wrap {
  background: var(--surface) !important;
  border: 1px solid var(--border) !important;
  border-radius: var(--radius) !important;
}

.gr-label .label-wrap span {
  font-family: 'Share Tech Mono', monospace !important;
  color: var(--text) !important;
}

/* confidence fill colors */
.gr-label .bar {
  background: linear-gradient(90deg, var(--accent), #7b61ff) !important;
}

/* ---- footer ---- */
footer { display: none !important; }
"""

# =====================
# GRADIO UI
# =====================
with gr.Blocks(css=CSS, title="DeepFake Detector") as demo:
    # NOTE(review): the original HTML tags were lost when this file was
    # mangled; the markup below was reconstructed from the surviving text
    # and the CSS class names (.title-heading, .subtitle) — verify layout.
    gr.HTML("""
        <div>
          <h1 class="title-heading">DEEPFAKE DETECTOR</h1>
          <p class="subtitle">ConvNeXt-Base · Trained on RVF Faces · Hackathon Edition</p>
        </div>
    """)

    with gr.Row():
        with gr.Column(scale=1):
            image_input = gr.Image(
                type="pil",
                label="Upload Face Image",
                height=320,
            )
            with gr.Row():
                submit_btn = gr.Button("🔍 Analyze", variant="primary")
                clear_btn = gr.ClearButton([image_input], value="✕ Clear")

            # NOTE(review): reconstructed spec panel — original tags lost.
            gr.HTML("""
                <div class="gr-box" style="padding: 12px; font-family: 'Share Tech Mono', monospace; font-size: 0.8rem; line-height: 1.8;">
                  <div><span style="color: var(--muted);">MODEL</span> ConvNeXt-Base + custom head</div>
                  <div><span style="color: var(--muted);">TRAINED</span> Real vs Fake Faces (80/20 split)</div>
                  <div><span style="color: var(--muted);">INPUT</span> 224 × 224 · RGB · normalized</div>
                  <div><span style="color: var(--muted);">CLASSES</span> Real · Fake</div>
                </div>
            """)

        with gr.Column(scale=1):
            verdict_output = gr.Markdown(
                value="*Upload an image and click **Analyze** to begin.*",
                label="Verdict",
            )
            label_output = gr.Label(
                num_top_classes=2,
                label="Class Probabilities",
            )

    # example images (optional — works if you add them to the Space repo)
    gr.Examples(
        examples=[],  # add paths like [["examples/real1.jpg"], ["examples/fake1.jpg"]]
        inputs=image_input,
        label="Example Images",
    )

    submit_btn.click(
        fn=predict,
        inputs=image_input,
        outputs=[label_output, verdict_output],
    )

    # NOTE(review): reconstructed footer — original tags lost.
    gr.HTML("""
        <div style="text-align: center; color: var(--muted); font-family: 'Share Tech Mono', monospace; font-size: 0.75rem; margin-top: 16px;">
          Built with ❤ · Gradio · HuggingFace Spaces · PyTorch
        </div>
    """)

if __name__ == "__main__":
    demo.launch()