MITTop committed on
Commit
ccd4124
Β·
verified Β·
1 Parent(s): c0c759e

Upload app (1).py

Browse files
Files changed (1) hide show
  1. app (1).py +359 -0
app (1).py ADDED
@@ -0,0 +1,359 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ DeepFake Detector β€” HuggingFace Gradio Space
3
+ =============================================
4
+ Architecture : ConvNeXt-Base + custom classifier head
5
+ Weights : ARPAN2026/dfake-hcnext (auto-downloaded on first run)
6
+ Classes : Real (0) | Fake (1)
7
+ """
8
+
9
+ import os
10
+ import urllib.request
11
+
12
+ import numpy as np
13
+ import torch
14
+ import torch.nn as nn
15
+ import torch.nn.functional as F
16
+ import torchvision.transforms as transforms
17
+ import timm
18
+ import gradio as gr
19
+ from PIL import Image
20
+
21
+
22
# ============================================================
# CONFIG
# ============================================================
# Remote checkpoint on the HuggingFace Hub and the local path it is cached to.
MODEL_URL = "https://huggingface.co/ARPAN2026/dfake-hcnext/resolve/main/best_model_New.pth"
MODEL_PATH = "best_model_New.pth"
# Side length (pixels) every image is resized to before inference.
IMG_SIZE = 224
# Prefer GPU when available; fall back to CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Image normalisation (same as training pipeline)
# Maps each RGB channel from [0, 1] to [-1, 1] via mean = std = 0.5.
TRANSFORM = transforms.Compose([
    transforms.Resize((IMG_SIZE, IMG_SIZE)),
    transforms.ToTensor(),
    transforms.Normalize([0.5] * 3, [0.5] * 3),
])
36
+
37
+
38
+ # ============================================================
39
+ # MODEL DEFINITION
40
+ # ============================================================
41
class DeepfakeModel(nn.Module):
    """Binary real/fake classifier: ConvNeXt-Base features + a small MLP head."""

    def __init__(self) -> None:
        super().__init__()
        # Headless ConvNeXt-Base (num_classes=0); weights come from the
        # downloaded checkpoint, so no pretrained download here.
        self.backbone = timm.create_model(
            "convnext_base", pretrained=False, num_classes=0
        )
        feat_dim = self.backbone.num_features
        # Two-class head: LayerNorm -> 256-d projection -> GELU -> dropout -> logits.
        self.classifier = nn.Sequential(
            nn.LayerNorm(feat_dim),
            nn.Linear(feat_dim, 256),
            nn.GELU(),
            nn.Dropout(0.4),
            nn.Linear(256, 2),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return raw class logits of shape (B, 2) for an input batch."""
        feats = self.backbone.forward_features(x)
        # Spatial feature maps arrive as (B, C, H, W); global-average-pool
        # over the spatial axes to obtain a (B, C) embedding.
        if feats.ndim == 4:
            feats = feats.mean(dim=(2, 3))
        return self.classifier(feats)
63
+
64
+
65
+ # ============================================================
66
+ # UTILITIES
67
+ # ============================================================
68
def download_weights() -> None:
    """Fetch the checkpoint from the HuggingFace Hub unless it is already cached."""
    if os.path.exists(MODEL_PATH):
        print(f"[INFO] Weights already found at '{MODEL_PATH}' — skipping download.")
        return
    print(f"[INFO] Downloading weights from:\n {MODEL_URL}")
    urllib.request.urlretrieve(MODEL_URL, MODEL_PATH)
    print("[INFO] Download complete.")
76
+
77
+
78
def load_model() -> DeepfakeModel:
    """Build a DeepfakeModel, restore checkpoint weights, and switch to eval mode."""
    model = DeepfakeModel().to(DEVICE)
    # NOTE(review): torch.load without weights_only=True unpickles arbitrary
    # objects from the file; acceptable for a trusted checkpoint, but worth
    # confirming the source before deployment.
    model.load_state_dict(torch.load(MODEL_PATH, map_location=DEVICE))
    model.eval()
    print(f"[INFO] Model ready on {DEVICE}.")
    return model
86
+
87
+
88
+ # ============================================================
89
+ # INFERENCE
90
+ # ============================================================
91
def predict(model: DeepfakeModel, image: Image.Image):
    """
    Classify a single uploaded image as Real or Fake.

    Parameters
    ----------
    model : DeepfakeModel
        Loaded, eval-mode network.
    image : PIL.Image.Image | None
        User-supplied image (None when nothing was uploaded).

    Returns
    -------
    label_dict : dict[str, float]
        Class name -> probability mapping (consumed by gr.Label).
    verdict_md : str
        Markdown-formatted verdict string.
    """
    if image is None:
        return {"Error": 1.0}, "⚠️ Please upload an image first."

    # Preprocess to a (1, 3, H, W) batch on the inference device.
    batch = TRANSFORM(image.convert("RGB")).unsqueeze(0).to(DEVICE)

    with torch.no_grad():
        scores = torch.softmax(model(batch), dim=1).cpu().numpy()[0]

    p_real = float(scores[0])
    p_fake = float(scores[1])
    confidence = 100 * max(p_real, p_fake)

    verdict_md = (
        f"## 🔴 DEEPFAKE DETECTED\n**Confidence:** {confidence:.1f}%"
        if p_fake > p_real
        else f"## 🟢 LIKELY REAL\n**Confidence:** {confidence:.1f}%"
    )

    return {"Real": round(p_real, 4), "Fake": round(p_fake, 4)}, verdict_md
132
+
133
+
134
+ # ============================================================
135
+ # CUSTOM CSS (dark forensic theme)
136
+ # ============================================================
137
# Dark "forensic" theme injected into gr.Blocks(css=...): custom fonts,
# gradient heading, styled buttons/panels, and a hidden default footer.
CSS = """
@import url('https://fonts.googleapis.com/css2?family=Share+Tech+Mono&family=Syne:wght@400;700;800&display=swap');

:root {
  --bg: #0a0c10;
  --surface: #111318;
  --border: #1e2330;
  --accent: #00e5ff;
  --danger: #ff3b5c;
  --safe: #00e676;
  --text: #d0d8f0;
  --muted: #5a6480;
  --radius: 8px;
}

body, .gradio-container {
  background: var(--bg) !important;
  font-family: 'Syne', sans-serif !important;
  color: var(--text) !important;
}

h1.title-heading {
  font-family: 'Syne', sans-serif;
  font-weight: 800;
  font-size: 2.4rem;
  letter-spacing: -0.02em;
  background: linear-gradient(90deg, var(--accent), #7b61ff);
  -webkit-background-clip: text;
  -webkit-text-fill-color: transparent;
  margin: 0;
}

p.subtitle {
  color: var(--muted);
  font-family: 'Share Tech Mono', monospace;
  font-size: 0.85rem;
  margin-top: 4px;
  letter-spacing: 0.08em;
}

.gr-box, .gr-panel, .gr-form {
  background: var(--surface) !important;
  border: 1px solid var(--border) !important;
  border-radius: var(--radius) !important;
}

.gr-image, .svelte-1n8nu59 {
  border: 2px dashed var(--border) !important;
  border-radius: var(--radius) !important;
  background: #0d0f14 !important;
}

button.primary {
  background: var(--accent) !important;
  color: #000 !important;
  font-family: 'Syne', sans-serif !important;
  font-weight: 700 !important;
  border: none !important;
  border-radius: var(--radius) !important;
  letter-spacing: 0.05em;
}

button.secondary {
  background: transparent !important;
  border: 1px solid var(--border) !important;
  color: var(--muted) !important;
  font-family: 'Syne', sans-serif !important;
  border-radius: var(--radius) !important;
}

.gr-markdown h2 {
  font-family: 'Syne', sans-serif;
  font-size: 1.4rem;
  font-weight: 700;
  margin: 0 0 4px;
}

.gr-label .wrap {
  background: var(--surface) !important;
  border: 1px solid var(--border) !important;
  border-radius: var(--radius) !important;
}

.gr-label .label-wrap span {
  font-family: 'Share Tech Mono', monospace !important;
  color: var(--text) !important;
}

.gr-label .bar {
  background: linear-gradient(90deg, var(--accent), #7b61ff) !important;
}

footer { display: none !important; }
"""
231
+
232
+
233
+ # ============================================================
234
+ # GRADIO UI BUILDER
235
+ # ============================================================
236
def build_ui(model: DeepfakeModel) -> gr.Blocks:
    """
    Construct and return the Gradio Blocks interface.

    Parameters
    ----------
    model : DeepfakeModel
        Pre-loaded, eval-mode model passed in via closure.

    Returns
    -------
    gr.Blocks
        The assembled Gradio app (not yet launched).
    """

    def _predict_wrapper(image: Image.Image):
        """Closure wrapper — captures `model` from the outer scope."""
        return predict(model, image)

    with gr.Blocks(css=CSS, title="DeepFake Detector") as demo:

        # ── Header ──────────────────────────────────────────────────
        gr.HTML("""
        <div style="text-align:center; padding:32px 0 16px;">
          <h1 class='title-heading'>DEEPFAKE DETECTOR</h1>
          <p class='subtitle'>
            ConvNeXt-Base &nbsp;·&nbsp; Trained on RVF Faces &nbsp;·&nbsp; Hackathon Edition
          </p>
        </div>
        """)

        # ── Main two-column layout ───────────────────────────────────
        with gr.Row():

            # Left column — upload + controls + model info
            with gr.Column(scale=1):
                image_input = gr.Image(
                    type="pil",
                    label="Upload Face Image",
                    height=320,
                )

                with gr.Row():
                    submit_btn = gr.Button("🔍 Analyze", variant="primary")
                    # ClearButton resets the upload widget; no extra wiring needed.
                    clear_btn = gr.ClearButton(
                        components=[image_input],
                        value="✕ Clear",
                    )

                # Static model-info card (HTML, no interactivity).
                gr.HTML("""
                <div style="margin-top:12px; padding:12px 16px;
                            background:#0d0f14; border:1px solid #1e2330;
                            border-radius:8px; font-family:'Share Tech Mono',monospace;
                            font-size:0.78rem; color:#5a6480; line-height:1.9;">
                  <b style="color:#00e5ff;">MODEL</b> &nbsp;&nbsp; ConvNeXt-Base + custom head<br>
                  <b style="color:#00e5ff;">TRAINED</b>&nbsp; Real vs Fake Faces (80/20 split)<br>
                  <b style="color:#00e5ff;">INPUT</b> &nbsp;&nbsp; 224 × 224 · RGB · normalised<br>
                  <b style="color:#00e5ff;">CLASSES</b>&nbsp; Real &nbsp;|&nbsp; Fake
                </div>
                """)

            # Right column — verdict + probability bars
            with gr.Column(scale=1):
                verdict_output = gr.Markdown(
                    value="*Upload an image and click **Analyze** to begin.*",
                    label="Verdict",
                )
                label_output = gr.Label(
                    num_top_classes=2,
                    label="Class Probabilities",
                )

        # ── Example images (add files to repo root to enable) ────────
        gr.Examples(
            examples=[],  # e.g. [["examples/real1.jpg"], ["examples/fake1.jpg"]]
            inputs=image_input,
            label="Example Images",
        )

        # ── Event wiring ─────────────────────────────────────────────
        # Analyze button feeds the upload through the model; outputs map to
        # (probability bars, markdown verdict) in that order.
        submit_btn.click(
            fn=_predict_wrapper,
            inputs=image_input,
            outputs=[label_output, verdict_output],
        )

        # ── Footer ───────────────────────────────────────────────────
        gr.HTML("""
        <div style="text-align:center; padding:24px 0 8px;
                    font-family:'Share Tech Mono',monospace;
                    font-size:0.75rem; color:#2a3050;">
          Built with ❤ &nbsp;·&nbsp; Gradio &nbsp;·&nbsp; HuggingFace Spaces &nbsp;·&nbsp; PyTorch
        </div>
        """)

    return demo
332
+
333
+
334
+ # ============================================================
335
+ # MAIN
336
+ # ============================================================
337
def main() -> None:
    """
    Application entry point — runs the full pipeline:
      1. Download model weights from HuggingFace Hub (if not cached).
      2. Instantiate and load the DeepfakeModel.
      3. Build the Gradio UI.
      4. Launch the Space server.
    """
    download_weights()          # 1. ensure the checkpoint is cached locally
    model = load_model()        # 2. restore network weights, set eval mode
    demo = build_ui(model)      # 3. assemble the Gradio interface
    demo.launch()               # 4. start serving the Space


if __name__ == "__main__":
    main()