bithal26 committed on
Commit
f90bad5
·
verified ·
1 Parent(s): d828d72

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -177
app.py CHANGED
@@ -1,19 +1,22 @@
1
  import os
2
- import cv2
3
  import torch
4
  import numpy as np
5
  from PIL import Image
 
6
  import gradio as gr
7
  from gradio_client import Client, handle_file
8
  from torchvision.transforms import Normalize
9
  from facenet_pytorch.models.mtcnn import MTCNN
10
  import concurrent.futures
11
  import tempfile
 
 
 
 
12
 
13
  # ==========================================
14
  # 1. API ROUTER CONFIGURATION
15
  # ==========================================
16
- # These must match your exact Hugging Face Worker Space names
17
  WORKER_SPACES = [
18
  "bithal26/DeepFake-Worker-1",
19
  "bithal26/DeepFake-Worker-2",
@@ -24,21 +27,19 @@ WORKER_SPACES = [
24
  "bithal26/DeepFake-Worker-7"
25
  ]
26
 
27
- # Note: If your worker spaces are PRIVATE, you must add your HF_TOKEN
28
- # to this UI Space's Secrets for the Client to connect successfully.
29
  clients = []
30
  print("Initializing connections to 7 API Workers...")
 
31
 
32
- # Explicitly grab the token from the Space Secrets
33
- hf_token = os.environ.get("HF_TOKEN")
34
 
35
  for space in WORKER_SPACES:
36
  try:
37
- # Pass the token directly to the Client so it can unlock the private spaces
38
  clients.append(Client(space, token=hf_token))
39
  except Exception as e:
40
- print(f"Warning: Could not connect to {space}. Is it private/sleeping? Error: {e}")
41
-
42
  # ==========================================
43
  # 2. MTCNN PREPROCESSING ENGINE
44
  # ==========================================
@@ -125,32 +126,43 @@ def confident_strategy(pred, t=0.8):
125
  else:
126
  return np.mean(pred)
127
 
128
- # ==========================================
129
- # 3. PARALLEL API EXECUTION
130
- # ==========================================
131
  def call_worker(client, tensor_filepath):
132
- """Pings a single Hugging Face API Worker"""
133
  try:
134
  result = client.predict(tensor_file=handle_file(tensor_filepath), api_name="/predict")
135
- # Result should be a dictionary: {"predictions": [...]}
136
  preds = result.get("predictions", [])
137
- if not preds:
138
- return 0.5 # Default middle ground if error
139
  return confident_strategy(preds)
140
  except Exception as e:
141
  print(f"API Call Failed: {e}")
142
  return 0.5
143
 
144
- def analyze_video(video_path):
145
- if not video_path:
146
- return "<div style='color:var(--red); font-family:Syne;'>Please upload a video file.</div>"
147
-
148
- # 1. Extract Faces locally
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
149
  input_size = 380
150
  faces = face_extractor.process_video(video_path, frames_per_video=16)
151
 
152
  if len(faces) == 0:
153
- return "<div style='color:var(--amber); font-family:Syne; padding:20px;'>No faces detected. Please upload a clear video.</div>"
154
 
155
  x = []
156
  for frame_data in faces:
@@ -158,8 +170,7 @@ def analyze_video(video_path):
158
  resized_face = isotropically_resize_image(face, input_size)
159
  resized_face = put_to_center(resized_face, input_size)
160
  x.append(resized_face)
161
- if len(x) >= 16 * 4:
162
- break
163
 
164
  x = np.array(x, dtype=np.uint8)
165
  x = torch.tensor(x, device=device).float()
@@ -167,167 +178,29 @@ def analyze_video(video_path):
167
  for i in range(len(x)):
168
  x[i] = normalize_transform(x[i] / 255.)
169
 
170
- # 2. Save the math to a temporary file
171
- temp_dir = tempfile.gettempdir()
172
  tensor_path = os.path.join(temp_dir, "batch_tensor.pt")
173
  torch.save(x, tensor_path)
174
 
175
- # 3. Ping all 7 Workers in parallel
176
  worker_scores = []
177
  with concurrent.futures.ThreadPoolExecutor(max_workers=7) as executor:
178
  futures = [executor.submit(call_worker, client, tensor_path) for client in clients]
179
  for future in concurrent.futures.as_completed(futures):
180
  worker_scores.append(future.result())
181
 
182
- # 4. Aggregate results
183
  final_score = np.mean(worker_scores)
184
- is_fake = final_score > 0.5
185
- display_score = (final_score * 100) if is_fake else ((1 - final_score) * 100)
186
-
187
- # Format the individual scores for the UI
188
- model_bars_html = ""
189
- for i, score in enumerate(worker_scores):
190
- percentage = score * 100
191
- color = "var(--red)" if percentage > 50 else "var(--green)"
192
- model_bars_html += f"""
193
- <div class="metric-row">
194
- <div class="metric-header"><span class="metric-name">EfficientNet Node {i+1}</span><span class="metric-value">{percentage:.1f}%</span></div>
195
- <div class="metric-bar"><div class="metric-fill" style="width:{percentage}%; background:{color}"></div></div>
196
- </div>
197
- """
198
-
199
- # 5. Inject into your Custom HTML Template
200
- verdict_color = "var(--red)" if is_fake else "var(--green)"
201
- verdict_text = "DEEPFAKE DETECTED" if is_fake else "AUTHENTIC CONTENT"
202
- verdict_desc = "High confidence manipulation detected. Neural forensics indicate spatial anomalies and blending artifacts typical of synthetic face-swapping." if is_fake else "No significant facial manipulation detected. Spatial forensics are within normal parameters. Content appears to be authentic media."
203
-
204
- # Calculate a proxy for "Face Anomaly" vs "Temporal" based on the raw score to fill your template's visual metrics
205
- face_anomaly_score = (final_score * 100) if is_fake else (final_score * 100)
206
-
207
- html_report = f"""
208
- <div class="report-layout">
209
- <div class="report-card accent">
210
- <div class="card-title"><span class="dot"></span>Forensic Analysis Report</div>
211
- <div style="margin-top:8px">
212
- <div style="display:flex;justify-content:space-between;align-items:center;margin-bottom:24px">
213
- <div>
214
- <div style="font-family:'JetBrains Mono',monospace;font-size:10px;letter-spacing:2px;color:var(--text-faint);text-transform:uppercase">Verdict</div>
215
- <div style="font-family:'Bebas Neue',sans-serif;font-size:32px;color:{verdict_color};margin-top:4px">{verdict_text}</div>
216
- </div>
217
- <div style="text-align:right">
218
- <div style="font-family:'Bebas Neue',sans-serif;font-size:48px;color:{verdict_color};text-shadow:0 0 20px {verdict_color};line-height:1">{display_score:.1f}%</div>
219
- <div style="font-family:'JetBrains Mono',monospace;font-size:9px;letter-spacing:2px;color:{verdict_color};text-transform:uppercase">Confidence</div>
220
- </div>
221
- </div>
222
- <p style="color:var(--text-dim); font-size:14px; line-height:1.6; margin-bottom:20px;">{verdict_desc}</p>
223
- <ul class="forensic-list">
224
- <li class="forensic-item">
225
- <div class="forensic-icon"><svg viewBox="0 0 24 24"><circle cx="12" cy="8" r="4"/><path d="M20 21a8 8 0 1 0-16 0"/></svg></div>
226
- <span class="forensic-name">Spatial Artifact Detection</span>
227
- <span class="forensic-status {'alert' if is_fake else 'pass'}">{'Anomaly' if is_fake else 'Pass'}</span>
228
- </li>
229
- <li class="forensic-item">
230
- <div class="forensic-icon"><svg viewBox="0 0 24 24"><path d="M4 15s1-1 4-1 5 2 8 2 4-1 4-1V3s-1 1-4 1-5-2-8-2-4 1-4 1z"/><line x1="4" y1="22" x2="4" y2="15"/></svg></div>
231
- <span class="forensic-name">Feature Extraction Integrity</span>
232
- <span class="forensic-status {'alert' if face_anomaly_score > 60 else 'pass'}">{'Fail' if face_anomaly_score > 60 else 'Normal'}</span>
233
- </li>
234
- </ul>
235
- </div>
236
- </div>
237
-
238
- <div style="display:flex;flex-direction:column;gap:2px">
239
- <div class="report-card" style="flex:1">
240
- <div class="card-title"><span class="dot"></span>Ensemble Node Breakdown</div>
241
- <div style="margin-top:16px">
242
- {model_bars_html}
243
- </div>
244
- </div>
245
- </div>
246
- </div>
247
- """
248
- return html_report
249
-
250
- # ==========================================
251
- # 4. MASTER UI - NETFLIX HTML INTEGRATION
252
- # ==========================================
253
- # We pull your exact CSS variables and styling directly from your deepfake-detector.html
254
- css = """
255
- @import url('https://fonts.googleapis.com/css2?family=Bebas+Neue&family=Syne:wght@400;600;700;800&family=JetBrains+Mono:wght@300;400;500&display=swap');
256
-
257
- :root {
258
- --bg: #030508;
259
- --bg2: #070c12;
260
- --panel: rgba(8, 18, 30, 0.85);
261
- --border: rgba(0, 210, 255, 0.12);
262
- --border-bright: rgba(0, 210, 255, 0.45);
263
- --cyan: #00d2ff;
264
- --red: #ff2d55;
265
- --green: #00ff88;
266
- --amber: #ffb800;
267
- --text: #e8f4ff;
268
- --text-dim: rgba(232, 244, 255, 0.5);
269
- --text-faint: rgba(232, 244, 255, 0.25);
270
- }
271
-
272
- body, .gradio-container { background-color: var(--bg) !important; color: var(--text) !important; font-family: 'Syne', sans-serif !important; }
273
- .gr-panel { background: var(--panel) !important; border: 1px solid var(--border) !important; border-radius: 4px !important; }
274
-
275
- /* Dashboard Titles */
276
- .veridex-title { font-family: 'Bebas Neue', sans-serif; font-size: 60px; letter-spacing: 4px; color: var(--text); text-align: center; margin-top: 40px;}
277
- .veridex-title span { color: var(--cyan); }
278
- .veridex-sub { font-family: 'JetBrains Mono', monospace; font-size: 12px; letter-spacing: 2px; text-transform: uppercase; color: var(--cyan); text-align: center; margin-bottom: 40px; }
279
-
280
- /* Custom HTML injected classes from your design */
281
- .report-layout { display: grid; grid-template-columns: 1fr 1fr; gap: 16px; margin-top: 20px; }
282
- .report-card { background: var(--panel); border: 1px solid var(--border); padding: 30px; }
283
- .report-card.accent { border-color: rgba(0,210,255,0.2); background: rgba(0, 210, 255, 0.04); }
284
- .card-title { font-family: 'JetBrains Mono', monospace; font-size: 10px; letter-spacing: 3px; text-transform: uppercase; color: var(--cyan); margin-bottom: 16px; display: flex; align-items: center; gap: 8px; }
285
- .card-title .dot { width: 5px; height: 5px; border-radius: 50%; background: var(--cyan); box-shadow: 0 0 8px var(--cyan); }
286
-
287
- .forensic-list { list-style: none; display: flex; flex-direction: column; gap: 12px; padding:0; }
288
- .forensic-item { display: flex; align-items: center; gap: 12px; padding: 14px 16px; border: 1px solid var(--border); }
289
- .forensic-icon { width: 32px; height: 32px; border: 1px solid var(--border-bright); display: flex; align-items: center; justify-content: center; }
290
- .forensic-icon svg { width: 14px; height: 14px; stroke: var(--cyan); fill: none; stroke-width: 2; }
291
- .forensic-name { font-size: 13px; font-weight: 600; flex: 1; font-family: 'Syne', sans-serif;}
292
- .forensic-status { font-family: 'JetBrains Mono', monospace; font-size: 9px; letter-spacing: 2px; text-transform: uppercase; padding: 3px 8px; }
293
- .forensic-status.pass { color: var(--green); border: 1px solid rgba(0,255,136,0.3); background: rgba(0,255,136,0.05); }
294
- .forensic-status.alert { color: var(--red); border: 1px solid rgba(255,45,85,0.3); background: rgba(255,45,85,0.05); }
295
-
296
- .metric-row { margin-bottom: 14px; }
297
- .metric-header { display: flex; justify-content: space-between; margin-bottom: 6px; }
298
- .metric-name { font-family: 'JetBrains Mono', monospace; font-size: 10px; letter-spacing: 1.5px; text-transform: uppercase; color: var(--text-dim); }
299
- .metric-value { font-family: 'JetBrains Mono', monospace; font-size: 10px; color: var(--text); }
300
- .metric-bar { height: 3px; background: rgba(255,255,255,0.06); width: 100%; overflow: hidden; }
301
- .metric-fill { height: 100%; transition: width 1s ease; }
302
-
303
- @media (max-width: 900px) { .report-layout { grid-template-columns: 1fr; } }
304
- """
305
-
306
- with gr.Blocks(css=css, theme=gr.themes.Default(neutral_hue="slate", primary_hue="cyan")) as app:
307
- gr.HTML("""
308
- <div class="veridex-title">VERI<span>DEX</span></div>
309
- <div class="veridex-sub">Neural Detection Engine v4.2 // Distributed Architecture</div>
310
- """)
311
 
312
- with gr.Row():
313
- with gr.Column(scale=1):
314
- gr.Markdown("### 1. Ingest Video Evidence")
315
- video_in = gr.Video(label="Upload Media (.mp4, .avi)")
316
- analyze_btn = gr.Button("Run Distributed Ensemble Analysis", variant="primary", size="lg")
317
-
318
- gr.HTML("""
319
- <div style="margin-top:20px; font-family:'JetBrains Mono'; font-size:10px; color:var(--text-faint); line-height:1.8;">
320
- › Local MTCNN Node Active<br>
321
- 7 Parallel EfficientNet Endpoints Linked<br>
322
- Awaiting input...
323
- </div>
324
- """)
325
-
326
- with gr.Column(scale=2):
327
- gr.Markdown("### 2. Forensic Output")
328
- report_out = gr.HTML(value="<div style='color:var(--text-dim); padding:40px; text-align:center; border:1px dashed var(--border);'>Awaiting video analysis...</div>")
329
-
330
- analyze_btn.click(fn=analyze_video, inputs=video_in, outputs=report_out)
331
-
332
- if __name__ == "__main__":
333
- app.launch()
 
1
  import os
 
2
  import torch
3
  import numpy as np
4
  from PIL import Image
5
+ import cv2
6
  import gradio as gr
7
  from gradio_client import Client, handle_file
8
  from torchvision.transforms import Normalize
9
  from facenet_pytorch.models.mtcnn import MTCNN
10
  import concurrent.futures
11
  import tempfile
12
+ from huggingface_hub import get_token
13
+ from fastapi import FastAPI, UploadFile, File
14
+ from fastapi.responses import HTMLResponse
15
+ import shutil
16
 
17
  # ==========================================
18
  # 1. API ROUTER CONFIGURATION
19
  # ==========================================
 
20
  WORKER_SPACES = [
21
  "bithal26/DeepFake-Worker-1",
22
  "bithal26/DeepFake-Worker-2",
 
27
  "bithal26/DeepFake-Worker-7"
28
  ]
29
 
 
 
30
  clients = []
31
  print("Initializing connections to 7 API Workers...")
32
+ hf_token = get_token()
33
 
34
+ if not hf_token:
35
+ print("CRITICAL WARNING: No HF_TOKEN found! Private workers will fail to connect.")
36
 
37
  for space in WORKER_SPACES:
38
  try:
 
39
  clients.append(Client(space, token=hf_token))
40
  except Exception as e:
41
+ print(f"Warning: Could not connect to {space}. Error: {e}")
42
+
43
  # ==========================================
44
  # 2. MTCNN PREPROCESSING ENGINE
45
  # ==========================================
 
126
  else:
127
  return np.mean(pred)
128
 
 
 
 
129
  def call_worker(client, tensor_filepath):
 
130
  try:
131
  result = client.predict(tensor_file=handle_file(tensor_filepath), api_name="/predict")
 
132
  preds = result.get("predictions", [])
133
+ if not preds: return 0.5
 
134
  return confident_strategy(preds)
135
  except Exception as e:
136
  print(f"API Call Failed: {e}")
137
  return 0.5
138
 
139
+ # ==========================================
140
+ # 3. FASTAPI SERVER & DIRECT HTML INJECTION
141
+ # ==========================================
142
+ app = FastAPI()
143
+
144
+ # 1. Serve your custom HTML file as the main page
145
+ @app.get("/")
146
+ def read_root():
147
+ with open("deepfake-detector.html", "r", encoding="utf-8") as f:
148
+ html_content = f.read()
149
+ return HTMLResponse(content=html_content)
150
+
151
+ # 2. Create the hidden API endpoint your HTML will call
152
+ @app.post("/api/analyze")
153
+ async def analyze_api(file: UploadFile = File(...)):
154
+ # Save uploaded video temporarily
155
+ temp_dir = tempfile.mkdtemp()
156
+ video_path = os.path.join(temp_dir, file.filename)
157
+ with open(video_path, "wb") as buffer:
158
+ shutil.copyfileobj(file.file, buffer)
159
+
160
+ # Extract Faces
161
  input_size = 380
162
  faces = face_extractor.process_video(video_path, frames_per_video=16)
163
 
164
  if len(faces) == 0:
165
+ return {"error": "No faces detected."}
166
 
167
  x = []
168
  for frame_data in faces:
 
170
  resized_face = isotropically_resize_image(face, input_size)
171
  resized_face = put_to_center(resized_face, input_size)
172
  x.append(resized_face)
173
+ if len(x) >= 16 * 4: break
 
174
 
175
  x = np.array(x, dtype=np.uint8)
176
  x = torch.tensor(x, device=device).float()
 
178
  for i in range(len(x)):
179
  x[i] = normalize_transform(x[i] / 255.)
180
 
181
+ # Save Tensor
 
182
  tensor_path = os.path.join(temp_dir, "batch_tensor.pt")
183
  torch.save(x, tensor_path)
184
 
185
+ # Ping Workers
186
  worker_scores = []
187
  with concurrent.futures.ThreadPoolExecutor(max_workers=7) as executor:
188
  futures = [executor.submit(call_worker, client, tensor_path) for client in clients]
189
  for future in concurrent.futures.as_completed(futures):
190
  worker_scores.append(future.result())
191
 
192
+ # Aggregate
193
  final_score = np.mean(worker_scores)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
194
 
195
+ # Clean up temp files
196
+ shutil.rmtree(temp_dir, ignore_errors=True)
197
+
198
+ # Return pure JSON data to the HTML frontend
199
+ return {
200
+ "final_score": float(final_score),
201
+ "worker_scores": [float(s) for s in worker_scores]
202
+ }
203
+
204
+ # Gradio wrapper just to keep Hugging Face happy, but we mount our custom FastAPI app
205
+ demo = gr.Blocks()
206
+ app = gr.mount_gradio_app(app, demo, path="/gradio")