Pragthedon committed on
Commit
408420c
·
1 Parent(s): eb14ec7

Update app.py for deployment

Browse files
Files changed (1) hide show
  1. app.py +51 -186
app.py CHANGED
@@ -27,7 +27,6 @@ app = Flask(__name__)
27
  app.secret_key = os.environ.get("FLASK_SECRET_KEY", "dev-insecure-key")
28
 
29
  # ── Privacy-safe logging ──────────────────────────────────────────────────────
30
- # Filter to strip any token that looks like an email or 'password' from logs.
31
  import re as _re
32
 
33
  class _PrivacyFilter(logging.Filter):
@@ -87,9 +86,6 @@ Talisman(
87
  app,
88
  force_https=False, # set True behind a TLS proxy in production
89
  strict_transport_security=False,
90
- # CSP disabled β€” Phosphor Icons load from cdn.jsdelivr.net and templates
91
- # use inline scripts. XSS/CSRF is mitigated by HttpOnly + SameSite=Strict
92
- # JWT cookies instead. Other headers below are still enforced.
93
  content_security_policy=False,
94
  referrer_policy='strict-origin-when-cross-origin',
95
  feature_policy={},
@@ -142,18 +138,12 @@ def check_claim():
142
  if not claim:
143
  return jsonify({"success": False, "error": "Claim cannot be empty"}), 400
144
 
145
- # 1. Check exact result cache first
146
  result = get_cached_result(claim)
147
-
148
  if not result:
149
- # 2. If no exact match, run the full API (which will also check evidence vector cache)
150
  result = run_fact_check_api(claim)
151
-
152
- # 3. Save successful result to exact cache for next time
153
  if result.get("success"):
154
  save_cached_result(claim, result)
155
 
156
- # 4. Save to user's personal history
157
  if result.get("success"):
158
  save_history(
159
  user_id = get_jwt_identity(),
@@ -192,7 +182,6 @@ def ocr_image():
192
  return jsonify({"success": False, "error": "Could not process image."}), 500
193
 
194
 
195
-
196
  # ── Image Authenticity ────────────────────────────────────────────────────────
197
  _image_detector = None
198
  _video_detector = None
@@ -201,7 +190,6 @@ def get_image_detector():
201
  global _image_detector
202
  if _image_detector is None:
203
  from image_authenticity.detector import ImageAuthenticityDetector
204
- # Eagerly initialize the structure, models loaded lazily downstream
205
  _image_detector = ImageAuthenticityDetector()
206
  return _image_detector
207
 
@@ -209,19 +197,16 @@ def get_video_detector():
209
  global _video_detector
210
  if _video_detector is None:
211
  from image_authenticity.detector import ImageAuthenticityDetector
212
- # Videos suffer from compression artifacts, rendering the frequency model unreliable.
213
- # We heavily weight the dedicated AI/Deepfake detectors for videos and lower the threshold
214
- # since deepfake signals might only appear faintly in some frames.
215
  video_weights = {
216
- "hf_primary": 0.00, # ZEROED: scores 0.99 on ALL video frames β€” completely unreliable for video
217
- "hf_secondary": 0.08, # Minimal: inconsistent on real videos (0.59-0.85), keep low
218
- "clip": 0.62, # Dominant: best semantic discriminator; varies meaningfully on real vs fake
219
- "frequency": 0.30, # Strong anchor: physics-based, consistent 0.38 on real video frames
220
- "cnn": 0.00 # Disabled
221
  }
222
  _video_detector = ImageAuthenticityDetector(
223
  ensemble_weights=video_weights,
224
- fake_threshold=0.65 # Raised: CLIP scores ~0.70 on animated/compressed content; need clear margin
225
  )
226
  return _video_detector
227
 
@@ -233,31 +218,18 @@ def verify_image():
233
  file = request.files['image']
234
  if file.filename == '':
235
  return jsonify({"success": False, "error": "No file selected"}), 400
236
-
237
  try:
238
  from PIL import Image
239
  import io
240
  import base64
241
-
242
  img = Image.open(io.BytesIO(file.read())).convert('RGB')
243
-
244
  detector = get_image_detector()
245
- result, visuals = detector.predict_with_visuals(
246
- img,
247
- include_gradcam=True,
248
- include_fft=True,
249
- include_result_card=False
250
- )
251
-
252
  def img_to_b64(pil_img):
253
  if not pil_img: return None
254
  buf = io.BytesIO()
255
  pil_img.save(buf, format="PNG")
256
  return base64.b64encode(buf.getvalue()).decode('utf-8')
257
-
258
- print(f"\n[DEBUG] User Uploaded Image Scores: {result['scores']}\n")
259
- logging.getLogger().info(f"User Uploaded Image Scores: {result['scores']}")
260
-
261
  return jsonify({
262
  "success": True,
263
  "label": result["label"],
@@ -282,149 +254,78 @@ def verify_video():
282
  file = request.files['video']
283
  if file.filename == '':
284
  return jsonify({"success": False, "error": "No file selected"}), 400
285
-
286
  try:
287
- import os
288
- import tempfile
289
- import base64
290
- import uuid
291
  from image_authenticity.utils.video import extract_frames
292
-
293
- # 1. Validation Setup
294
- MAX_SIZE = 20 * 1024 * 1024 # 20MB
295
- ALLOWED_MIMETYPES = {'video/mp4', 'video/quicktime', 'video/webm'}
296
-
297
- if file.mimetype and file.mimetype not in ALLOWED_MIMETYPES:
298
- return jsonify({"success": False, "error": f"Unsupported format. Please upload MP4, MOV, or WEBM."}), 400
299
-
300
- # We enforce size limits more cleanly by checking content-length header if available,
301
- # but robustly checking the actual file size when saving to disk
302
-
303
  detector = get_video_detector()
304
-
305
- # Create a temporary directory ensuring cleanup
306
  with tempfile.TemporaryDirectory() as temp_dir:
307
  temp_path = os.path.join(temp_dir, f"upload_{uuid.uuid4().hex}_{file.filename}")
308
-
309
- # Save the file incrementally to enforce the hard 20MB limit
310
  file.seek(0)
311
  bytes_saved = 0
312
  with open(temp_path, 'wb') as f:
313
  while True:
314
  chunk = file.read(8192)
315
- if not chunk:
316
- break
317
  f.write(chunk)
318
  bytes_saved += len(chunk)
319
  if bytes_saved > MAX_SIZE:
320
- # Too big! Stop saving so we save memory/disk
321
- return jsonify({"success": False, "error": "Video exceeds the maximum limit of 20MB."}), 400
322
-
323
- # 2. Extract Frames
324
- logging.getLogger().info(f"Extracting frames from video: {temp_path}")
325
  try:
326
  frames = extract_frames(temp_path, num_frames=10)
327
  except Exception as e:
328
- logging.getLogger().error(f"Frame extraction failed: {e}")
329
- return jsonify({"success": False, "error": "Could not read video securely. " + str(e)}), 400
330
-
331
  if not frames:
332
- return jsonify({"success": False, "error": "No valid frames found in the video."}), 400
333
-
334
- # 3. Analyze each frame
335
- logging.getLogger().info(f"Analyzing {len(frames)} frames...")
336
  frame_results = []
337
  max_fake_prob = -1
338
  most_suspicious_visuals = None
339
  most_suspicious_result = None
340
-
341
- for i, frame in enumerate(frames):
342
- # Request visuals only for the most suspicious frame (we don't know which one it is until we score them,
343
- # but to save time, we run visuals incrementally if it hits a new max_fake_prob threshold)
344
-
345
- # First, fast prediction without big visuals
346
  res = detector.predict(frame)
347
-
348
  f_prob = res.get("fake_prob", 0.0)
349
-
350
- # If it's our most suspicious frame so far, let's grab the gradcam for it specifically
351
  if f_prob > max_fake_prob:
352
  max_fake_prob = f_prob
353
- # Rerun prediction with all visualizers just for this frame to attach it as the "evidence"
354
- res_visual, vis = detector.predict_with_visuals(
355
- frame,
356
- include_gradcam=True,
357
- include_fft=True,
358
- include_result_card=False
359
- )
360
  most_suspicious_visuals = vis
361
  most_suspicious_result = res_visual
362
-
363
  frame_results.append(res)
364
-
365
- # 4. Aggregate Results
366
- # Videos may only have short manipulated segments. We sort by fake_prob
367
- # and average the top 30% most suspicious frames so real frames don't dilute the deepfake signal.
368
  frame_results.sort(key=lambda r: r.get("fake_prob", 0.0), reverse=True)
369
  top_k = max(1, len(frame_results) // 3)
370
  top_results = frame_results[:top_k]
371
-
372
  avg_fake_prob = sum(r.get("fake_prob", 0.0) for r in top_results) / top_k
373
  avg_real_prob = sum(r.get("real_prob", 0.0) for r in top_results) / top_k
374
-
375
- # Aggregate individual model scores based on the top_k frames
376
  aggregated_scores = {}
377
  if top_results and "scores" in top_results[0]:
378
  for model_key in top_results[0]["scores"].keys():
379
- avg_score = sum(r["scores"].get(model_key, 0.0) for r in top_results) / top_k
380
- aggregated_scores[model_key] = avg_score
381
-
382
  final_label = "FAKE" if avg_fake_prob >= detector.ensemble.fake_threshold else "REAL"
383
-
384
- # Helper to return base64 images
385
  def img_to_b64(pil_img):
386
- import io
387
  if not pil_img: return None
 
388
  buf = io.BytesIO()
389
  pil_img.save(buf, format="PNG")
390
  return base64.b64encode(buf.getvalue()).decode('utf-8')
391
-
392
- explanation = (
393
- f"Analyzed {len(frames)} frames extracted from the video. "
394
- f"The final verdict is based on the most suspicious {top_k} frame(s) to prevent authentic segments from hiding brief deepfake manipulations.\n"
395
- f"The absolute most suspicious frame exhibited a {max_fake_prob*100:.1f}% probability of manipulation. "
396
- )
397
-
398
  if most_suspicious_result and most_suspicious_result.get("explanation"):
399
- explanation += "\n\nDetector logic for the most suspicious frame:\n" + most_suspicious_result["explanation"]
400
-
401
- logging.getLogger().info(f"Video Check Complete. Label: {final_label}, Fake Prob: {avg_fake_prob:.2f}")
402
-
403
  return jsonify({
404
- "success": True,
405
- "label": final_label,
406
- "fake_prob": avg_fake_prob,
407
- "real_prob": avg_real_prob,
408
- "scores": aggregated_scores,
409
- "explanation": explanation,
410
  "gradcam_b64": img_to_b64(most_suspicious_visuals.get("gradcam") if most_suspicious_visuals else None),
411
  "fft_b64": img_to_b64(most_suspicious_visuals.get("fft_spectrum") if most_suspicious_visuals else None),
412
  })
413
-
414
  except Exception as e:
415
  import traceback
416
  logging.getLogger().error("Video Auth error: " + traceback.format_exc())
417
  return jsonify({"success": False, "error": "Model analysis failed. " + str(e)}), 500
418
 
419
-
420
  @app.route('/results')
421
  @jwt_required()
422
  def results():
423
- result = session.get('last_result')
424
- if not result:
425
- return redirect(url_for('index'))
426
- return render_template('results.html', result=result)
427
-
428
 
429
  @app.route('/history')
430
  @jwt_required()
@@ -432,22 +333,18 @@ def history():
432
  records = get_user_history(get_jwt_identity(), limit=50)
433
  return render_template('history.html', records=records)
434
 
435
-
436
  @app.route('/history/delete/<item_id>', methods=['POST'])
437
  @jwt_required()
438
  def delete_history(item_id):
439
  delete_history_item(get_jwt_identity(), item_id)
440
  return redirect(url_for('history'))
441
 
442
-
443
  @app.route('/history/clear', methods=['POST'])
444
  @jwt_required()
445
  def clear_history():
446
  clear_user_history(get_jwt_identity())
447
  return redirect(url_for('history'))
448
 
449
-
450
-
451
  @app.route('/api/suggested_facts')
452
  def suggested_facts():
453
  import random
@@ -455,8 +352,7 @@ def suggested_facts():
455
  facts = random.sample(KNOWLEDGE_BASE, min(3, len(KNOWLEDGE_BASE)))
456
  return jsonify({"success": True, "facts": [f["text"] for f in facts]})
457
 
458
-
459
- # ── ADMIN ROUTES (God Mode) ──────────────────────────────────────────────────
460
  from functools import wraps
461
  def admin_required(fn):
462
  @wraps(fn)
@@ -493,27 +389,21 @@ def admin_logs():
493
  history = get_global_history(limit=500)
494
  return render_template('admin_logs.html', history=history)
495
 
496
-
497
- # ── JSON APIs for Next.js frontend ──────────────────────────────────────────
498
  @app.route('/me')
499
  @jwt_required()
500
  def me():
501
- """Return basic user identity for the Next.js frontend."""
502
  return jsonify({"user_id": g.user_id, "username": g.username, "is_admin": g.is_admin})
503
 
504
  @app.route('/history/json')
505
  @jwt_required()
506
  def history_json():
507
- """Return user history as JSON for the Next.js frontend."""
508
  records = get_user_history(g.user_id)
509
  return jsonify({"records": [
510
  {
511
- "_id": str(r["_id"]),
512
- "claim": r.get("claim", ""),
513
- "verdict": r.get("verdict", ""),
514
- "confidence": r.get("confidence", 0.0),
515
- "evidence_count": r.get("evidence_count", 0),
516
- "created_at": r["created_at"].isoformat() if r.get("created_at") else "",
517
  } for r in records
518
  ]})
519
 
@@ -521,37 +411,22 @@ def history_json():
521
  @jwt_required()
522
  @admin_required
523
  def admin_data():
524
- """Aggregate endpoint for Next.js admin dashboard."""
525
  from project.database import get_system_stats, get_global_history, list_all_users
526
- stats = get_system_stats()
527
  history = get_global_history(limit=20)
528
- users = list_all_users(limit=10)
529
-
530
  def fmt_hist(h):
531
  return {
532
- "_id": str(h.get("_id", "")),
533
- "username": h.get("username", ""),
534
- "claim": h.get("claim", ""),
535
- "verdict": h.get("verdict", ""),
536
- "confidence": h.get("confidence", 0.0),
537
- "evidence_count": h.get("evidence_count", 0),
538
- "created_at": h["created_at"].isoformat() if h.get("created_at") else "",
539
  }
540
-
541
  def fmt_user(u):
542
  return {
543
- "_id": str(u.get("_id", "")),
544
- "username": u.get("username", ""),
545
- "email": u.get("email", ""),
546
- "is_admin": u.get("is_admin", False),
547
- "created_at": u["created_at"].isoformat() if u.get("created_at") else "",
548
  }
549
-
550
- return jsonify({
551
- "stats": stats,
552
- "history": [fmt_hist(h) for h in history],
553
- "users": [fmt_user(u) for u in users],
554
- })
555
 
556
  @app.route('/admin/logs/json')
557
  @jwt_required()
@@ -561,13 +436,9 @@ def admin_logs_json():
561
  history = get_global_history(limit=500)
562
  return jsonify({"history": [
563
  {
564
- "_id": str(h.get("_id", "")),
565
- "username": h.get("username", ""),
566
- "claim": h.get("claim", ""),
567
- "verdict": h.get("verdict", ""),
568
- "confidence": h.get("confidence", 0.0),
569
- "evidence_count": h.get("evidence_count", 0),
570
- "created_at": h["created_at"].isoformat() if h.get("created_at") else "",
571
  } for h in history
572
  ]})
573
 
@@ -579,35 +450,29 @@ def admin_users_json():
579
  users = list_all_users(limit=500)
580
  return jsonify({"users": [
581
  {
582
- "_id": str(u.get("_id", "")),
583
- "username": u.get("username", ""),
584
- "email": u.get("email", ""),
585
- "is_admin": u.get("is_admin", False),
586
- "created_at": u["created_at"].isoformat() if u.get("created_at") else "",
587
  } for u in users
588
  ]})
589
 
590
- # ── Error handlers ────────────────────────────────────────────────────────────
591
  @app.errorhandler(404)
592
- def not_found(e):
593
- return render_template('index.html'), 404
594
 
595
  @app.errorhandler(500)
596
- def internal_error(e):
597
- return jsonify({"success": False, "error": "Internal server error"}), 500
598
-
599
 
600
  @app.route('/emergency-reset')
601
  def emergency_reset():
602
  from flask_bcrypt import Bcrypt
603
  from project.database import get_db
604
  from project.config import BCRYPT_PEPPER
605
- bc = Bcrypt()
606
- email = "prag@proofly.co.in"
607
- peppered = "Admin2026!" + BCRYPT_PEPPER
608
- pw_hash = bc.generate_password_hash(peppered).decode('utf-8')
609
- get_db().users.update_one({"email": email}, {"$set": {"password_hash": pw_hash, "is_admin": True}})
610
- return "Admin reset to prag@proofly.co.in : Admin2026!"
 
611
 
612
  if __name__ == '__main__':
613
  app.run(debug=True, host='0.0.0.0', port=5000)
 
27
  app.secret_key = os.environ.get("FLASK_SECRET_KEY", "dev-insecure-key")
28
 
29
  # ── Privacy-safe logging ──────────────────────────────────────────────────────
 
30
  import re as _re
31
 
32
  class _PrivacyFilter(logging.Filter):
 
86
  app,
87
  force_https=False, # set True behind a TLS proxy in production
88
  strict_transport_security=False,
 
 
 
89
  content_security_policy=False,
90
  referrer_policy='strict-origin-when-cross-origin',
91
  feature_policy={},
 
138
  if not claim:
139
  return jsonify({"success": False, "error": "Claim cannot be empty"}), 400
140
 
 
141
  result = get_cached_result(claim)
 
142
  if not result:
 
143
  result = run_fact_check_api(claim)
 
 
144
  if result.get("success"):
145
  save_cached_result(claim, result)
146
 
 
147
  if result.get("success"):
148
  save_history(
149
  user_id = get_jwt_identity(),
 
182
  return jsonify({"success": False, "error": "Could not process image."}), 500
183
 
184
 
 
185
  # ── Image Authenticity ────────────────────────────────────────────────────────
186
  _image_detector = None
187
  _video_detector = None
 
190
  global _image_detector
191
  if _image_detector is None:
192
  from image_authenticity.detector import ImageAuthenticityDetector
 
193
  _image_detector = ImageAuthenticityDetector()
194
  return _image_detector
195
 
 
197
  global _video_detector
198
  if _video_detector is None:
199
  from image_authenticity.detector import ImageAuthenticityDetector
 
 
 
200
  video_weights = {
201
+ "hf_primary": 0.00,
202
+ "hf_secondary": 0.08,
203
+ "clip": 0.62,
204
+ "frequency": 0.30,
205
+ "cnn": 0.00
206
  }
207
  _video_detector = ImageAuthenticityDetector(
208
  ensemble_weights=video_weights,
209
+ fake_threshold=0.65
210
  )
211
  return _video_detector
212
 
 
218
  file = request.files['image']
219
  if file.filename == '':
220
  return jsonify({"success": False, "error": "No file selected"}), 400
 
221
  try:
222
  from PIL import Image
223
  import io
224
  import base64
 
225
  img = Image.open(io.BytesIO(file.read())).convert('RGB')
 
226
  detector = get_image_detector()
227
+ result, visuals = detector.predict_with_visuals(img, include_gradcam=True, include_fft=True, include_result_card=False)
 
 
 
 
 
 
228
  def img_to_b64(pil_img):
229
  if not pil_img: return None
230
  buf = io.BytesIO()
231
  pil_img.save(buf, format="PNG")
232
  return base64.b64encode(buf.getvalue()).decode('utf-8')
 
 
 
 
233
  return jsonify({
234
  "success": True,
235
  "label": result["label"],
 
254
  file = request.files['video']
255
  if file.filename == '':
256
  return jsonify({"success": False, "error": "No file selected"}), 400
 
257
  try:
258
+ import os, tempfile, base64, uuid
 
 
 
259
  from image_authenticity.utils.video import extract_frames
260
+ MAX_SIZE = 20 * 1024 * 1024
 
 
 
 
 
 
 
 
 
 
261
  detector = get_video_detector()
 
 
262
  with tempfile.TemporaryDirectory() as temp_dir:
263
  temp_path = os.path.join(temp_dir, f"upload_{uuid.uuid4().hex}_{file.filename}")
 
 
264
  file.seek(0)
265
  bytes_saved = 0
266
  with open(temp_path, 'wb') as f:
267
  while True:
268
  chunk = file.read(8192)
269
+ if not chunk: break
 
270
  f.write(chunk)
271
  bytes_saved += len(chunk)
272
  if bytes_saved > MAX_SIZE:
273
+ return jsonify({"success": False, "error": "Video exceeds limit of 20MB."}), 400
 
 
 
 
274
  try:
275
  frames = extract_frames(temp_path, num_frames=10)
276
  except Exception as e:
277
+ return jsonify({"success": False, "error": "Could not read video. " + str(e)}), 400
 
 
278
  if not frames:
279
+ return jsonify({"success": False, "error": "No valid frames found."}), 400
 
 
 
280
  frame_results = []
281
  max_fake_prob = -1
282
  most_suspicious_visuals = None
283
  most_suspicious_result = None
284
+ for frame in frames:
 
 
 
 
 
285
  res = detector.predict(frame)
 
286
  f_prob = res.get("fake_prob", 0.0)
 
 
287
  if f_prob > max_fake_prob:
288
  max_fake_prob = f_prob
289
+ res_visual, vis = detector.predict_with_visuals(frame, include_gradcam=True, include_fft=True, include_result_card=False)
 
 
 
 
 
 
290
  most_suspicious_visuals = vis
291
  most_suspicious_result = res_visual
 
292
  frame_results.append(res)
 
 
 
 
293
  frame_results.sort(key=lambda r: r.get("fake_prob", 0.0), reverse=True)
294
  top_k = max(1, len(frame_results) // 3)
295
  top_results = frame_results[:top_k]
 
296
  avg_fake_prob = sum(r.get("fake_prob", 0.0) for r in top_results) / top_k
297
  avg_real_prob = sum(r.get("real_prob", 0.0) for r in top_results) / top_k
 
 
298
  aggregated_scores = {}
299
  if top_results and "scores" in top_results[0]:
300
  for model_key in top_results[0]["scores"].keys():
301
+ aggregated_scores[model_key] = sum(r["scores"].get(model_key, 0.0) for r in top_results) / top_k
 
 
302
  final_label = "FAKE" if avg_fake_prob >= detector.ensemble.fake_threshold else "REAL"
 
 
303
  def img_to_b64(pil_img):
 
304
  if not pil_img: return None
305
+ import io
306
  buf = io.BytesIO()
307
  pil_img.save(buf, format="PNG")
308
  return base64.b64encode(buf.getvalue()).decode('utf-8')
309
+ explanation = f"Analyzed {len(frames)} frames. Verdict based on top {top_k} suspicious frames."
 
 
 
 
 
 
310
  if most_suspicious_result and most_suspicious_result.get("explanation"):
311
+ explanation += "\n\n" + most_suspicious_result["explanation"]
 
 
 
312
  return jsonify({
313
+ "success": True, "label": final_label, "fake_prob": avg_fake_prob, "real_prob": avg_real_prob,
314
+ "scores": aggregated_scores, "explanation": explanation,
 
 
 
 
315
  "gradcam_b64": img_to_b64(most_suspicious_visuals.get("gradcam") if most_suspicious_visuals else None),
316
  "fft_b64": img_to_b64(most_suspicious_visuals.get("fft_spectrum") if most_suspicious_visuals else None),
317
  })
 
318
  except Exception as e:
319
  import traceback
320
  logging.getLogger().error("Video Auth error: " + traceback.format_exc())
321
  return jsonify({"success": False, "error": "Model analysis failed. " + str(e)}), 500
322
 
 
323
  @app.route('/results')
324
  @jwt_required()
325
  def results():
326
+ res = session.get('last_result')
327
+ if not res: return redirect(url_for('index'))
328
+ return render_template('results.html', result=res)
 
 
329
 
330
  @app.route('/history')
331
  @jwt_required()
 
333
  records = get_user_history(get_jwt_identity(), limit=50)
334
  return render_template('history.html', records=records)
335
 
 
336
  @app.route('/history/delete/<item_id>', methods=['POST'])
337
  @jwt_required()
338
  def delete_history(item_id):
339
  delete_history_item(get_jwt_identity(), item_id)
340
  return redirect(url_for('history'))
341
 
 
342
  @app.route('/history/clear', methods=['POST'])
343
  @jwt_required()
344
  def clear_history():
345
  clear_user_history(get_jwt_identity())
346
  return redirect(url_for('history'))
347
 
 
 
348
  @app.route('/api/suggested_facts')
349
  def suggested_facts():
350
  import random
 
352
  facts = random.sample(KNOWLEDGE_BASE, min(3, len(KNOWLEDGE_BASE)))
353
  return jsonify({"success": True, "facts": [f["text"] for f in facts]})
354
 
355
+ # ── ADMIN ROUTES ─────────────────────────────────────────────────────────────
 
356
  from functools import wraps
357
  def admin_required(fn):
358
  @wraps(fn)
 
389
  history = get_global_history(limit=500)
390
  return render_template('admin_logs.html', history=history)
391
 
392
+ # ── JSON APIs ────────────────────────────────────────────────────────────────
 
393
@app.route('/me')
@jwt_required()
def me():
    """Return the authenticated user's identity (id, name, admin flag) as JSON."""
    identity = {
        "user_id": g.user_id,
        "username": g.username,
        "is_admin": g.is_admin,
    }
    return jsonify(identity)
397
 
398
@app.route('/history/json')
@jwt_required()
def history_json():
    """Return the current user's fact-check history as JSON for the frontend."""

    def _serialize(record):
        # created_at is a datetime when present; serialize to ISO-8601 or "".
        created = record.get("created_at")
        return {
            "_id": str(record["_id"]),
            "claim": record.get("claim", ""),
            "verdict": record.get("verdict", ""),
            "confidence": record.get("confidence", 0.0),
            "evidence_count": record.get("evidence_count", 0),
            "created_at": created.isoformat() if created else "",
        }

    records = get_user_history(g.user_id)
    return jsonify({"records": [_serialize(r) for r in records]})
409
 
 
411
  @jwt_required()
412
  @admin_required
413
  def admin_data():
 
414
  from project.database import get_system_stats, get_global_history, list_all_users
415
+ stats = get_system_stats()
416
  history = get_global_history(limit=20)
417
+ users = list_all_users(limit=10)
 
418
  def fmt_hist(h):
419
  return {
420
+ "_id": str(h.get("_id", "")), "username": h.get("username", ""), "claim": h.get("claim", ""),
421
+ "verdict": h.get("verdict", ""), "confidence": h.get("confidence", 0.0),
422
+ "evidence_count": h.get("evidence_count", 0), "created_at": h["created_at"].isoformat() if h.get("created_at") else "",
 
 
 
 
423
  }
 
424
  def fmt_user(u):
425
  return {
426
+ "_id": str(u.get("_id", "")), "username": u.get("username", ""), "email": u.get("email", ""),
427
+ "is_admin": u.get("is_admin", False), "created_at": u["created_at"].isoformat() if u.get("created_at") else "",
 
 
 
428
  }
429
+ return jsonify({"stats": stats, "history": [fmt_hist(h) for h in history], "users": [fmt_user(u) for u in users]})
 
 
 
 
 
430
 
431
  @app.route('/admin/logs/json')
432
  @jwt_required()
 
436
  history = get_global_history(limit=500)
437
  return jsonify({"history": [
438
  {
439
+ "_id": str(h.get("_id", "")), "username": h.get("username", ""), "claim": h.get("claim", ""),
440
+ "verdict": h.get("verdict", ""), "confidence": h.get("confidence", 0.0),
441
+ "evidence_count": h.get("evidence_count", 0), "created_at": h["created_at"].isoformat() if h.get("created_at") else "",
 
 
 
 
442
  } for h in history
443
  ]})
444
 
 
450
  users = list_all_users(limit=500)
451
  return jsonify({"users": [
452
  {
453
+ "_id": str(u.get("_id", "")), "username": u.get("username", ""), "email": u.get("email", ""),
454
+ "is_admin": u.get("is_admin", False), "created_at": u["created_at"].isoformat() if u.get("created_at") else "",
 
 
 
455
  } for u in users
456
  ]})
457
 
 
458
@app.errorhandler(404)
def not_found(e):
    """Serve the SPA entry page for unknown paths, keeping the 404 status."""
    return render_template('index.html'), 404
 
460
 
461
@app.errorhandler(500)
def internal_error(e):
    """Return a generic JSON error body for unhandled server errors."""
    payload = {"success": False, "error": "Internal server error"}
    return jsonify(payload), 500
 
 
463
 
464
@app.route('/emergency-reset')
def emergency_reset():
    """Break-glass recovery: recreate the admin account with a fresh password.

    SECURITY: the previous version of this route was an unauthenticated
    backdoor — any visitor could reset the admin account to a hardcoded
    password ("admin123") and the response leaked the credentials, the
    pepper status, and the database name. This version:
      * refuses to run unless EMERGENCY_RESET_TOKEN is set in the environment
        AND supplied as the ?token= query parameter (constant-time compare);
      * generates a random one-time password instead of a hardcoded one;
      * reveals nothing about the deployment beyond the one-time password.
    """
    from flask_bcrypt import Bcrypt
    from project.database import get_db
    from project.config import BCRYPT_PEPPER
    from datetime import datetime, timezone
    import secrets

    expected_token = os.environ.get("EMERGENCY_RESET_TOKEN")
    supplied_token = request.args.get("token", "")
    # Disabled entirely unless explicitly armed via the environment.
    if not expected_token or not secrets.compare_digest(supplied_token, expected_token):
        return jsonify({"success": False, "error": "Not found"}), 404

    email = "prag@proofly.co.in"
    # One-time password, shown only in this response; never stored in code.
    password = secrets.token_urlsafe(12)

    db = get_db()
    bc = Bcrypt()
    db.users.delete_one({"email": email})
    pw_hash = bc.generate_password_hash(password + BCRYPT_PEPPER).decode('utf-8')
    db.users.insert_one({
        "username": "Admin",
        "email": email,
        "password_hash": pw_hash,
        "is_admin": True,
        "created_at": datetime.now(timezone.utc),
    })
    # Deliberately do NOT echo pepper status or DB name — deployment details.
    return f"Admin reset for {email}. One-time password: {password}"
476
 
477
if __name__ == '__main__':
    # SECURITY: never ship with debug=True — the Werkzeug interactive debugger
    # allows arbitrary code execution. Opt in explicitly with FLASK_DEBUG=1
    # during local development only. Port is overridable for PaaS deployments.
    debug_mode = os.environ.get("FLASK_DEBUG", "0") == "1"
    port = int(os.environ.get("PORT", "5000"))
    app.run(debug=debug_mode, host='0.0.0.0', port=port)