Spaces:
Sleeping
Sleeping
| import os | |
| import tempfile | |
| import cv2 | |
| import numpy as np | |
| from PIL import Image | |
| from app.core.config import VIDEO_ENSEMBLE | |
| from app.pipelines.image import _run_ensemble | |
| # Number of frames sampled per video | |
| MAX_FRAMES = 16 | |
def _extract_frames(video_path: str, n: int = MAX_FRAMES) -> list[Image.Image]:
    """Extract up to *n* evenly-spaced RGB frames from a video file.

    Args:
        video_path: Path to a video file readable by OpenCV.
        n: Maximum number of frames to sample (defaults to MAX_FRAMES).
           If the video has fewer frames than *n*, every frame is sampled.

    Returns:
        List of PIL images in RGB order. Frames whose decode fails
        (``cap.read()`` returns False) are silently skipped, so the list
        may be shorter than *n*.

    Raises:
        ValueError: If the video cannot be opened or reports no frames.
    """
    cap = cv2.VideoCapture(video_path)
    try:
        # Fail fast on containers OpenCV cannot open at all, instead of
        # relying on a 0 frame count further down.
        if not cap.isOpened():
            raise ValueError("Impossible de lire les frames de la vidéo.")
        total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        if total <= 0:
            raise ValueError("Impossible de lire les frames de la vidéo.")
        # Evenly spaced indices across the whole clip, clamped to the
        # available frame count.
        indices = np.linspace(0, total - 1, min(n, total), dtype=int)
        frames: list[Image.Image] = []
        for idx in indices:
            cap.set(cv2.CAP_PROP_POS_FRAMES, int(idx))
            ret, frame = cap.read()
            if ret:
                # OpenCV decodes to BGR; PIL expects RGB.
                rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frames.append(Image.fromarray(rgb))
        return frames
    finally:
        # Always release the capture handle, even if decoding raises.
        cap.release()
def _interpret(fake_prob: float) -> tuple[str, str, str]:
    """Map an aggregated fake probability to (verdict, confidence, reason)."""
    if fake_prob > 0.65:
        verdict = "DEEPFAKE"
        confidence = "haute" if fake_prob > 0.85 else "moyenne"
        reason = "Artefacts de synthèse détectés sur plusieurs frames."
    elif fake_prob < 0.35:
        verdict = "AUTHENTIQUE"
        confidence = "haute" if fake_prob < 0.15 else "moyenne"
        reason = "Aucun artefact de synthèse détecté."
    else:
        verdict = "INDÉTERMINÉ"
        confidence = "faible"
        reason = "Signal ambigu, les frames présentent des résultats mixtes."
    return verdict, confidence, reason


def run(video_bytes: bytes) -> dict:
    """
    Analyze a video for deepfake content.

    Samples MAX_FRAMES frames evenly across the video, runs the image
    ensemble on each, then aggregates the per-frame scores.

    Args:
        video_bytes: Raw video file contents (assumed mp4-compatible —
            the temp file is written with a ``.mp4`` suffix).

    Returns:
        Dict with verdict, confidence, reason, fake/real probabilities,
        number of frames analyzed, ratio of suspicious frames, and
        per-model mean scores.

    Raises:
        ValueError: If no frames can be extracted from the video.
    """
    # Persist the bytes to disk because cv2.VideoCapture needs a file path;
    # delete=False so the file survives the `with` block on all platforms.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tf:
        tf.write(video_bytes)
        tmp_path = tf.name
    try:
        frames = _extract_frames(tmp_path)
    finally:
        # Clean up the temp file whether or not extraction succeeded.
        if os.path.exists(tmp_path):
            os.remove(tmp_path)
    if not frames:
        raise ValueError("Aucune frame exploitable extraite de la vidéo.")

    # Run the ensemble on each frame, collecting the ensemble score and
    # every individual model's score for the final summary.
    frame_scores = []
    per_model_scores: dict[str, list[float]] = {}
    for i, frame in enumerate(frames):
        result = _run_ensemble(frame, VIDEO_ENSEMBLE)
        frame_scores.append(result["ensemble_score"])
        for key, data in result["models"].items():
            per_model_scores.setdefault(key, []).append(data["score"])
        print(f"  Frame {i + 1}/{len(frames)} → score={result['ensemble_score']:.4f}")

    scores_arr = np.array(frame_scores)
    fake_prob = float(np.mean(scores_arr))
    # Fraction of frames individually scoring above the deepfake threshold.
    high_ratio = float(np.mean(scores_arr > 0.65))
    # Boost when most frames agree on deepfake.
    if high_ratio > 0.60:
        fake_prob = min(fake_prob * 1.10, 1.0)
    fake_prob = round(fake_prob, 4)

    # Mean score per model across all sampled frames.
    model_summary = {
        key: round(float(np.mean(v)), 4)
        for key, v in per_model_scores.items()
    }

    verdict, confidence, reason = _interpret(fake_prob)
    return {
        "verdict": verdict,
        "confidence": confidence,
        "reason": reason,
        "fake_prob": fake_prob,
        "real_prob": round(1.0 - fake_prob, 4),
        "frames_analyzed": len(frames),
        "suspicious_frames_ratio": round(high_ratio, 4),
        "models": model_summary,
    }