import os, json, logging, base64, gc, asyncio, concurrent.futures
import cv2, numpy as np, torch
from pathlib import Path
from typing import List, Dict, Any, Optional, AsyncGenerator
from collections import Counter
from dataclasses import dataclass
from dotenv import load_dotenv

load_dotenv()

# Configuration
DEEPSEEK_API_URL = "https://ds2api-tau-woad.vercel.app/v1/chat/completions"
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY", "sk-ds2api-key-1-your-custom-key")  # prefer the .env value over the hardcoded fallback
DEEPSEEK_MODEL = "deepseek-chat"
BASE_DIR = Path("video_analysis_pro")
OUTPUT_DIR, CACHE_DIR, REPORTS_DIR = BASE_DIR/"output", BASE_DIR/"cache", BASE_DIR/"reports"
for d in [BASE_DIR, OUTPUT_DIR, CACHE_DIR, REPORTS_DIR]: d.mkdir(parents=True, exist_ok=True)

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("ZenithEngine")

# Optional tool availability
try:
    from ultralytics import YOLO
    YOLO_AVAILABLE = True
except ImportError:
    YOLO_AVAILABLE = False

try:
    from faster_whisper import WhisperModel
    WHISPER_AVAILABLE = True
except ImportError:
    WHISPER_AVAILABLE = False

@dataclass
class Frame:
    path: Path
    timestamp: float
    metrics: Optional[Dict[str, float]] = None
    vision_content: str = ""

class DeepSeekClient:
    def __init__(self):
        self.api_url = DEEPSEEK_API_URL
        self.api_key = DEEPSEEK_API_KEY
        logger.info(f"✅ DeepSeek Client initialisé avec l'URL : {self.api_url}")

    async def stream_content(self, model: str, messages: List[Dict[str, Any]], options: Dict[str, Any]) -> AsyncGenerator[Dict[str, Any], None]:
        # Convert incoming messages to the OpenAI-compatible chat format
        formatted_messages = []
        for msg in messages:
            role = msg["role"]
            content = msg.get("content", "")

            # If the content contains images, split it into text parts + image parts
            if isinstance(content, list):
                text_parts = []
                image_parts = []
                for part in content:
                    if part["type"] == "text":
                        text_parts.append(part["text"])
                    elif part["type"] == "image_url":
                        url = part["image_url"]["url"]
                        if url.startswith("data:"):
                            image_parts.append({"type": "image_url", "image_url": {"url": url}})

                # DeepSeek supports the OpenAI vision message format
                if image_parts:
                    formatted_messages.append({
                        "role": role,
                        "content": [{"type": "text", "text": " ".join(text_parts)}] + image_parts
                    })
                else:
                    formatted_messages.append({"role": role, "content": " ".join(text_parts)})
            else:
                formatted_messages.append({"role": role, "content": content})

        payload = {
            "model": model,
            "messages": formatted_messages,
            "temperature": options.get("temperature", 0.7),
            "stream": True
        }

        import httpx
        async with httpx.AsyncClient(timeout=None) as client:
            try:
                async with client.stream(
                    "POST", self.api_url,
                    headers={
                        "Authorization": f"Bearer {self.api_key}",
                        "Content-Type": "application/json"
                    },
                    json=payload
                ) as response:
                    if response.status_code != 200:
                        error_text = await response.aread()
                        logger.error(f"❌ Erreur DeepSeek API (HTTP {response.status_code}): {error_text.decode()}")
                        yield {"error": f"Erreur API DeepSeek: {response.status_code}"}
                        return

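                    # Parse the Server-Sent Events stream: each payload line is
                    # prefixed with "data: " and the stream ends with a literal
                    # "[DONE]" sentinel.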
                    async for line in response.aiter_lines():
                        if line.startswith("data: "):
                            data_str = line[6:]
                            if data_str.strip() == "[DONE]":
                                break
                            try:
                                data = json.loads(data_str)
                                # Format OpenAI streaming response
                                if "choices" in data and len(data["choices"]) > 0:
                                    delta = data["choices"][0].get("delta", {})
                                    content = delta.get("content", "")
                                    if content:
                                        # Convert to the envelope format expected by the frontend
                                        yield {
                                            "response": {
                                                "candidates": [{
                                                    "content": {
                                                        "parts": [{"text": content}]
                                                    }
                                                }]
                                            }
                                        }
                            except json.JSONDecodeError:
                                continue
            except Exception as e:
                logger.error(f"❌ Erreur lors du streaming DeepSeek : {str(e)}")
                yield {"error": str(e)}

class VideoProcessor:
    @staticmethod
    def get_frame_metrics(frame: np.ndarray) -> dict:
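        # Per-frame quality metrics: brightness = mean gray level, contrast =
        # gray-level standard deviation, saturation = mean of the HSV S channel,
        # sharpness = variance of the Laplacian (a standard focus measure).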
        try:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            return {"brightness": float(np.mean(gray)), "contrast": float(np.std(gray)),
                    "saturation": float(np.mean(hsv[:, :, 1])), "sharpness": float(cv2.Laplacian(gray, cv2.CV_64F).var())}
        except: return {"brightness": 0, "contrast": 0, "saturation": 0, "sharpness": 0}

    def __init__(self, video_path: Path, output_dir: Path):
        self.video_path, self.output_dir = video_path, output_dir
        self.output_dir.mkdir(parents=True, exist_ok=True)

    def extract_keyframes(self, max_frames: int = 30) -> List[Frame]:
        """

        Extraction intelligente de keyframes avec échantillonnage adaptatif.

        - Vidéos courtes (<2min) : 1 frame toutes les 3-4s

        - Vidéos moyennes (2-10min) : 1 frame toutes les 10-15s

        - Vidéos longues (>10min) : 1 frame toutes les 20-30s

        """
        try:
            from decord import VideoReader, cpu
            vr = VideoReader(str(self.video_path), ctx=cpu(0))
            total = len(vr)
            fps = vr.get_avg_fps()
            duration_seconds = total / fps

            # Adaptive sampling based on duration
            if duration_seconds < 120:  # < 2 minutes
                target_interval = 3  # 1 frame every 3 seconds
            elif duration_seconds < 600:  # 2-10 minutes
                target_interval = 12  # 1 frame every 12 seconds
            else:  # > 10 minutes
                target_interval = 25  # 1 frame every 25 seconds

            # Compute the optimal frame count
            optimal_frames = min(int(duration_seconds / target_interval), max_frames)
            optimal_frames = max(optimal_frames, 10)  # at least 10 frames
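            # Example: a 300 s clip falls in the 2-10 min bracket, so
            # target_interval = 12 and min(int(300 / 12), 30) = 25 frames are kept.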

            step = max(1, total // optimal_frames)
            indices = list(range(0, total, step))[:optimal_frames]  # concrete list of indices for decord's get_batch
            frames_data = vr.get_batch(indices).asnumpy()

            extracted = []
            for i, idx in enumerate(indices):
                img = cv2.cvtColor(frames_data[i], cv2.COLOR_RGB2BGR)
                ts = idx / fps
                p = self.output_dir / f"f_{idx}.jpg"
                cv2.imwrite(str(p), img, [cv2.IMWRITE_JPEG_QUALITY, 70])
                extracted.append(Frame(path=p, timestamp=ts, metrics=self.get_frame_metrics(img)))

            logger.info(f"✅ Extraction adaptative : {len(extracted)} frames pour {duration_seconds:.1f}s de vidéo (1 frame/{target_interval}s)")
            return extracted
        except Exception as e:
            logger.warning(f"Decord failed, fallback to CV2: {e}")
            cap = cv2.VideoCapture(str(self.video_path))
            fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
            total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) or 1000
            duration_seconds = total / fps

            # Same adaptive logic for the CV2 fallback
            if duration_seconds < 120:
                target_interval = 3
            elif duration_seconds < 600:
                target_interval = 12
            else:
                target_interval = 25

            optimal_frames = min(int(duration_seconds / target_interval), max_frames)
            optimal_frames = max(optimal_frames, 10)

            step = max(1, total // optimal_frames)
            extracted = []
            for idx in range(0, total, step):
                if len(extracted) >= optimal_frames: break
                cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
                ret, img = cap.read()
                if ret:
                    ts = idx / fps
                    p = self.output_dir / f"f_{idx}.jpg"
                    cv2.imwrite(str(p), img, [cv2.IMWRITE_JPEG_QUALITY, 70])
                    extracted.append(Frame(path=p, timestamp=ts, metrics=self.get_frame_metrics(img)))
            cap.release()
            logger.info(f"✅ Extraction CV2 adaptative : {len(extracted)} frames pour {duration_seconds:.1f}s de vidéo")
            return extracted

class AudioProcessor:
    def __init__(self): self.model = None
    def initialize(self):
        if WHISPER_AVAILABLE and self.model is None:
            try:
                device = "cuda" if torch.cuda.is_available() else "cpu"
                # Use the "tiny" model instead of "base" for speed
                self.model = WhisperModel("tiny", device=device, compute_type="int8")
            except Exception: pass
    def transcribe(self, p: Path) -> str:
        self.initialize()
        if not self.model: return "Transcription unavailable"
        try:
            segments, info = self.model.transcribe(str(p), beam_size=5)
            transcript = " ".join([s.text for s in segments])
            return f"[Langue source détectée: {info.language.upper()}] {transcript}"
        except: return "Erreur transcription"

class VideoDownloader:
    @staticmethod
    def download(url: str, output_dir: Path) -> Optional[Path]:
        import yt_dlp
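        # Desktop-browser headers and a Google referer reduce the chance of the
        # download being rejected by basic bot detection.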
        ydl_opts = {
            'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best',
            'outtmpl': str(output_dir / 'downloaded_video.%(ext)s'),
            'noplaylist': True, 'quiet': True, 'no_warnings': True, 'nocheckcertificate': True,
            'user_agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'referer': 'https://www.google.com/',
            'http_headers': {'Accept': '*/*', 'Accept-Language': 'en-US,en;q=0.9'}
        }
        try:
            with yt_dlp.YoutubeDL(ydl_opts) as ydl:
                info = ydl.extract_info(url, download=True)
                return Path(ydl.prepare_filename(info))
        except Exception: return None

class ZenithAnalyzer:
    def __init__(self):
        self.deepseek = DeepSeekClient()
        self.audio_proc = AudioProcessor()
        self.yolo = YOLO("yolov8n.pt") if YOLO_AVAILABLE else None

    async def extract_frames_only(self, video_path: Path, session_id: str) -> List[str]:
        session_dir = OUTPUT_DIR / f"session_{session_id}"
        session_dir.mkdir(parents=True, exist_ok=True)
        proc = VideoProcessor(video_path, session_dir)
        frames = proc.extract_keyframes()
        return [f"/output/session_{session_id}/{f.path.name}" for f in frames[:12]]

    async def run_full_analysis(self, video_path: Path, session_id: str, custom_prompt: Optional[str] = None) -> AsyncGenerator[Dict[str, Any], None]:
        session_dir = OUTPUT_DIR / f"session_{session_id}"
        session_dir.mkdir(parents=True, exist_ok=True)
        cache_file = session_dir / "analysis_cache.json"
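        # The cache stores {"transcript": ..., "vision_info": ...} so repeat runs
        # on the same session can skip Whisper transcription and YOLO detection.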

        # Optimization: do not re-extract frames that already exist
        existing_frames = list(session_dir.glob("f_*.jpg"))
        if not existing_frames:
            yield {"status": "sampling", "message": "Analyse des séquences..."}
            proc = VideoProcessor(video_path, session_dir)
            frames = proc.extract_keyframes()
        else:
            def get_idx(p):
                try: return int(p.stem.split('_')[1])
                except (IndexError, ValueError): return 0
            existing_paths = sorted(existing_frames, key=get_idx)
            frames = []
            for p in existing_paths:
                img = cv2.imread(str(p))
                metrics = VideoProcessor.get_frame_metrics(img) if img is not None else {"brightness": 0, "contrast": 0, "saturation": 0, "sharpness": 0}
                frames.append(Frame(path=p, timestamp=0.0, metrics=metrics))
            yield {"status": "sampling", "message": "Récupération des séquences existantes..."}

        # Send the image paths to the frontend
        frame_urls = [f"/output/session_{session_id}/{f.path.name}" for f in frames[:12]]
        yield {"status": "frames_ready", "frames": frame_urls, "message": "Séquences prêtes."}

        # Check whether audio and vision results are already cached
        cached_data = {}
        if cache_file.exists():
            try:
                with open(cache_file, "r") as f:
                    cached_data = json.load(f)
                logger.info(f"✅ Cache trouvé pour la session {session_id}")
            except: pass

        if "transcript" in cached_data and "vision_info" in cached_data:
            transcript = cached_data["transcript"]
            v_info = cached_data["vision_info"]
            yield {"status": "fusion", "message": "Utilisation des données en cache..."}
        else:
            yield {"status": "audio", "message": "Traitement audio & visuel..."}
            loop = asyncio.get_event_loop()
            with concurrent.futures.ThreadPoolExecutor() as executor:
                audio_task = loop.run_in_executor(executor, self.audio_proc.transcribe, video_path)

                if self.yolo:
                    all_paths = [str(f.path) for f in frames]
                    batch_size = 20
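                    # Run YOLO in batches of 20 to bound peak memory; imgsz=256
                    # favors speed over detection accuracy on these keyframes.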
                    for i in range(0, len(all_paths), batch_size):
                        batch = all_paths[i:i+batch_size]
                        # Bind batch via a default argument so the lambda does not capture the loop variable by reference
                        results = await loop.run_in_executor(executor, lambda b=batch: self.yolo(b, verbose=False, imgsz=256, stream=False))
                        for j, res in enumerate(results):
                            idx = i + j
                            objs = [res.names[int(b.cls[0])] for b in res.boxes if b.conf > 0.35]
                            ambiance = f"Ambiance: {'Sombre' if frames[idx].metrics['brightness'] < 50 else 'Lumineuse'}"
                            frames[idx].vision_content = f"{ambiance}, Objets: " + ", ".join([f"{v}x {k}" for k,v in Counter(objs).items()])

                transcript = await audio_task

            v_info = "\n".join([f"[{f.timestamp:.1f}s] {f.vision_content}" for f in frames[:40]])

            # Save to the cache
            try:
                with open(cache_file, "w") as f:
                    json.dump({"transcript": transcript, "vision_info": v_info}, f)
            except Exception: pass

        yield {"status": "fusion", "message": "Intelligence Artificielle en action..."}

        # Use the custom prompt if one was provided
        base_instruction = custom_prompt if custom_prompt else "Summarize and continue the analysis of the media"

        prompt = f"""Tu es l'unité Zenith AI, un système d'analyse de données multimédias.

        INSTRUCTION UTILISATEUR : {base_instruction}



        DONNÉES D'ENTRÉE :

        - TRANSCRIPTION : {transcript}

        - DONNÉES VISUELLES : {v_info}



        Produis un rapport TECHNIQUE, FACTUEL et STRUCTURÉ en Markdown."""

        # Parallel image encoding - smart, balanced selection:
        # pick frames spread uniformly across the whole duration
        num_images_to_send = min(8, len(frames))  # max 8 images for the AI
        if len(frames) > 0:
            step = max(1, len(frames) // num_images_to_send)
            selected_frames = [frames[i] for i in range(0, len(frames), step)][:num_images_to_send]
        else:
            selected_frames = []
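        # Example: with 30 extracted frames and 8 to send, step = 30 // 8 = 3,
        # so indices 0, 3, 6, ... are sampled and truncated to the first 8.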

        def encode_f(f):
            img = cv2.imread(str(f.path))
            if img is None:  # guard against missing or unreadable frame files
                return None
            # Downscale to cut payload size while keeping the image legible
            img = cv2.resize(img, (800, 450), interpolation=cv2.INTER_AREA)
            _, buf = cv2.imencode('.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, 65])
            return {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64.b64encode(buf).decode()}"}}

        with concurrent.futures.ThreadPoolExecutor() as executor:
            images = [i for i in executor.map(encode_f, selected_frames) if i is not None]

        messages = [{"role": "user", "content": [{"type": "text", "text": prompt}] + images}]

        yield {"status": "generating", "message": "Génération du rapport par l'IA..."}
        async for chunk in self.deepseek.stream_content(DEEPSEEK_MODEL, messages, {"temperature": 0.7}):
            if "error" in chunk:
                yield {"error": chunk["error"]}
                break
            resp = chunk.get("response", {})
            candidates = resp.get("candidates", [])
            if candidates:
                for part in candidates[0].get("content", {}).get("parts", []):
                    text = part.get("text", "")
                    if text: yield {"status": "streaming", "text": text}

        # Cleanup
        gc.collect()
        if torch.cuda.is_available(): torch.cuda.empty_cache()
        yield {"status": "completed", "message": "Analyse terminée."}