Update cursor_tracker.py
Browse files- cursor_tracker.py +679 -601
cursor_tracker.py
CHANGED
|
@@ -1,601 +1,679 @@
|
|
| 1 |
-
import os
|
| 2 |
-
import json
|
| 3 |
-
import requests
|
| 4 |
-
import subprocess
|
| 5 |
-
import shutil
|
| 6 |
-
import time
|
| 7 |
-
import re
|
| 8 |
-
import threading
|
| 9 |
-
from typing import Dict, List, Set, Optional
|
| 10 |
-
from huggingface_hub import HfApi, list_repo_files
|
| 11 |
-
|
| 12 |
-
import
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
import
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
os.
|
| 30 |
-
os.
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
#
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
#
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
"
|
| 63 |
-
"
|
| 64 |
-
"
|
| 65 |
-
"
|
| 66 |
-
"
|
| 67 |
-
"
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
""
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
def
|
| 81 |
-
"""Log
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
""
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
def
|
| 126 |
-
"""
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
return
|
| 174 |
-
|
| 175 |
-
def
|
| 176 |
-
"""
|
| 177 |
-
filename
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
|
| 194 |
-
|
| 195 |
-
|
| 196 |
-
|
| 197 |
-
|
| 198 |
-
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
|
| 202 |
-
|
| 203 |
-
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
|
| 212 |
-
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
|
| 216 |
-
|
| 217 |
-
|
| 218 |
-
|
| 219 |
-
|
| 220 |
-
|
| 221 |
-
|
| 222 |
-
|
| 223 |
-
|
| 224 |
-
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
|
| 228 |
-
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
|
| 234 |
-
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
|
| 261 |
-
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
|
| 277 |
-
|
| 278 |
-
|
| 279 |
-
|
| 280 |
-
|
| 281 |
-
|
| 282 |
-
|
| 283 |
-
|
| 284 |
-
|
| 285 |
-
|
| 286 |
-
|
| 287 |
-
|
| 288 |
-
|
| 289 |
-
|
| 290 |
-
|
| 291 |
-
|
| 292 |
-
|
| 293 |
-
|
| 294 |
-
|
| 295 |
-
|
| 296 |
-
|
| 297 |
-
|
| 298 |
-
|
| 299 |
-
|
| 300 |
-
|
| 301 |
-
|
| 302 |
-
|
| 303 |
-
|
| 304 |
-
|
| 305 |
-
|
| 306 |
-
|
| 307 |
-
|
| 308 |
-
|
| 309 |
-
|
| 310 |
-
|
| 311 |
-
|
| 312 |
-
|
| 313 |
-
|
| 314 |
-
|
| 315 |
-
|
| 316 |
-
|
| 317 |
-
|
| 318 |
-
|
| 319 |
-
|
| 320 |
-
|
| 321 |
-
|
| 322 |
-
|
| 323 |
-
|
| 324 |
-
|
| 325 |
-
|
| 326 |
-
|
| 327 |
-
|
| 328 |
-
|
| 329 |
-
|
| 330 |
-
|
| 331 |
-
|
| 332 |
-
|
| 333 |
-
|
| 334 |
-
|
| 335 |
-
|
| 336 |
-
|
| 337 |
-
|
| 338 |
-
|
| 339 |
-
|
| 340 |
-
|
| 341 |
-
#
|
| 342 |
-
|
| 343 |
-
|
| 344 |
-
|
| 345 |
-
|
| 346 |
-
|
| 347 |
-
|
| 348 |
-
|
| 349 |
-
|
| 350 |
-
|
| 351 |
-
|
| 352 |
-
|
| 353 |
-
|
| 354 |
-
|
| 355 |
-
|
| 356 |
-
|
| 357 |
-
|
| 358 |
-
|
| 359 |
-
|
| 360 |
-
|
| 361 |
-
|
| 362 |
-
|
| 363 |
-
|
| 364 |
-
|
| 365 |
-
|
| 366 |
-
|
| 367 |
-
|
| 368 |
-
|
| 369 |
-
|
| 370 |
-
|
| 371 |
-
|
| 372 |
-
|
| 373 |
-
|
| 374 |
-
|
| 375 |
-
|
| 376 |
-
|
| 377 |
-
|
| 378 |
-
|
| 379 |
-
|
| 380 |
-
|
| 381 |
-
|
| 382 |
-
|
| 383 |
-
|
| 384 |
-
|
| 385 |
-
|
| 386 |
-
|
| 387 |
-
|
| 388 |
-
|
| 389 |
-
|
| 390 |
-
|
| 391 |
-
log_message(f"
|
| 392 |
-
|
| 393 |
-
|
| 394 |
-
|
| 395 |
-
|
| 396 |
-
|
| 397 |
-
|
| 398 |
-
|
| 399 |
-
|
| 400 |
-
|
| 401 |
-
|
| 402 |
-
|
| 403 |
-
|
| 404 |
-
|
| 405 |
-
|
| 406 |
-
|
| 407 |
-
|
| 408 |
-
)
|
| 409 |
-
|
| 410 |
-
|
| 411 |
-
|
| 412 |
-
|
| 413 |
-
|
| 414 |
-
|
| 415 |
-
|
| 416 |
-
|
| 417 |
-
|
| 418 |
-
|
| 419 |
-
|
| 420 |
-
|
| 421 |
-
|
| 422 |
-
|
| 423 |
-
|
| 424 |
-
|
| 425 |
-
|
| 426 |
-
|
| 427 |
-
|
| 428 |
-
|
| 429 |
-
|
| 430 |
-
|
| 431 |
-
|
| 432 |
-
|
| 433 |
-
|
| 434 |
-
|
| 435 |
-
|
| 436 |
-
|
| 437 |
-
|
| 438 |
-
|
| 439 |
-
|
| 440 |
-
|
| 441 |
-
|
| 442 |
-
|
| 443 |
-
|
| 444 |
-
|
| 445 |
-
|
| 446 |
-
|
| 447 |
-
|
| 448 |
-
|
| 449 |
-
|
| 450 |
-
|
| 451 |
-
#
|
| 452 |
-
|
| 453 |
-
|
| 454 |
-
|
| 455 |
-
|
| 456 |
-
|
| 457 |
-
|
| 458 |
-
|
| 459 |
-
|
| 460 |
-
|
| 461 |
-
|
| 462 |
-
|
| 463 |
-
|
| 464 |
-
|
| 465 |
-
|
| 466 |
-
|
| 467 |
-
|
| 468 |
-
|
| 469 |
-
|
| 470 |
-
log_message(
|
| 471 |
-
|
| 472 |
-
|
| 473 |
-
|
| 474 |
-
|
| 475 |
-
|
| 476 |
-
|
| 477 |
-
|
| 478 |
-
|
| 479 |
-
|
| 480 |
-
|
| 481 |
-
|
| 482 |
-
|
| 483 |
-
|
| 484 |
-
|
| 485 |
-
|
| 486 |
-
|
| 487 |
-
|
| 488 |
-
|
| 489 |
-
|
| 490 |
-
|
| 491 |
-
|
| 492 |
-
|
| 493 |
-
|
| 494 |
-
|
| 495 |
-
|
| 496 |
-
|
| 497 |
-
|
| 498 |
-
|
| 499 |
-
|
| 500 |
-
|
| 501 |
-
|
| 502 |
-
|
| 503 |
-
|
| 504 |
-
|
| 505 |
-
|
| 506 |
-
|
| 507 |
-
|
| 508 |
-
|
| 509 |
-
|
| 510 |
-
|
| 511 |
-
|
| 512 |
-
|
| 513 |
-
|
| 514 |
-
|
| 515 |
-
|
| 516 |
-
|
| 517 |
-
|
| 518 |
-
|
| 519 |
-
|
| 520 |
-
|
| 521 |
-
|
| 522 |
-
|
| 523 |
-
|
| 524 |
-
|
| 525 |
-
|
| 526 |
-
|
| 527 |
-
|
| 528 |
-
|
| 529 |
-
|
| 530 |
-
|
| 531 |
-
|
| 532 |
-
|
| 533 |
-
|
| 534 |
-
|
| 535 |
-
|
| 536 |
-
|
| 537 |
-
|
| 538 |
-
|
| 539 |
-
|
| 540 |
-
|
| 541 |
-
|
| 542 |
-
|
| 543 |
-
|
| 544 |
-
|
| 545 |
-
|
| 546 |
-
|
| 547 |
-
|
| 548 |
-
|
| 549 |
-
|
| 550 |
-
|
| 551 |
-
|
| 552 |
-
|
| 553 |
-
|
| 554 |
-
|
| 555 |
-
|
| 556 |
-
|
| 557 |
-
|
| 558 |
-
|
| 559 |
-
|
| 560 |
-
|
| 561 |
-
|
| 562 |
-
|
| 563 |
-
|
| 564 |
-
|
| 565 |
-
|
| 566 |
-
|
| 567 |
-
|
| 568 |
-
|
| 569 |
-
|
| 570 |
-
|
| 571 |
-
|
| 572 |
-
|
| 573 |
-
|
| 574 |
-
|
| 575 |
-
|
| 576 |
-
|
| 577 |
-
|
| 578 |
-
|
| 579 |
-
|
| 580 |
-
|
| 581 |
-
|
| 582 |
-
|
| 583 |
-
|
| 584 |
-
|
| 585 |
-
|
| 586 |
-
|
| 587 |
-
|
| 588 |
-
|
| 589 |
-
|
| 590 |
-
|
| 591 |
-
|
| 592 |
-
"
|
| 593 |
-
"
|
| 594 |
-
|
| 595 |
-
|
| 596 |
-
|
| 597 |
-
|
| 598 |
-
|
| 599 |
-
|
| 600 |
-
|
| 601 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import requests
|
| 4 |
+
import subprocess
|
| 5 |
+
import shutil
|
| 6 |
+
import time
|
| 7 |
+
import re
|
| 8 |
+
import threading
|
| 9 |
+
from typing import Dict, List, Set, Optional
|
| 10 |
+
from huggingface_hub import HfApi, list_repo_files
|
| 11 |
+
from fastapi import FastAPI, File, UploadFile, Form
|
| 12 |
+
from fastapi.responses import JSONResponse
|
| 13 |
+
from pathlib import Path
|
| 14 |
+
import smtplib
|
| 15 |
+
from email.message import EmailMessage
|
| 16 |
+
import tempfile
|
| 17 |
+
import rarfile
|
| 18 |
+
import zipfile
|
| 19 |
+
import cv2
|
| 20 |
+
import numpy as np
|
| 21 |
+
from PIL import Image
|
| 22 |
+
import torch
|
| 23 |
+
from transformers import AutoProcessor, AutoModelForCausalLM
|
| 24 |
+
|
| 25 |
+
# Initialize FastAPI
app = FastAPI()

# ==== CONFIGURATION ====
# Hugging Face credentials and the dataset repo the RAR archives are pulled from.
# Both are overridable via environment variables.
HF_TOKEN = os.getenv("HF_TOKEN", "")
SOURCE_REPO_ID = os.getenv("SOURCE_REPO", "Fred808/BG1")

# Path Configuration — working directories for each pipeline stage.
DOWNLOAD_FOLDER = "downloads"
EXTRACT_FOLDER = "extracted"
FRAMES_OUTPUT_FOLDER = "extracted_frames"
ANALYSIS_OUTPUT_FOLDER = "analysis_results"

# Create all working directories up front (no-op if they already exist).
os.makedirs(DOWNLOAD_FOLDER, exist_ok=True)
os.makedirs(EXTRACT_FOLDER, exist_ok=True)
os.makedirs(FRAMES_OUTPUT_FOLDER, exist_ok=True)
os.makedirs(ANALYSIS_OUTPUT_FOLDER, exist_ok=True)

# State Files — JSON/text files that persist progress between runs.
DOWNLOAD_STATE_FILE = "download_progress.json"
PROCESS_STATE_FILE = "process_progress.json"
FAILED_FILES_LOG = "failed_files.log"

# Processing Parameters
CHUNK_SIZE = 1
PROCESSING_DELAY = 2
MAX_RETRIES = 3
MIN_FREE_SPACE_GB = 2  # Minimum free space in GB before processing

# Frame Extraction Parameters
DEFAULT_FPS = 3  # Default frames per second for extraction

# Initialize HF API
hf_api = HfApi(token=HF_TOKEN)

# Global State — mutated by the worker functions below and read by API clients.
# Not thread-safe; assumes a single worker mutates it at a time (TODO confirm).
processing_status = {
    "is_running": False,
    "current_file": None,
    "total_files": 0,
    "processed_files": 0,
    "failed_files": 0,
    "extracted_courses": 0,
    "extracted_videos": 0,
    "extracted_frames_count": 0,
    "analyzed_frames_count": 0,
    "last_update": None,
    "logs": []
}

# Load Florence-2 model (mock for demonstration)
# model_id = "microsoft/Florence-2-large"
# model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True).eval().cuda()
# processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
|
| 79 |
+
|
| 80 |
+
def log_message(message: str):
    """Print *message* with a timestamp and record it in the shared status.

    Side effects: prints the stamped entry, appends it to
    processing_status["logs"] (capped at the 100 most recent entries), and
    refreshes processing_status["last_update"].
    """
    stamp = time.strftime("%Y-%m-%d %H:%M:%S")
    entry = f"[{stamp}] {message}"
    print(entry)
    processing_status["logs"].append(entry)
    processing_status["last_update"] = stamp
    # Keep only the newest 100 entries so the buffer cannot grow unbounded.
    if len(processing_status["logs"]) > 100:
        processing_status["logs"] = processing_status["logs"][-100:]
|
| 89 |
+
|
| 90 |
+
def log_failed_file(filename: str, error: str):
    """Append a timestamped failure record to FAILED_FILES_LOG.

    Args:
        filename: name of the archive that failed.
        error: human-readable error description.

    Fix: the *filename* argument was previously unused and the literal
    placeholder "(unknown)" was written instead, which made the failure log
    useless for identifying which archive actually failed.
    """
    with open(FAILED_FILES_LOG, "a") as f:
        f.write(f"{time.strftime('%Y-%m-%d %H:%M:%S')} - {filename}: {error}\n")
|
| 94 |
+
|
| 95 |
+
def get_disk_usage(path: str) -> Dict[str, float]:
    """Return disk-space statistics for the filesystem holding *path*, in GB.

    Keys: "total", "free" (space available to non-root users), "used".
    """
    stats = os.statvfs(path)
    gib = 1024 ** 3
    total_gb = stats.f_frsize * stats.f_blocks / gib
    free_gb = stats.f_frsize * stats.f_bavail / gib
    return {"total": total_gb, "free": free_gb, "used": total_gb - free_gb}
|
| 102 |
+
|
| 103 |
+
def check_disk_space(path: str = ".") -> bool:
    """Return True when at least MIN_FREE_SPACE_GB gigabytes are free on the
    filesystem holding *path*; otherwise log a warning and return False.
    """
    usage = get_disk_usage(path)
    if usage["free"] >= MIN_FREE_SPACE_GB:
        return True
    log_message(f'β οΈ Low disk space: {usage["free"]:.2f}GB free, {usage["used"]:.2f}GB used')
    return False
|
| 110 |
+
|
| 111 |
+
def cleanup_temp_files():
    """Delete stale .rar/.zip files from DOWNLOAD_FOLDER to free disk space.

    The archive currently being processed (processing_status["current_file"])
    is preserved; everything else matching the archive extensions is removed
    best-effort.

    Fix: the bare ``except:`` around os.remove also swallowed SystemExit and
    KeyboardInterrupt; narrowed to OSError, the only expected failure here.
    """
    log_message("π§Ή Cleaning up temporary files...")

    # Clean old downloads (keep only current processing file)
    current_file = processing_status.get("current_file")
    for file in os.listdir(DOWNLOAD_FOLDER):
        if file != current_file and file.endswith((".rar", ".zip")):
            try:
                os.remove(os.path.join(DOWNLOAD_FOLDER, file))
                log_message(f"ποΈ Removed old download: {file}")
            except OSError:
                # Best-effort cleanup: a vanished or locked file is not fatal.
                pass
|
| 124 |
+
|
| 125 |
+
def load_json_state(file_path: str, default_value):
    """Load persisted JSON state from *file_path*.

    Args:
        file_path: path to a JSON state file.
        default_value: value returned when the file is missing, unreadable,
            or contains invalid JSON.

    Fix: an existing-but-unreadable file (OSError) previously propagated and
    aborted the run; it is now handled the same way as a corrupt file.
    """
    if os.path.exists(file_path):
        try:
            with open(file_path, "r") as f:
                return json.load(f)
        except (json.JSONDecodeError, OSError):
            log_message(f"β οΈ Corrupted state file: {file_path}")
    return default_value
|
| 134 |
+
|
| 135 |
+
def save_json_state(file_path: str, data):
    """Serialize *data* to *file_path* as pretty-printed JSON (2-space indent),
    overwriting any previous contents.
    """
    with open(file_path, "w") as handle:
        json.dump(data, handle, indent=2)
|
| 139 |
+
|
| 140 |
+
def download_with_retry(url: str, dest_path: str, max_retries: int = 3) -> bool:
    """Download file with retry logic and disk space checking.

    Streams *url* (authorized with HF_TOKEN) to *dest_path* in 8 KiB chunks.
    Retries with exponential backoff (1s, 2s, 4s, ...) on any exception.

    Returns:
        True on success; False when disk space is insufficient, the remote
        file would not fit, or all retries fail.
    """
    # Ensure there is headroom before starting; try a cleanup pass once.
    if not check_disk_space():
        cleanup_temp_files()
        if not check_disk_space():
            log_message("β Insufficient disk space even after cleanup")
            return False

    headers = {"Authorization": f"Bearer {HF_TOKEN}"}
    for attempt in range(max_retries):
        try:
            with requests.get(url, headers=headers, stream=True) as r:
                r.raise_for_status()

                # Check content length if available — bail out early rather
                # than filling the disk mid-download.
                content_length = r.headers.get("content-length")
                if content_length:
                    size_gb = int(content_length) / (1024**3)
                    disk_info = get_disk_usage(".")
                    if size_gb > disk_info["free"] - 0.5:  # Leave 0.5GB buffer
                        log_message(f'β File too large: {size_gb:.2f}GB, only {disk_info["free"]:.2f}GB free')
                        return False

                # Stream to disk in fixed-size chunks to bound memory use.
                with open(dest_path, "wb") as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
                return True
        except Exception as e:
            # Broad catch is deliberate at this retry boundary: any network or
            # I/O failure triggers backoff; the last failure is logged.
            if attempt < max_retries - 1:
                time.sleep(2 ** attempt)
                continue
            log_message(f"β Download failed after {max_retries} attempts: {e}")
            return False
    return False
|
| 174 |
+
|
| 175 |
+
def is_multipart_rar(filename: str) -> bool:
    """Return True for multi-volume RAR part names like 'course.part01.rar'.

    The check is case-insensitive on both the '.part' marker and the '.rar'
    extension.
    """
    lowered = filename.lower()
    return lowered.endswith(".rar") and ".part" in lowered
|
| 178 |
+
|
| 179 |
+
def get_rar_part_base(filename: str) -> str:
    """Return the base name shared by all volumes of a RAR set.

    'course.part01.rar' -> 'course'; a single-volume 'course.rar' -> 'course'.

    Fix: the original tested for '.part' on the lowercased name but split on
    the literal '.part', so an upper-cased name such as 'COURSE.PART01.RAR'
    fell through unsplit. The split (and the '.rar' strip) are now
    case-insensitive, matching is_multipart_rar().
    """
    part_match = re.search(r"\.part", filename, flags=re.IGNORECASE)
    if part_match is not None:
        return filename[:part_match.start()]
    return re.sub(r"\.rar$", "", filename, flags=re.IGNORECASE)
|
| 184 |
+
|
| 185 |
+
def extract_with_retry(rar_path: str, output_dir: str, max_retries: int = 2) -> bool:
    """Extract RAR with retry and recovery, handling multi-part archives.

    Shells out to the external `unrar` binary: first `unrar t` to test the
    archive, then `unrar x -o+` to extract (adding `-kb` = keep broken files
    on retry attempts). Returns True only on a clean extraction.
    """
    filename = os.path.basename(rar_path)

    # For multi-part RARs, we need the first part — unrar follows the chain
    # from .part01 automatically.
    if is_multipart_rar(filename):
        base_name = get_rar_part_base(filename)
        first_part = f"{base_name}.part01.rar"
        first_part_path = os.path.join(os.path.dirname(rar_path), first_part)

        if not os.path.exists(first_part_path):
            log_message(f"β οΈ Multi-part RAR detected but first part not found: {first_part}")
            return False

        rar_path = first_part_path
        log_message(f"π¦ Processing multi-part RAR starting with: {first_part}")

    for attempt in range(max_retries):
        try:
            # Test RAR first — a failed integrity test skips straight to retry.
            test_cmd = ["unrar", "t", rar_path]
            test_result = subprocess.run(test_cmd, capture_output=True, text=True)
            if test_result.returncode != 0:
                log_message(f"β οΈ RAR test failed: {test_result.stderr}")
                if attempt == max_retries - 1:
                    return False
                continue

            # Extract RAR ("-o+" = overwrite existing files without prompting)
            cmd = ["unrar", "x", "-o+", rar_path, output_dir]
            if attempt > 0:  # Try recovery on subsequent attempts
                cmd.insert(2, "-kb")

            result = subprocess.run(cmd, capture_output=True, text=True)
            if result.returncode == 0:
                log_message(f"β Successfully extracted: {os.path.basename(rar_path)}")
                return True
            else:
                error_msg = result.stderr or result.stdout
                log_message(f"β οΈ Extraction attempt {attempt + 1} failed: {error_msg}")

                # Classify the failure; only "no files" (exit 10) is terminal.
                if "checksum error" in error_msg.lower() or "CRC failed" in error_msg:
                    log_message(f"β οΈ Data corruption detected, attempt {attempt + 1}")
                elif result.returncode == 10:
                    log_message(f"β οΈ No files to extract (exit code 10)")
                    return False
                elif result.returncode == 1:
                    log_message(f"β οΈ Non-fatal error (exit code 1)")

        except Exception as e:
            log_message(f"β Extraction exception: {str(e)}")
            if attempt == max_retries - 1:
                return False
        # Brief pause before the next attempt.
        time.sleep(1)

    return False
|
| 241 |
+
|
| 242 |
+
def ensure_dir(path):
    """Create *path* (and any missing parents); no-op if it already exists."""
    Path(path).mkdir(parents=True, exist_ok=True)
|
| 244 |
+
|
| 245 |
+
def extract_frames(video_path, output_dir, fps=DEFAULT_FPS):
    """Extract frames from video at the specified frames per second (fps).

    Frames are written as zero-padded PNGs (0001.png, 0002.png, ...) into
    *output_dir*, sampling every Nth source frame where
    N = round(native_fps / fps).

    Returns:
        Number of frames saved; 0 when the video cannot be opened.

    Fix: frame_interval is now clamped to >= 1 — when *fps* exceeded the
    video's native FPS, ``int(round(video_fps / fps))`` evaluated to 0 and
    the ``frame_idx % frame_interval`` below raised ZeroDivisionError.
    """
    log_message(f"[INFO] Extracting frames from {video_path} to {output_dir} at {fps} fps...")
    ensure_dir(output_dir)
    cap = cv2.VideoCapture(str(video_path))
    if not cap.isOpened():
        log_message(f"[ERROR] Failed to open video file: {video_path}")
        return 0
    video_fps = cap.get(cv2.CAP_PROP_FPS)
    if not video_fps or video_fps <= 0:
        video_fps = 30  # fallback if FPS is not available
        log_message(f"[WARN] Using fallback FPS: {video_fps}")
    # Sample every Nth frame; never less often than every frame.
    frame_interval = max(1, int(round(video_fps / fps)))
    frame_idx = 0
    saved_idx = 1
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    log_message(f"[DEBUG] Total frames in video: {total_frames}")
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if frame_idx % frame_interval == 0:
            frame_name = f"{saved_idx:04d}.png"
            cv2.imwrite(str(Path(output_dir) / frame_name), frame)
            saved_idx += 1
        frame_idx += 1
    cap.release()
    log_message(f"Extracted {saved_idx-1} frames from {video_path} to {output_dir}")
    return saved_idx - 1
|
| 274 |
+
|
| 275 |
+
def analyze_frame_with_florence2(image_path: str, prompt: str = None) -> Dict:
    """Analyze a single frame using Florence-2 vision model (MOCK).

    Returns {"image": <basename>, "description": <canned text>}, cycling
    through three canned descriptions by the numeric frame index parsed from
    the filename stem; non-numeric stems get the first description.

    Fix: the bare ``except:`` (which also caught SystemExit and
    KeyboardInterrupt) is narrowed to ValueError — the only failure ``int()``
    raises here; the modulo index can never go out of range.
    """
    frame_name = os.path.basename(image_path)

    # Mock descriptions based on frame number for demonstration
    mock_descriptions = [
        "The image shows a person working on a computer with Blender 3D software open. The interface displays a 3D viewport with a gray cube object in the center. The person's hand is visible pointing at the screen, indicating interaction with the 3D model. The Blender interface shows various panels including the outliner, properties panel, and timeline at the bottom.",
        "The scene continues with the person manipulating the 3D cube in Blender. The cube appears to be selected (highlighted in orange) and the person is using the mouse to rotate or transform the object. Multiple windows are visible on the desktop, suggesting a multi-tasking workflow typical of 3D modeling work.",
        "The person is now accessing different tools in Blender's interface. The 3D viewport shows the cube from a different angle, indicating that the view has been rotated. The toolbar on the left side of the interface is visible, showing various modeling tools and options available for 3D object manipulation."
    ]

    # Extract frame number from filename to cycle through descriptions
    try:
        frame_num = int(frame_name.split(".")[0]) - 1
        description = mock_descriptions[frame_num % len(mock_descriptions)]
    except ValueError:
        # Non-numeric filename stem: fall back to the first canned description.
        description = mock_descriptions[0]

    return {
        "image": frame_name,
        "description": description
    }
|
| 297 |
+
|
| 298 |
+
def summarize_activities(frame_analyses: List[Dict]) -> Dict:
    """Summarize activities from frame analyses.

    NOTE: this is a static mock summary — *frame_analyses* is accepted for
    interface compatibility but does not influence the result.
    """
    step_pairs = [
        ("Open Blender software",
         "User launches Blender 3D modeling application on their computer"),
        ("Create 3D object",
         "User works with a default cube object in the 3D viewport"),
        ("Manipulate 3D model",
         "User rotates and transforms the cube using mouse interactions"),
        ("Navigate interface",
         "User explores different tools and panels in the Blender interface"),
    ]
    return {
        "steps": [{"action": act, "description": desc} for act, desc in step_pairs],
        "high_level_goal": "Learning basic 3D modeling operations in Blender software",
        "creative_actions": "3D object manipulation, interface navigation, basic modeling workflow",
        "objects": ["computer", "monitor", "mouse", "keyboard", "Blender software", "3D cube", "desktop interface"],
        "final_goal": "Introduction to Blender 3D modeling fundamentals and basic object manipulation",
    }
|
| 324 |
+
|
| 325 |
+
def analyze_frames(frames_dir: str, output_json_path: str, prompt: Optional[str] = None) -> int:
    """Run the (mock) Florence-2 analysis over every PNG in *frames_dir*.

    Writes {"frame_analyses": [...], "summary": {...}} as JSON to
    *output_json_path* (a write failure is logged, not raised) and returns
    the number of frames analyzed.
    """
    log_message(f"[INFO] Analyzing frames in {frames_dir}...")
    frames_dir = Path(frames_dir).resolve()
    output_json_path = Path(output_json_path).resolve()
    ensure_dir(frames_dir)
    ensure_dir(output_json_path.parent)

    # Analyze frames in sorted (i.e. chronological) filename order.
    analyses = [
        analyze_frame_with_florence2(str(png), prompt)
        for png in sorted(frames_dir.glob("*.png"))
    ]

    payload = {
        "frame_analyses": analyses,
        "summary": summarize_activities(analyses),
    }

    try:
        with open(output_json_path, "w") as out:
            json.dump(payload, out, indent=2)
        log_message(f"[SUCCESS] Analysis results saved to {output_json_path}")
    except Exception as e:
        log_message(f"[ERROR] Failed to write output JSON: {e}")

    return len(analyses)
|
| 358 |
+
|
| 359 |
+
def send_email_with_attachment(subject, body, to_email, from_email, app_password, attachment_path):
    """Send a plain-text email with one file attached via Gmail SMTP-over-SSL.

    Args:
        subject, body: message subject and plain-text body.
        to_email, from_email: recipient and sender addresses.
        app_password: Gmail app password used for SMTP login.
        attachment_path: path of the file to attach (sent as
            application/octet-stream).

    Failures are logged via log_message rather than raised.
    """
    msg = EmailMessage()
    msg["Subject"] = subject
    msg["From"] = from_email
    msg["To"] = to_email
    msg.set_content(body)
    # Read the whole attachment into memory; assumes it fits comfortably —
    # TODO confirm attachment sizes stay small.
    with open(attachment_path, "rb") as f:
        file_data = f.read()
        file_name = Path(attachment_path).name
    msg.add_attachment(file_data, maintype="application", subtype="octet-stream", filename=file_name)
    try:
        # Implicit-TLS SMTP endpoint (port 465).
        with smtplib.SMTP_SSL("smtp.gmail.com", 465) as smtp:
            smtp.login(from_email, app_password)
            smtp.send_message(msg)
        log_message(f"[SUCCESS] Email sent to {to_email}")
    except Exception as e:
        log_message(f"[ERROR] Failed to send email: {e}")
|
| 376 |
+
|
| 377 |
+
def process_rar_file(rar_path: str) -> bool:
    """Process a single RAR file - extract, then process videos for frames and vision analysis.

    Pipeline per archive: resolve the course name, wipe/recreate its extract
    directory, extract via extract_with_retry, then for every video found run
    frame extraction and (mock) vision analysis. Updates the shared
    processing_status counters throughout.

    Returns:
        True on success; False on any failure (which is also appended to the
        failed-files log).
    """
    filename = os.path.basename(rar_path)
    processing_status["current_file"] = filename

    # Handle multi-part RAR naming
    if is_multipart_rar(filename):
        course_name = get_rar_part_base(filename)
    else:
        course_name = filename.replace(".rar", "")

    extract_dir = os.path.join(EXTRACT_FOLDER, course_name)

    try:
        # NOTE(review): the literal "(unknown)" below (and in similar log
        # calls) looks like a scrubbed placeholder for {filename} — confirm
        # against history before relying on these log lines.
        log_message(f"π Processing: (unknown)")

        # Clean up any existing directory
        if os.path.exists(extract_dir):
            shutil.rmtree(extract_dir, ignore_errors=True)

        # Extract RAR
        os.makedirs(extract_dir, exist_ok=True)
        if not extract_with_retry(rar_path, extract_dir):
            raise Exception("RAR extraction failed")

        # Count extracted files and collect video paths in one walk.
        file_count = 0
        video_files_found = []
        for root, dirs, files in os.walk(extract_dir):
            for file in files:
                file_count += 1
                if file.lower().endswith((".mp4", ".avi", ".mov", ".mkv")):
                    video_files_found.append(os.path.join(root, file))

        processing_status["extracted_courses"] += 1
        log_message(f"β Successfully extracted '{course_name}' ({file_count} files, {len(video_files_found)} videos)")

        # Process video files for frame extraction and vision analysis
        for video_path in video_files_found:
            video_filename = Path(video_path).name
            # Create a unique output directory for frames for each video
            frames_output_dir = os.path.join(FRAMES_OUTPUT_FOLDER, f"{course_name}_{video_filename.replace('.', '_')}_frames")
            ensure_dir(frames_output_dir)

            extracted_frames_count = extract_frames(video_path, frames_output_dir, fps=DEFAULT_FPS)
            processing_status["extracted_frames_count"] += extracted_frames_count
            if extracted_frames_count > 0:
                processing_status["extracted_videos"] += 1
                log_message(f"[INFO] Extracted {extracted_frames_count} frames from {video_filename}")

                # Perform vision analysis on the extracted frames
                analysis_output_json = os.path.join(ANALYSIS_OUTPUT_FOLDER, f"{course_name}_{video_filename.replace('.', '_')}_analysis.json")
                analyzed_frames = analyze_frames(frames_output_dir, analysis_output_json)
                processing_status["analyzed_frames_count"] += analyzed_frames
                log_message(f"[INFO] Analyzed {analyzed_frames} frames from {video_filename}")
            else:
                log_message(f"[WARN] No frames extracted from {video_filename}")

        return True

    except Exception as e:
        error_msg = str(e)
        log_message(f"β Processing failed: {error_msg}")
        log_failed_file(filename, error_msg)
        return False

    finally:
        # Always clear the "in flight" marker, success or failure.
        processing_status["current_file"] = None
|
| 445 |
+
|
| 446 |
+
def main_processing_loop(start_index: int = 0):
    """Main processing workflow - extraction, frame extraction, and vision analysis.

    Processes exactly ONE archive per invocation: loads persisted progress,
    picks the next un-processed .rar from the source HF dataset repo,
    downloads it, runs process_rar_file, then persists the advanced index so
    the next run continues where this one left off.

    Args:
        start_index: when > 0, overrides the persisted next-download index.
    """
    processing_status["is_running"] = True

    try:
        # Load persisted progress from the two state files.
        processed_rars = load_json_state(PROCESS_STATE_FILE, {"processed_rars": []})["processed_rars"]
        download_state = load_json_state(DOWNLOAD_STATE_FILE, {"next_download_index": 0})

        # Use start_index if provided, otherwise use the saved state
        next_index = start_index if start_index > 0 else download_state["next_download_index"]

        log_message(f"π Starting from index {next_index}")
        log_message(f"π Previously processed: {len(processed_rars)} files")

        # Get file list from the source dataset repository.
        try:
            files = list(hf_api.list_repo_files(repo_id=SOURCE_REPO_ID, repo_type="dataset"))
            rar_files = sorted([f for f in files if f.endswith(".rar")])

            processing_status["total_files"] = len(rar_files)
            log_message(f"π Found {len(rar_files)} RAR files in repository")

            if next_index >= len(rar_files):
                log_message("β All files have been processed!")
                return

        except Exception as e:
            log_message(f"β Failed to get file list: {str(e)}")
            return

        # Process only one file per run
        if next_index < len(rar_files):
            rar_file = rar_files[next_index]
            filename = os.path.basename(rar_file)

            if filename in processed_rars:
                # Already done in an earlier run: just advance and persist.
                # NOTE(review): "(unknown)" in the log strings below looks
                # like a scrubbed {filename} placeholder — confirm.
                log_message(f"βοΈ Skipping already processed: (unknown)")
                processing_status["processed_files"] += 1
                # Move to next file
                next_index += 1
                save_json_state(DOWNLOAD_STATE_FILE, {"next_download_index": next_index})
                log_message(f"π Moving to next file. Progress: {next_index}/{len(rar_files)}")
                return

            log_message(f"π₯ Downloading: (unknown)")
            dest_path = os.path.join(DOWNLOAD_FOLDER, filename)

            # Download file
            download_url = f"https://huggingface.co/datasets/{SOURCE_REPO_ID}/resolve/main/{rar_file}"
            if download_with_retry(download_url, dest_path):
                # Process file; only on success is it recorded as processed.
                if process_rar_file(dest_path):
                    processed_rars.append(filename)
                    save_json_state(PROCESS_STATE_FILE, {"processed_rars": processed_rars})
                    log_message(f"β Successfully processed: (unknown)")
                    processing_status["processed_files"] += 1
                else:
                    log_message(f"β Failed to process: (unknown)")
                    processing_status["failed_files"] += 1

                # Clean up downloaded file (best effort; NOTE: bare except
                # here also swallows interrupts — candidate for OSError).
                try:
                    os.remove(dest_path)
                    log_message(f"ποΈ Cleaned up download: (unknown)")
                except:
                    pass
            else:
                log_message(f"β Failed to download: (unknown)")
                processing_status["failed_files"] += 1

            # Update download state for next run — advances even on failure,
            # so a bad archive does not block the pipeline forever.
            next_index += 1
            save_json_state(DOWNLOAD_STATE_FILE, {"next_download_index": next_index})

            # Status update
            log_message(f"π Progress: {next_index}/{len(rar_files)} files processed")
            log_message(f'π Extracted: {processing_status["extracted_courses"]} courses')
            log_message(f'π Videos Processed: {processing_status["extracted_videos"]}')
            log_message(f'π Frames Extracted: {processing_status["extracted_frames_count"]}')
            log_message(f'π Frames Analyzed: {processing_status["analyzed_frames_count"]}')
            log_message(f'π Failed: {processing_status["failed_files"]} files')

            if next_index < len(rar_files):
                log_message(f"π Run the script again to process the next file: {os.path.basename(rar_files[next_index])}")
            else:
                log_message("π All files have been processed!")
        else:
            log_message("β All files have been processed!")

        log_message("π Processing complete!")
        log_message(f'π Final stats: {processing_status["extracted_courses"]} courses extracted, {processing_status["extracted_videos"]} videos processed, {processing_status["extracted_frames_count"]} frames extracted, {processing_status["analyzed_frames_count"]} frames analyzed')

    except KeyboardInterrupt:
        log_message("βΉοΈ Processing interrupted by user")
    except Exception as e:
        log_message(f"β Fatal error: {str(e)}")
    finally:
        # Always clear the running flag and reclaim disk space.
        processing_status["is_running"] = False
        cleanup_temp_files()
|
| 546 |
+
|
| 547 |
+
# FastAPI Endpoints
|
| 548 |
+
@app.post("/analyze-video")
async def analyze_video_endpoint(
    file: UploadFile = File(...),
    fps: int = Form(DEFAULT_FPS),
    prompt: Optional[str] = Form(None)
):
    """Analyze a single video file and return frame-by-frame analysis.

    The upload is saved into a temporary directory, sampled into PNG
    frames at ``fps`` frames per second, and each frame is run through
    the Florence-2 analyzer. The response bundles the per-frame results
    with an overall activity summary.
    """
    allowed_exts = (".mp4", ".avi", ".mov", ".mkv")
    # Reject anything that is not a recognized video container up front.
    if not file.filename.lower().endswith(allowed_exts):
        return JSONResponse(status_code=400, content={
            "error": "File type not allowed",
            "allowed_types": [".mp4", ".avi", ".mov", ".mkv"]
        })

    with tempfile.TemporaryDirectory() as temp_dir:
        workdir = Path(temp_dir)
        saved_path = workdir / file.filename

        # Persist the uploaded stream to disk so ffmpeg/extractors can read it.
        with open(saved_path, "wb") as out:
            shutil.copyfileobj(file.file, out)

        frames_dir = workdir / "frames"
        frame_count = extract_frames(saved_path, frames_dir, fps)

        # Analyze frames in deterministic (sorted) order.
        frame_analyses = [
            analyze_frame_with_florence2(str(frame_path), prompt)
            for frame_path in sorted(frames_dir.glob("*.png"))
        ]

        summary = summarize_activities(frame_analyses)

        return JSONResponse(content={
            "video_filename": file.filename,
            "frame_count": frame_count,
            "fps": fps,
            "frame_analyses": frame_analyses,
            "summary": summary
        })
|
| 585 |
+
|
| 586 |
+
@app.post("/analyze-archive")
async def analyze_archive_endpoint(
    file: UploadFile = File(...),
    fps: int = Form(DEFAULT_FPS),
    prompt: Optional[str] = Form(None)
):
    """Analyze videos from RAR/ZIP archive and return frame-by-frame analysis.

    The archive is extracted into a temporary directory, every contained
    video (.mp4/.avi/.mov/.mkv) is sampled at ``fps`` frames per second,
    and each frame is analyzed with Florence-2. Returns one result entry
    per video plus the archive-level metadata.
    """
    if not file.filename.lower().endswith((".rar", ".zip")):
        return JSONResponse(status_code=400, content={
            "error": "File type not allowed",
            "allowed_types": [".rar", ".zip"]
        })

    with tempfile.TemporaryDirectory() as temp_dir:
        temp_dir_path = Path(temp_dir)
        file_path = temp_dir_path / file.filename

        # Persist the uploaded archive to disk so the extractors can open it.
        with open(file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        extract_dir = temp_dir_path / "extracted"
        video_files = []

        if file.filename.lower().endswith(".rar"):
            with rarfile.RarFile(file_path) as rf:
                rf.extractall(extract_dir)
        else:
            with zipfile.ZipFile(file_path) as zf:
                zf.extractall(extract_dir)

        # Find video files in the extracted content.
        # BUG FIX: this loop previously used `file` as its variable, shadowing
        # the `file: UploadFile` parameter; `file.filename` in the final
        # response then raised AttributeError on a str. Use distinct names.
        for root, _dirs, names in os.walk(extract_dir):
            for name in names:
                if name.lower().endswith((".mp4", ".avi", ".mov", ".mkv")):
                    video_files.append(Path(root) / name)

        if not video_files:
            return JSONResponse(status_code=400, content={
                "error": "No video files found in archive"
            })

        results = []
        for video_path in video_files:
            video_name = video_path.name
            frames_dir = temp_dir_path / f"frames_{video_name}"
            frame_count = extract_frames(video_path, frames_dir, fps)

            # Analyze frames in deterministic (sorted) order.
            frame_analyses = []
            for frame_file in sorted(frames_dir.glob("*.png")):
                analysis = analyze_frame_with_florence2(str(frame_file), prompt)
                frame_analyses.append(analysis)

            summary = summarize_activities(frame_analyses)

            results.append({
                "video_filename": video_name,
                "frame_count": frame_count,
                "fps": fps,
                "frame_analyses": frame_analyses,
                "summary": summary
            })

        return JSONResponse(content={
            "archive_filename": file.filename,
            "videos_processed": len(video_files),
            "results": results
        })
|
| 653 |
+
|
| 654 |
+
@app.get("/health")
async def health_check():
    """Health check endpoint."""
    payload = {
        "status": "healthy",
        "model": "Florence-2 (Mock)",
        "note": "Florence-2 model is mocked due to sandbox memory limitations.",
    }
    return JSONResponse(content=payload)
|
| 662 |
+
|
| 663 |
+
@app.get("/status")
async def get_processing_status():
    """Get current processing status.

    Returns the module-level ``processing_status`` dict as-is; it is
    mutated in place by the background processing loop.
    """
    snapshot = processing_status
    return JSONResponse(content=snapshot)
|
| 667 |
+
|
| 668 |
+
# Expose necessary functions and variables
|
| 669 |
+
# Public API of this module: the processing loop, its shared status dict,
# the analysis helpers, and the configuration constants callers need.
__all__ = [
    "main_processing_loop", "processing_status", "ANALYSIS_OUTPUT_FOLDER",
    "log_message", "send_email_with_attachment", "analyze_frames",
    "extract_frames", "DEFAULT_FPS", "ensure_dir",
]
|