MogensR committed on
Commit
31b4ddd
·
1 Parent(s): 06646af

Update core/app.py

Browse files
Files changed (1) hide show
  1. core/app.py +106 -32
core/app.py CHANGED
@@ -4,14 +4,38 @@
4
  Refactored modular architecture – orchestrates specialised components
5
  """
6
  from __future__ import annotations
 
7
  # ── Early env/threading hygiene (safe default to silence libgomp) ────────────
8
  import os
9
  os.environ["OMP_NUM_THREADS"] = "2" # Force valid value early
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  # If you use early_env in your project, keep this import (harmless if absent)
11
  try:
12
- import early_env # sets OMP/MKL/OPENBLAS + torch threads safely
13
  except Exception:
14
  pass
 
15
  import logging
16
  import threading
17
  import traceback
@@ -19,24 +43,29 @@
19
  import time
20
  from pathlib import Path
21
  from typing import Optional, Tuple, Dict, Any, Callable
 
22
  # Mitigate CUDA fragmentation (must be set before importing torch)
23
  if "PYTORCH_CUDA_ALLOC_CONF" not in os.environ:
24
  os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True,max_split_size_mb:128"
 
25
  # ── Logging ──────────────────────────────────────────────────────────────────
26
  logging.basicConfig(
27
  level=logging.INFO,
28
  format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
29
  )
30
  logger = logging.getLogger("core.app")
 
31
  # ── Ensure project root importable ───────────────────────────────────────────
32
  PROJECT_FILE = Path(__file__).resolve()
33
  CORE_DIR = PROJECT_FILE.parent
34
  ROOT = CORE_DIR.parent
35
  if str(ROOT) not in sys.path:
36
  sys.path.insert(0, str(ROOT))
 
37
  # Create loader directories if they don't exist
38
  loaders_dir = ROOT / "models" / "loaders"
39
  loaders_dir.mkdir(parents=True, exist_ok=True)
 
40
  # ── Gradio schema patch (HF quirk) ───────────────────────────────────────────
41
  try:
42
  import gradio_client.utils as gc_utils
@@ -52,6 +81,7 @@ def _patched_get_type(schema):
52
  logger.info("Gradio schema patch applied")
53
  except Exception as e:
54
  logger.warning(f"Gradio patch failed: {e}")
 
55
  # ── Core config + components ─────────────────────────────────────────────────
56
  try:
57
  from config.app_config import get_config
@@ -61,8 +91,10 @@ class DummyConfig:
61
  def to_dict(self):
62
  return {}
63
  get_config = lambda: DummyConfig()
 
64
  from utils.hardware.device_manager import DeviceManager
65
  from utils.system.memory_manager import MemoryManager
 
66
  # Try to import the new split loaders first, fall back to old if needed
67
  try:
68
  from models.loaders.model_loader import ModelLoader
@@ -70,20 +102,23 @@ def to_dict(self):
70
  except ImportError:
71
  logger.warning("Split loaders not found, using legacy loader")
72
  # Fall back to old loader if split architecture isn't available yet
73
- from models.model_loader import ModelLoader # type: ignore
 
74
  from processing.video.video_processor import CoreVideoProcessor
75
  from processing.audio.audio_processor import AudioProcessor
76
  from utils.monitoring.progress_tracker import ProgressTracker
77
  from utils.cv_processing import validate_video_file
 
78
  # ── Optional Two-Stage import ────────────────────────────────────────────────
79
  TWO_STAGE_AVAILABLE = False
80
  TWO_STAGE_IMPORT_ORIGIN = ""
81
  TWO_STAGE_IMPORT_ERROR = ""
82
  CHROMA_PRESETS: Dict[str, Dict[str, Any]] = {"standard": {}}
83
- TwoStageProcessor = None # type: ignore
 
84
  # Try multiple import paths for two-stage processor
85
  two_stage_paths = [
86
- "processors.two_stage", # Your fixed version
87
  "processing.two_stage.two_stage_processor",
88
  "processing.two_stage",
89
  ]
@@ -97,19 +132,23 @@ def to_dict(self):
97
  except Exception as e:
98
  TWO_STAGE_IMPORT_ERROR = str(e)
99
  continue
 
100
  if not TWO_STAGE_AVAILABLE:
101
  logger.warning(f"Two-stage import FAILED from all paths: {TWO_STAGE_IMPORT_ERROR}")
 
102
  # ── Quiet startup self-check (async by default) ──────────────────────────────
103
  # Place the helper in tools/startup_selfcheck.py (with tools/__init__.py present)
104
  try:
105
  from tools.startup_selfcheck import schedule_startup_selfcheck
106
  except Exception:
107
- schedule_startup_selfcheck = None # graceful if the helper isn't shipped
 
108
  # Dummy exceptions if core.exceptions not available
109
  class ModelLoadingError(Exception):
110
  pass
111
  class VideoProcessingError(Exception):
112
  pass
 
113
  # ╔══════════════════════════════════════════════════════════════════════════╗
114
  # β•‘ VideoProcessor class β•‘
115
  # β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•
@@ -119,7 +158,7 @@ class VideoProcessor:
119
  """
120
  def __init__(self):
121
  self.config = get_config()
122
- self._patch_config_defaults(self.config) # avoid AttributeError on older configs
123
  self.device_manager = DeviceManager()
124
  self.memory_manager = MemoryManager(self.device_manager.get_optimal_device())
125
  self.model_loader = ModelLoader(self.device_manager, self.memory_manager)
@@ -131,6 +170,7 @@ def __init__(self):
131
  self.cancel_event = threading.Event()
132
  self.progress_tracker: Optional[ProgressTracker] = None
133
  logger.info(f"VideoProcessor on device: {self.device_manager.get_optimal_device()}")
 
134
  # ── Config hardening: add missing fields safely ───────────────────────────
135
  @staticmethod
136
  def _patch_config_defaults(cfg: Any) -> None:
@@ -154,6 +194,7 @@ def _patch_config_defaults(cfg: Any) -> None:
154
  if not hasattr(cfg, k):
155
  setattr(cfg, k, v)
156
  Path(cfg.output_dir).mkdir(parents=True, exist_ok=True)
 
157
  # ── Progress helper ───────────────────────────────────────────────────────
158
  def _init_progress(self, video_path: str, cb: Optional[Callable] = None):
159
  try:
@@ -167,6 +208,7 @@ def _init_progress(self, video_path: str, cb: Optional[Callable] = None):
167
  except Exception as e:
168
  logger.warning(f"Progress init failed: {e}")
169
  self.progress_tracker = ProgressTracker(100, cb)
 
170
  # ── Model loading ─────────────────────────────────────────────────────────
171
  def load_models(self, progress_callback: Optional[Callable] = None) -> str:
172
  with self.loading_lock:
@@ -181,11 +223,14 @@ def load_models(self, progress_callback: Optional[Callable] = None) -> str:
181
  )
182
  if self.cancel_event.is_set():
183
  return "Model loading cancelled"
 
184
  # Get the actual models
185
  sam2_predictor = sam2_loaded.model if sam2_loaded else None
186
- mat_model = mat_loaded.model if mat_loaded else None # NOTE: in our MatAnyone loader this is a stateful adapter (callable)
 
187
  # Initialize core processor
188
  self.core_processor = CoreVideoProcessor(config=self.config, models=self.model_loader)
 
189
  # Initialize two-stage processor if available
190
  self.two_stage_processor = None
191
  if TWO_STAGE_AVAILABLE and TwoStageProcessor and (sam2_predictor or mat_model):
@@ -197,20 +242,21 @@ def load_models(self, progress_callback: Optional[Callable] = None) -> str:
197
  except Exception as e:
198
  logger.warning(f"Two-stage init failed: {e}")
199
  self.two_stage_processor = None
 
200
  self.models_loaded = True
201
  msg = self.model_loader.get_load_summary()
202
-
203
  # Add status about processors
204
  if self.two_stage_processor:
205
  msg += "\nβœ… Two-stage processor ready"
206
  else:
207
  msg += "\n⚠️ Two-stage processor not available"
208
-
209
  if mat_model:
210
  msg += "\nβœ… MatAnyone refinement active"
211
  else:
212
  msg += "\n⚠️ MatAnyone not loaded (edges may be rough)"
213
-
214
  logger.info(msg)
215
  return msg
216
  except (AttributeError, ModelLoadingError) as e:
@@ -223,6 +269,7 @@ def load_models(self, progress_callback: Optional[Callable] = None) -> str:
223
  err = f"Unexpected error during model loading: {e}"
224
  logger.error(f"{err}\n{traceback.format_exc()}")
225
  return err
 
226
  # ── Public entry – process video ─────────────────────────────────────────
227
  def process_video(
228
  self,
@@ -236,20 +283,19 @@ def process_video(
236
  preview_mask: bool = False,
237
  preview_greenscreen: bool = False,
238
  ) -> Tuple[Optional[str], Optional[str], str]:
239
-
240
  # ===== BACKGROUND PATH DEBUG & FIX =====
241
  logger.info("=" * 60)
242
  logger.info("BACKGROUND PATH DEBUGGING")
243
  logger.info(f"background_choice: {background_choice}")
244
  logger.info(f"custom_background_path type: {type(custom_background_path)}")
245
  logger.info(f"custom_background_path value: {custom_background_path}")
246
-
247
  # Fix 1: Handle if Gradio sends a dict
248
  if isinstance(custom_background_path, dict):
249
  original = custom_background_path
250
  custom_background_path = custom_background_path.get('name') or custom_background_path.get('path')
251
  logger.info(f"Extracted path from dict: {original} -> {custom_background_path}")
252
-
253
  # Fix 2: Handle PIL Image objects
254
  try:
255
  from PIL import Image
@@ -261,7 +307,7 @@ def process_video(
261
  logger.info(f"Saved PIL Image to: {custom_background_path}")
262
  except ImportError:
263
  pass
264
-
265
  # Fix 3: Verify file exists when using custom background
266
  if background_choice == "custom" or custom_background_path:
267
  if custom_background_path:
@@ -273,7 +319,7 @@ def process_video(
273
  import glob
274
  patterns = [
275
  "/tmp/gradio*/**/*.jpg",
276
- "/tmp/gradio*/**/*.jpeg",
277
  "/tmp/gradio*/**/*.png",
278
  "/tmp/**/*.jpg",
279
  "/tmp/**/*.jpeg",
@@ -292,25 +338,29 @@ def process_video(
292
  break
293
  else:
294
  logger.error("❌ Custom background mode but path is None!")
295
-
296
  logger.info(f"Final custom_background_path: {custom_background_path}")
297
  logger.info("=" * 60)
298
-
299
  if not self.models_loaded or not self.core_processor:
300
  return None, None, "Models not loaded. Please click 'Load Models' first."
301
  if self.cancel_event.is_set():
302
  return None, None, "Processing cancelled"
 
303
  self._init_progress(video_path, progress_callback)
304
  ok, why = validate_video_file(video_path)
305
  if not ok:
306
  return None, None, f"Invalid video: {why}"
 
307
  try:
308
  # Log which mode we're using
309
  mode = "two-stage" if use_two_stage else "single-stage"
310
  matanyone_status = "enabled" if self.model_loader.get_matanyone() else "disabled"
311
  logger.info(f"Processing video in {mode} mode, MatAnyone: {matanyone_status}")
 
312
  # IMPORTANT: start each video with a clean MatAnyone memory
313
  self._reset_matanyone_session()
 
314
  if use_two_stage:
315
  if not TWO_STAGE_AVAILABLE or self.two_stage_processor is None:
316
  return None, None, "Two-stage processing not available"
@@ -339,6 +389,7 @@ def process_video(
339
  except Exception as e:
340
  logger.error(f"Unexpected processing error: {e}\n{traceback.format_exc()}")
341
  return None, None, f"Unexpected error: {e}"
 
342
  # ── Private – per-video MatAnyone reset ──────────────────────────────────
343
  def _reset_matanyone_session(self):
344
  """
@@ -355,6 +406,7 @@ def _reset_matanyone_session(self):
355
  logger.info("MatAnyone session reset for new video")
356
  except Exception as e:
357
  logger.warning(f"MatAnyone session reset failed (continuing): {e}")
 
358
  # ── Private – single-stage ───────────────────────────────────────────────
359
  def _process_single_stage(
360
  self,
@@ -365,15 +417,15 @@ def _process_single_stage(
365
  preview_mask: bool,
366
  preview_greenscreen: bool,
367
  ) -> Tuple[Optional[str], Optional[str], str]:
368
-
369
  # Additional debug logging for single-stage
370
  logger.info(f"[Single-stage] background_choice: {background_choice}")
371
  logger.info(f"[Single-stage] custom_background_path: {custom_background_path}")
372
-
373
  ts = int(time.time())
374
  out_dir = Path(self.config.output_dir) / "single_stage"
375
  out_dir.mkdir(parents=True, exist_ok=True)
376
  out_path = str(out_dir / f"processed_{ts}.mp4")
 
377
  # Process video via your CoreVideoProcessor
378
  result = self.core_processor.process_video(
379
  input_path=video_path,
@@ -384,9 +436,10 @@ def _process_single_stage(
384
  },
385
  progress_callback=progress_callback,
386
  )
387
-
388
  if not result:
389
  return None, None, "Video processing failed"
 
390
  # Mux audio unless preview-only
391
  if not (preview_mask or preview_greenscreen):
392
  try:
@@ -398,6 +451,7 @@ def _process_single_stage(
398
  final_path = out_path
399
  else:
400
  final_path = out_path
 
401
  # Build status message
402
  try:
403
  mat_loaded = bool(self.model_loader.get_matanyone())
@@ -413,6 +467,7 @@ def _process_single_stage(
413
  f"Device: {self.device_manager.get_optimal_device()}"
414
  )
415
  return final_path, None, msg # No green in single-stage
 
416
  # ── Private – two-stage ─────────────────────────────────────────────────
417
  def _process_two_stage(
418
  self,
@@ -425,11 +480,11 @@ def _process_two_stage(
425
  ) -> Tuple[Optional[str], Optional[str], str]:
426
  if self.two_stage_processor is None:
427
  return None, None, "Two-stage processor not available"
428
-
429
  # Additional debug logging for two-stage
430
  logger.info(f"[Two-stage] background_choice: {background_choice}")
431
  logger.info(f"[Two-stage] custom_background_path: {custom_background_path}")
432
-
433
  import cv2
434
  cap = cv2.VideoCapture(video_path)
435
  if not cap.isOpened():
@@ -437,6 +492,7 @@ def _process_two_stage(
437
  w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) or 1280
438
  h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) or 720
439
  cap.release()
 
440
  # Prepare background
441
  try:
442
  background = self.core_processor.prepare_background(
@@ -447,12 +503,15 @@ def _process_two_stage(
447
  return None, None, f"Failed to prepare background: {e}"
448
  if background is None:
449
  return None, None, "Failed to prepare background"
 
450
  ts = int(time.time())
451
  out_dir = Path(self.config.output_dir) / "two_stage"
452
  out_dir.mkdir(parents=True, exist_ok=True)
453
  final_out = str(out_dir / f"final_{ts}.mp4")
 
454
  chroma_cfg = CHROMA_PRESETS.get(chroma_preset, CHROMA_PRESETS.get("standard", {}))
455
  logger.info(f"Two-stage with preset: {chroma_preset} | key_color: {key_color_mode}")
 
456
  # (Per-video reset already called in process_video)
457
  final_path, green_path, stage2_msg = self.two_stage_processor.process_full_pipeline(
458
  video_path,
@@ -464,6 +523,7 @@ def _process_two_stage(
464
  )
465
  if final_path is None:
466
  return None, None, stage2_msg
 
467
  # Mux audio
468
  try:
469
  final_with_audio = self.audio_processor.add_audio_to_video(
@@ -472,6 +532,7 @@ def _process_two_stage(
472
  except Exception as e:
473
  logger.warning(f"Audio mux failed: {e}")
474
  final_with_audio = final_path
 
475
  try:
476
  mat_loaded = bool(self.model_loader.get_matanyone())
477
  except Exception:
@@ -485,6 +546,7 @@ def _process_two_stage(
485
  f"Device: {self.device_manager.get_optimal_device()}"
486
  )
487
  return final_with_audio, green_path, msg
 
488
  # ── Status helpers ───────────────────────────────────────────────────────
489
  def get_status(self) -> Dict[str, Any]:
490
  status = {
@@ -496,7 +558,7 @@ def get_status(self) -> Dict[str, Any]:
496
  "config": self._safe_config_dict(),
497
  "memory_usage": self._safe_memory_usage(),
498
  }
499
-
500
  try:
501
  status["sam2_loaded"] = self.model_loader.get_sam2() is not None
502
  status["matanyone_loaded"] = self.model_loader.get_matanyone() is not None
@@ -504,10 +566,12 @@ def get_status(self) -> Dict[str, Any]:
504
  except Exception:
505
  status["sam2_loaded"] = False
506
  status["matanyone_loaded"] = False
 
507
  if self.progress_tracker:
508
  status["progress"] = self.progress_tracker.get_all_progress()
509
-
510
  return status
 
511
  def _safe_config_dict(self) -> Dict[str, Any]:
512
  try:
513
  return self.config.to_dict()
@@ -516,14 +580,17 @@ def _safe_config_dict(self) -> Dict[str, Any]:
516
  "ffmpeg_path", "max_model_size", "max_model_size_bytes",
517
  "output_dir", "matanyone_enabled"]
518
  return {k: getattr(self.config, k, None) for k in keys}
 
519
  def _safe_memory_usage(self) -> Dict[str, Any]:
520
  try:
521
  return self.memory_manager.get_memory_usage()
522
  except Exception:
523
  return {}
 
524
  def cancel_processing(self):
525
  self.cancel_event.set()
526
  logger.info("Cancellation requested")
 
527
  def cleanup_resources(self):
528
  try:
529
  self.memory_manager.cleanup_aggressive()
@@ -534,10 +601,13 @@ def cleanup_resources(self):
534
  except Exception:
535
  pass
536
  logger.info("Resources cleaned up")
 
537
  # ── Singleton + thin wrappers (used by UI callbacks) ────────────────────────
538
  processor = VideoProcessor()
 
539
  def load_models_with_validation(progress_callback: Optional[Callable] = None) -> str:
540
  return processor.load_models(progress_callback)
 
541
  def process_video_fixed(
542
  video_path: str,
543
  background_choice: str,
@@ -560,38 +630,42 @@ def process_video_fixed(
560
  preview_mask,
561
  preview_greenscreen,
562
  )
 
563
  def get_model_status() -> Dict[str, Any]:
564
  return processor.get_status()
 
565
  def get_cache_status() -> Dict[str, Any]:
566
  return processor.get_status()
 
567
  PROCESS_CANCELLED = processor.cancel_event
 
568
  # ── CLI entrypoint (must exist; app.py imports main) ─────────────────────────
569
  def main():
570
  try:
571
  logger.info("Starting BackgroundFX Pro")
572
  logger.info(f"Device: {processor.device_manager.get_optimal_device()}")
573
  logger.info(f"Two-stage available: {TWO_STAGE_AVAILABLE}")
574
-
575
  # πŸ”Ή Quiet model self-check (defaults to async; set SELF_CHECK_MODE=sync to block)
576
  if schedule_startup_selfcheck is not None:
577
  try:
578
  schedule_startup_selfcheck(mode=os.getenv("SELF_CHECK_MODE", "async"))
579
  except Exception as e:
580
  logger.error(f"Startup self-check skipped: {e}", exc_info=True)
581
-
582
  # Log model loader type
583
  try:
584
  from models.loaders.model_loader import ModelLoader
585
  logger.info("Using split loader architecture")
586
  except Exception:
587
  logger.info("Using legacy loader")
588
-
589
  # FIXED: Move UI import inside main() to avoid circular dependency
590
  # and add better error handling
591
  try:
592
  # Import here to break circular dependency
593
  from ui import ui_components
594
-
595
  # Now get the create_interface function
596
  if hasattr(ui_components, 'create_interface'):
597
  create_interface = ui_components.create_interface
@@ -599,12 +673,12 @@ def main():
599
  logger.error("create_interface not found in ui_components")
600
  logger.error(f"Available attributes: {dir(ui_components)}")
601
  raise ImportError("create_interface function not found")
602
-
603
  except ImportError as e:
604
  logger.error(f"Failed to import UI components: {e}")
605
  import traceback
606
  traceback.print_exc()
607
-
608
  # Try alternate import method
609
  try:
610
  logger.info("Trying alternate import method...")
@@ -616,7 +690,7 @@ def main():
616
  logger.error(f"Alternate import also failed: {e2}")
617
  logger.info("System initialized but UI unavailable. Exiting.")
618
  return
619
-
620
  # Create and launch the interface
621
  try:
622
  demo = create_interface()
@@ -630,7 +704,7 @@ def main():
630
  logger.error(f"Failed to launch Gradio interface: {e}")
631
  import traceback
632
  traceback.print_exc()
633
-
634
  except Exception as e:
635
  logger.error(f"Fatal error in main: {e}")
636
  import traceback
 
4
  Refactored modular architecture – orchestrates specialised components
5
  """
6
  from __future__ import annotations
7
+
8
  # ── Early env/threading hygiene (safe default to silence libgomp) ────────────
9
  import os
10
  os.environ["OMP_NUM_THREADS"] = "2" # Force valid value early
11
+
12
+ # --- Stable runtime env defaults and optional secret-name mapping ---
13
+ def _map_env(src: str, dst: str):
14
+ v = os.getenv(src)
15
+ if v and not os.getenv(dst):
16
+ os.environ[dst] = v
17
+
18
+ # If HF blocks underscores in secret names, you can add simple names and map them:
19
+ # Add secrets later like: OMPNUMTHREADS=1, TOKENIZERSPARALLELISM=false,
20
+ # PYTORCHCUDAALLOCCONF=max_split_size_mb:128, LOGLEVEL=info, APPENV=production
21
+ _map_env("OMPNUMTHREADS", "OMP_NUM_THREADS")
22
+ _map_env("TOKENIZERSPARALLELISM", "TOKENIZERS_PARALLELISM")
23
+ _map_env("PYTORCHCUDAALLOCCONF", "PYTORCH_CUDA_ALLOC_CONF")
24
+ _map_env("LOGLEVEL", "LOG_LEVEL")
25
+ _map_env("APPENV", "APP_ENV")
26
+
27
+ # Critical defaults (safe even if already set elsewhere)
28
+ os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
29
+ os.environ.setdefault("LOG_LEVEL", "info")
30
+ os.environ.setdefault("APP_ENV", "production")
31
+ os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:128")
32
+
33
  # If you use early_env in your project, keep this import (harmless if absent)
34
  try:
35
+ import early_env # sets OMP/MKL/OPENBLAS + torch threads safely
36
  except Exception:
37
  pass
38
+
39
  import logging
40
  import threading
41
  import traceback
 
43
  import time
44
  from pathlib import Path
45
  from typing import Optional, Tuple, Dict, Any, Callable
46
+
47
  # Mitigate CUDA fragmentation (must be set before importing torch)
48
  if "PYTORCH_CUDA_ALLOC_CONF" not in os.environ:
49
  os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True,max_split_size_mb:128"
50
+
51
  # ── Logging ──────────────────────────────────────────────────────────────────
52
  logging.basicConfig(
53
  level=logging.INFO,
54
  format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
55
  )
56
  logger = logging.getLogger("core.app")
57
+
58
  # ── Ensure project root importable ───────────────────────────────────────────
59
  PROJECT_FILE = Path(__file__).resolve()
60
  CORE_DIR = PROJECT_FILE.parent
61
  ROOT = CORE_DIR.parent
62
  if str(ROOT) not in sys.path:
63
  sys.path.insert(0, str(ROOT))
64
+
65
  # Create loader directories if they don't exist
66
  loaders_dir = ROOT / "models" / "loaders"
67
  loaders_dir.mkdir(parents=True, exist_ok=True)
68
+
69
  # ── Gradio schema patch (HF quirk) ───────────────────────────────────────────
70
  try:
71
  import gradio_client.utils as gc_utils
 
81
  logger.info("Gradio schema patch applied")
82
  except Exception as e:
83
  logger.warning(f"Gradio patch failed: {e}")
84
+
85
  # ── Core config + components ─────────────────────────────────────────────────
86
  try:
87
  from config.app_config import get_config
 
91
  def to_dict(self):
92
  return {}
93
  get_config = lambda: DummyConfig()
94
+
95
  from utils.hardware.device_manager import DeviceManager
96
  from utils.system.memory_manager import MemoryManager
97
+
98
  # Try to import the new split loaders first, fall back to old if needed
99
  try:
100
  from models.loaders.model_loader import ModelLoader
 
102
  except ImportError:
103
  logger.warning("Split loaders not found, using legacy loader")
104
  # Fall back to old loader if split architecture isn't available yet
105
+ from models.model_loader import ModelLoader # type: ignore
106
+
107
  from processing.video.video_processor import CoreVideoProcessor
108
  from processing.audio.audio_processor import AudioProcessor
109
  from utils.monitoring.progress_tracker import ProgressTracker
110
  from utils.cv_processing import validate_video_file
111
+
112
  # ── Optional Two-Stage import ────────────────────────────────────────────────
113
  TWO_STAGE_AVAILABLE = False
114
  TWO_STAGE_IMPORT_ORIGIN = ""
115
  TWO_STAGE_IMPORT_ERROR = ""
116
  CHROMA_PRESETS: Dict[str, Dict[str, Any]] = {"standard": {}}
117
+ TwoStageProcessor = None # type: ignore
118
+
119
  # Try multiple import paths for two-stage processor
120
  two_stage_paths = [
121
+ "processors.two_stage", # Your fixed version
122
  "processing.two_stage.two_stage_processor",
123
  "processing.two_stage",
124
  ]
 
132
  except Exception as e:
133
  TWO_STAGE_IMPORT_ERROR = str(e)
134
  continue
135
+
136
  if not TWO_STAGE_AVAILABLE:
137
  logger.warning(f"Two-stage import FAILED from all paths: {TWO_STAGE_IMPORT_ERROR}")
138
+
139
  # ── Quiet startup self-check (async by default) ──────────────────────────────
140
  # Place the helper in tools/startup_selfcheck.py (with tools/__init__.py present)
141
  try:
142
  from tools.startup_selfcheck import schedule_startup_selfcheck
143
  except Exception:
144
+ schedule_startup_selfcheck = None # graceful if the helper isn't shipped
145
+
146
  # Dummy exceptions if core.exceptions not available
147
  class ModelLoadingError(Exception):
148
  pass
149
  class VideoProcessingError(Exception):
150
  pass
151
+
152
  # ╔══════════════════════════════════════════════════════════════════════════╗
153
  # β•‘ VideoProcessor class β•‘
154
  # β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•
 
158
  """
159
  def __init__(self):
160
  self.config = get_config()
161
+ self._patch_config_defaults(self.config) # avoid AttributeError on older configs
162
  self.device_manager = DeviceManager()
163
  self.memory_manager = MemoryManager(self.device_manager.get_optimal_device())
164
  self.model_loader = ModelLoader(self.device_manager, self.memory_manager)
 
170
  self.cancel_event = threading.Event()
171
  self.progress_tracker: Optional[ProgressTracker] = None
172
  logger.info(f"VideoProcessor on device: {self.device_manager.get_optimal_device()}")
173
+
174
  # ── Config hardening: add missing fields safely ───────────────────────────
175
  @staticmethod
176
  def _patch_config_defaults(cfg: Any) -> None:
 
194
  if not hasattr(cfg, k):
195
  setattr(cfg, k, v)
196
  Path(cfg.output_dir).mkdir(parents=True, exist_ok=True)
197
+
198
  # ── Progress helper ───────────────────────────────────────────────────────
199
  def _init_progress(self, video_path: str, cb: Optional[Callable] = None):
200
  try:
 
208
  except Exception as e:
209
  logger.warning(f"Progress init failed: {e}")
210
  self.progress_tracker = ProgressTracker(100, cb)
211
+
212
  # ── Model loading ─────────────────────────────────────────────────────────
213
  def load_models(self, progress_callback: Optional[Callable] = None) -> str:
214
  with self.loading_lock:
 
223
  )
224
  if self.cancel_event.is_set():
225
  return "Model loading cancelled"
226
+
227
  # Get the actual models
228
  sam2_predictor = sam2_loaded.model if sam2_loaded else None
229
+ mat_model = mat_loaded.model if mat_loaded else None # NOTE: stateful callable adapter
230
+
231
  # Initialize core processor
232
  self.core_processor = CoreVideoProcessor(config=self.config, models=self.model_loader)
233
+
234
  # Initialize two-stage processor if available
235
  self.two_stage_processor = None
236
  if TWO_STAGE_AVAILABLE and TwoStageProcessor and (sam2_predictor or mat_model):
 
242
  except Exception as e:
243
  logger.warning(f"Two-stage init failed: {e}")
244
  self.two_stage_processor = None
245
+
246
  self.models_loaded = True
247
  msg = self.model_loader.get_load_summary()
248
+
249
  # Add status about processors
250
  if self.two_stage_processor:
251
  msg += "\nβœ… Two-stage processor ready"
252
  else:
253
  msg += "\n⚠️ Two-stage processor not available"
254
+
255
  if mat_model:
256
  msg += "\nβœ… MatAnyone refinement active"
257
  else:
258
  msg += "\n⚠️ MatAnyone not loaded (edges may be rough)"
259
+
260
  logger.info(msg)
261
  return msg
262
  except (AttributeError, ModelLoadingError) as e:
 
269
  err = f"Unexpected error during model loading: {e}"
270
  logger.error(f"{err}\n{traceback.format_exc()}")
271
  return err
272
+
273
  # ── Public entry – process video ─────────────────────────────────────────
274
  def process_video(
275
  self,
 
283
  preview_mask: bool = False,
284
  preview_greenscreen: bool = False,
285
  ) -> Tuple[Optional[str], Optional[str], str]:
 
286
  # ===== BACKGROUND PATH DEBUG & FIX =====
287
  logger.info("=" * 60)
288
  logger.info("BACKGROUND PATH DEBUGGING")
289
  logger.info(f"background_choice: {background_choice}")
290
  logger.info(f"custom_background_path type: {type(custom_background_path)}")
291
  logger.info(f"custom_background_path value: {custom_background_path}")
292
+
293
  # Fix 1: Handle if Gradio sends a dict
294
  if isinstance(custom_background_path, dict):
295
  original = custom_background_path
296
  custom_background_path = custom_background_path.get('name') or custom_background_path.get('path')
297
  logger.info(f"Extracted path from dict: {original} -> {custom_background_path}")
298
+
299
  # Fix 2: Handle PIL Image objects
300
  try:
301
  from PIL import Image
 
307
  logger.info(f"Saved PIL Image to: {custom_background_path}")
308
  except ImportError:
309
  pass
310
+
311
  # Fix 3: Verify file exists when using custom background
312
  if background_choice == "custom" or custom_background_path:
313
  if custom_background_path:
 
319
  import glob
320
  patterns = [
321
  "/tmp/gradio*/**/*.jpg",
322
+ "/tmp/gradio*/**/*.jpeg",
323
  "/tmp/gradio*/**/*.png",
324
  "/tmp/**/*.jpg",
325
  "/tmp/**/*.jpeg",
 
338
  break
339
  else:
340
  logger.error("❌ Custom background mode but path is None!")
341
+
342
  logger.info(f"Final custom_background_path: {custom_background_path}")
343
  logger.info("=" * 60)
344
+
345
  if not self.models_loaded or not self.core_processor:
346
  return None, None, "Models not loaded. Please click 'Load Models' first."
347
  if self.cancel_event.is_set():
348
  return None, None, "Processing cancelled"
349
+
350
  self._init_progress(video_path, progress_callback)
351
  ok, why = validate_video_file(video_path)
352
  if not ok:
353
  return None, None, f"Invalid video: {why}"
354
+
355
  try:
356
  # Log which mode we're using
357
  mode = "two-stage" if use_two_stage else "single-stage"
358
  matanyone_status = "enabled" if self.model_loader.get_matanyone() else "disabled"
359
  logger.info(f"Processing video in {mode} mode, MatAnyone: {matanyone_status}")
360
+
361
  # IMPORTANT: start each video with a clean MatAnyone memory
362
  self._reset_matanyone_session()
363
+
364
  if use_two_stage:
365
  if not TWO_STAGE_AVAILABLE or self.two_stage_processor is None:
366
  return None, None, "Two-stage processing not available"
 
389
  except Exception as e:
390
  logger.error(f"Unexpected processing error: {e}\n{traceback.format_exc()}")
391
  return None, None, f"Unexpected error: {e}"
392
+
393
  # ── Private – per-video MatAnyone reset ──────────────────────────────────
394
  def _reset_matanyone_session(self):
395
  """
 
406
  logger.info("MatAnyone session reset for new video")
407
  except Exception as e:
408
  logger.warning(f"MatAnyone session reset failed (continuing): {e}")
409
+
410
  # ── Private – single-stage ───────────────────────────────────────────────
411
  def _process_single_stage(
412
  self,
 
417
  preview_mask: bool,
418
  preview_greenscreen: bool,
419
  ) -> Tuple[Optional[str], Optional[str], str]:
 
420
  # Additional debug logging for single-stage
421
  logger.info(f"[Single-stage] background_choice: {background_choice}")
422
  logger.info(f"[Single-stage] custom_background_path: {custom_background_path}")
423
+
424
  ts = int(time.time())
425
  out_dir = Path(self.config.output_dir) / "single_stage"
426
  out_dir.mkdir(parents=True, exist_ok=True)
427
  out_path = str(out_dir / f"processed_{ts}.mp4")
428
+
429
  # Process video via your CoreVideoProcessor
430
  result = self.core_processor.process_video(
431
  input_path=video_path,
 
436
  },
437
  progress_callback=progress_callback,
438
  )
439
+
440
  if not result:
441
  return None, None, "Video processing failed"
442
+
443
  # Mux audio unless preview-only
444
  if not (preview_mask or preview_greenscreen):
445
  try:
 
451
  final_path = out_path
452
  else:
453
  final_path = out_path
454
+
455
  # Build status message
456
  try:
457
  mat_loaded = bool(self.model_loader.get_matanyone())
 
467
  f"Device: {self.device_manager.get_optimal_device()}"
468
  )
469
  return final_path, None, msg # No green in single-stage
470
+
471
  # ── Private – two-stage ─────────────────────────────────────────────────
472
  def _process_two_stage(
473
  self,
 
480
  ) -> Tuple[Optional[str], Optional[str], str]:
481
  if self.two_stage_processor is None:
482
  return None, None, "Two-stage processor not available"
483
+
484
  # Additional debug logging for two-stage
485
  logger.info(f"[Two-stage] background_choice: {background_choice}")
486
  logger.info(f"[Two-stage] custom_background_path: {custom_background_path}")
487
+
488
  import cv2
489
  cap = cv2.VideoCapture(video_path)
490
  if not cap.isOpened():
 
492
  w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) or 1280
493
  h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) or 720
494
  cap.release()
495
+
496
  # Prepare background
497
  try:
498
  background = self.core_processor.prepare_background(
 
503
  return None, None, f"Failed to prepare background: {e}"
504
  if background is None:
505
  return None, None, "Failed to prepare background"
506
+
507
  ts = int(time.time())
508
  out_dir = Path(self.config.output_dir) / "two_stage"
509
  out_dir.mkdir(parents=True, exist_ok=True)
510
  final_out = str(out_dir / f"final_{ts}.mp4")
511
+
512
  chroma_cfg = CHROMA_PRESETS.get(chroma_preset, CHROMA_PRESETS.get("standard", {}))
513
  logger.info(f"Two-stage with preset: {chroma_preset} | key_color: {key_color_mode}")
514
+
515
  # (Per-video reset already called in process_video)
516
  final_path, green_path, stage2_msg = self.two_stage_processor.process_full_pipeline(
517
  video_path,
 
523
  )
524
  if final_path is None:
525
  return None, None, stage2_msg
526
+
527
  # Mux audio
528
  try:
529
  final_with_audio = self.audio_processor.add_audio_to_video(
 
532
  except Exception as e:
533
  logger.warning(f"Audio mux failed: {e}")
534
  final_with_audio = final_path
535
+
536
  try:
537
  mat_loaded = bool(self.model_loader.get_matanyone())
538
  except Exception:
 
546
  f"Device: {self.device_manager.get_optimal_device()}"
547
  )
548
  return final_with_audio, green_path, msg
549
+
550
  # ── Status helpers ───────────────────────────────────────────────────────
551
  def get_status(self) -> Dict[str, Any]:
552
  status = {
 
558
  "config": self._safe_config_dict(),
559
  "memory_usage": self._safe_memory_usage(),
560
  }
561
+
562
  try:
563
  status["sam2_loaded"] = self.model_loader.get_sam2() is not None
564
  status["matanyone_loaded"] = self.model_loader.get_matanyone() is not None
 
566
  except Exception:
567
  status["sam2_loaded"] = False
568
  status["matanyone_loaded"] = False
569
+
570
  if self.progress_tracker:
571
  status["progress"] = self.progress_tracker.get_all_progress()
572
+
573
  return status
574
+
575
  def _safe_config_dict(self) -> Dict[str, Any]:
576
  try:
577
  return self.config.to_dict()
 
580
  "ffmpeg_path", "max_model_size", "max_model_size_bytes",
581
  "output_dir", "matanyone_enabled"]
582
  return {k: getattr(self.config, k, None) for k in keys}
583
+
584
  def _safe_memory_usage(self) -> Dict[str, Any]:
585
  try:
586
  return self.memory_manager.get_memory_usage()
587
  except Exception:
588
  return {}
589
+
590
  def cancel_processing(self):
591
  self.cancel_event.set()
592
  logger.info("Cancellation requested")
593
+
594
  def cleanup_resources(self):
595
  try:
596
  self.memory_manager.cleanup_aggressive()
 
601
  except Exception:
602
  pass
603
  logger.info("Resources cleaned up")
604
+
605
  # ── Singleton + thin wrappers (used by UI callbacks) ────────────────────────
606
  processor = VideoProcessor()
607
+
608
def load_models_with_validation(progress_callback: Optional[Callable] = None) -> str:
    """Thin module-level wrapper used by UI callbacks.

    Delegates to the singleton ``processor``; returns its human-readable
    load-status message.
    """
    return processor.load_models(progress_callback)
610
+
611
  def process_video_fixed(
612
  video_path: str,
613
  background_choice: str,
 
630
  preview_mask,
631
  preview_greenscreen,
632
  )
633
+
634
def get_model_status() -> Dict[str, Any]:
    """Return the singleton processor's status snapshot for the UI."""
    return processor.get_status()
636
+
637
def get_cache_status() -> Dict[str, Any]:
    """Return processor status for the UI's cache panel.

    NOTE(review): intentionally the same payload as ``get_model_status`` —
    both delegate to ``processor.get_status()``.
    """
    return processor.get_status()
639
+
640
  PROCESS_CANCELLED = processor.cancel_event
641
+
642
  # ── CLI entrypoint (must exist; app.py imports main) ─────────────────────────
643
  def main():
644
  try:
645
  logger.info("Starting BackgroundFX Pro")
646
  logger.info(f"Device: {processor.device_manager.get_optimal_device()}")
647
  logger.info(f"Two-stage available: {TWO_STAGE_AVAILABLE}")
648
+
649
  # πŸ”Ή Quiet model self-check (defaults to async; set SELF_CHECK_MODE=sync to block)
650
  if schedule_startup_selfcheck is not None:
651
  try:
652
  schedule_startup_selfcheck(mode=os.getenv("SELF_CHECK_MODE", "async"))
653
  except Exception as e:
654
  logger.error(f"Startup self-check skipped: {e}", exc_info=True)
655
+
656
  # Log model loader type
657
  try:
658
  from models.loaders.model_loader import ModelLoader
659
  logger.info("Using split loader architecture")
660
  except Exception:
661
  logger.info("Using legacy loader")
662
+
663
  # FIXED: Move UI import inside main() to avoid circular dependency
664
  # and add better error handling
665
  try:
666
  # Import here to break circular dependency
667
  from ui import ui_components
668
+
669
  # Now get the create_interface function
670
  if hasattr(ui_components, 'create_interface'):
671
  create_interface = ui_components.create_interface
 
673
  logger.error("create_interface not found in ui_components")
674
  logger.error(f"Available attributes: {dir(ui_components)}")
675
  raise ImportError("create_interface function not found")
676
+
677
  except ImportError as e:
678
  logger.error(f"Failed to import UI components: {e}")
679
  import traceback
680
  traceback.print_exc()
681
+
682
  # Try alternate import method
683
  try:
684
  logger.info("Trying alternate import method...")
 
690
  logger.error(f"Alternate import also failed: {e2}")
691
  logger.info("System initialized but UI unavailable. Exiting.")
692
  return
693
+
694
  # Create and launch the interface
695
  try:
696
  demo = create_interface()
 
704
  logger.error(f"Failed to launch Gradio interface: {e}")
705
  import traceback
706
  traceback.print_exc()
707
+
708
  except Exception as e:
709
  logger.error(f"Fatal error in main: {e}")
710
  import traceback