MogensR committed on
Commit
138b104
·
1 Parent(s): acad788

Update core/app.py

Browse files
Files changed (1) hide show
  1. core/app.py +44 -139
core/app.py CHANGED
@@ -3,58 +3,30 @@
3
  BackgroundFX Pro – Main Application Entry Point
4
  Refactored modular architecture – orchestrates specialised components
5
  """
6
-
7
  from __future__ import annotations
8
-
9
  # ── Early env/threading hygiene (safe default to silence libgomp) ────────────
10
  import os
11
  if not os.environ.get("OMP_NUM_THREADS", "").isdigit():
12
  os.environ["OMP_NUM_THREADS"] = "2"
13
-
14
  # If you use early_env in your project, keep this import (harmless if absent)
15
  try:
16
- import early_env # sets OMP/MKL/OPENBLAS + torch threads safely
17
  except Exception:
18
  pass
19
-
20
  import logging
21
  import threading
22
  import traceback
23
  import sys
24
  from pathlib import Path
25
- from typing import Optional, Tuple, Dict, Any, Callable
26
-
27
- # Mitigate CUDA fragmentation (must be set before importing torch)
28
- if "PYTORCH_CUDA_ALLOC_CONF" not in os.environ:
29
- os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True,max_split_size_mb:128"
30
-
31
-
32
- # ── Logging ──────────────────────────────────────────────────────────────────
33
- logging.basicConfig(
34
- level=logging.INFO,
35
- format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
36
- )
37
- logger = logging.getLogger("core.app")
38
-
39
- # ── Ensure project root importable ───────────────────────────────────────────
40
- PROJECT_FILE = Path(__file__).resolve()
41
- CORE_DIR = PROJECT_FILE.parent
42
- ROOT = CORE_DIR.parent
43
- if str(ROOT) not in sys.path:
44
- sys.path.insert(0, str(ROOT))
45
-
46
- # Create loader directories if they don't exist
47
- loaders_dir = ROOT / "models" / "loaders"
48
- loaders_dir.mkdir(parents=True, exist_ok=True)
49
-
50
  # ── Gradio schema patch (HF quirk) ───────────────────────────────────────────
51
  try:
52
  import gradio_client.utils as gc_utils
53
  _orig_get_type = gc_utils.get_type
54
  def _patched_get_type(schema):
55
  if not isinstance(schema, dict):
56
- if isinstance(schema, bool): return "boolean"
57
- if isinstance(schema, str): return "string"
58
  if isinstance(schema, (int, float)): return "number"
59
  return "string"
60
  return _orig_get_type(schema)
@@ -62,13 +34,9 @@ def _patched_get_type(schema):
62
  logger.info("Gradio schema patch applied")
63
  except Exception as e:
64
  logger.warning(f"Gradio patch failed: {e}")
65
-
66
  # ── Core config + components ─────────────────────────────────────────────────
67
- from config.app_config import get_config
68
- from core.exceptions import ModelLoadingError, VideoProcessingError
69
  from utils.hardware.device_manager import DeviceManager
70
  from utils.system.memory_manager import MemoryManager
71
-
72
  # Try to import the new split loaders first, fall back to old if needed
73
  try:
74
  from models.loaders.model_loader import ModelLoader
@@ -76,77 +44,63 @@ def _patched_get_type(schema):
76
  except ImportError:
77
  logger.warning("Split loaders not found, using legacy loader")
78
  # Fall back to old loader if split architecture isn't available yet
79
- from models.model_loader import ModelLoader # type: ignore
80
-
81
  from processing.video.video_processor import CoreVideoProcessor
82
  from processing.audio.audio_processor import AudioProcessor
83
  from utils.monitoring.progress_tracker import ProgressTracker
84
- from utils.cv_processing import validate_video_file
85
-
86
  # ── Optional Two-Stage import ────────────────────────────────────────────────
87
  TWO_STAGE_AVAILABLE = False
88
  TWO_STAGE_IMPORT_ORIGIN = ""
89
  TWO_STAGE_IMPORT_ERROR = ""
90
  CHROMA_PRESETS: Dict[str, Dict[str, Any]] = {"standard": {}}
91
- TwoStageProcessor = None # type: ignore
92
-
93
  # Try multiple import paths for two-stage processor
94
  two_stage_paths = [
95
- "processors.two_stage", # Your fixed version
96
  "processing.two_stage.two_stage_processor",
97
  "processing.two_stage",
98
  ]
99
-
100
  for import_path in two_stage_paths:
101
  try:
102
  if "processors" in import_path:
103
- from processors.two_stage import TwoStageProcessor, CHROMA_PRESETS # type: ignore
104
  else:
105
- from processing.two_stage.two_stage_processor import TwoStageProcessor, CHROMA_PRESETS # type: ignore
106
  TWO_STAGE_AVAILABLE = True
107
  TWO_STAGE_IMPORT_ORIGIN = import_path
108
  logger.info(f"Two-stage import OK ({import_path})")
109
  break
110
  except Exception:
111
  continue
112
-
113
  if not TWO_STAGE_AVAILABLE:
114
  logger.warning("Two-stage import FAILED from all paths")
115
-
116
  # ── Quiet startup self-check (async by default) ──────────────────────────────
117
  # Place the helper in tools/startup_selfcheck.py (with tools/__init__.py present)
118
  try:
119
  from tools.startup_selfcheck import schedule_startup_selfcheck
120
  except Exception:
121
- schedule_startup_selfcheck = None # graceful if the helper isn't shipped
122
-
123
  # ╔══════════════════════════════════════════════════════════════════════════╗
124
- # β•‘ VideoProcessor class β•‘
125
  # β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•
126
  class VideoProcessor:
127
  """
128
  Main orchestrator – coordinates all specialised components.
129
  """
130
-
131
  def __init__(self):
132
  self.config = get_config()
133
- self._patch_config_defaults(self.config) # avoid AttributeError on older configs
134
-
135
  self.device_manager = DeviceManager()
136
  self.memory_manager = MemoryManager(self.device_manager.get_optimal_device())
137
  self.model_loader = ModelLoader(self.device_manager, self.memory_manager)
138
-
139
  self.audio_processor = AudioProcessor()
140
  self.core_processor: Optional[CoreVideoProcessor] = None
141
  self.two_stage_processor: Optional[Any] = None
142
-
143
  self.models_loaded = False
144
  self.loading_lock = threading.Lock()
145
  self.cancel_event = threading.Event()
146
  self.progress_tracker: Optional[ProgressTracker] = None
147
-
148
  logger.info(f"VideoProcessor on device: {self.device_manager.get_optimal_device()}")
149
-
150
  # ── Config hardening: add missing fields safely ───────────────────────────
151
  @staticmethod
152
  def _patch_config_defaults(cfg: Any) -> None:
@@ -170,7 +124,6 @@ def _patch_config_defaults(cfg: Any) -> None:
170
  if not hasattr(cfg, k):
171
  setattr(cfg, k, v)
172
  Path(cfg.output_dir).mkdir(parents=True, exist_ok=True)
173
-
174
  # ── Progress helper ───────────────────────────────────────────────────────
175
  def _init_progress(self, video_path: str, cb: Optional[Callable] = None):
176
  try:
@@ -184,31 +137,25 @@ def _init_progress(self, video_path: str, cb: Optional[Callable] = None):
184
  except Exception as e:
185
  logger.warning(f"Progress init failed: {e}")
186
  self.progress_tracker = ProgressTracker(100, cb)
187
-
188
  # ── Model loading ─────────────────────────────────────────────────────────
189
  def load_models(self, progress_callback: Optional[Callable] = None) -> str:
190
  with self.loading_lock:
191
  if self.models_loaded:
192
  return "Models already loaded and validated"
193
-
194
  try:
195
  self.cancel_event.clear()
196
  if progress_callback:
197
  progress_callback(0.0, f"Loading on {self.device_manager.get_optimal_device()}")
198
-
199
  sam2_loaded, mat_loaded = self.model_loader.load_all_models(
200
  progress_callback=progress_callback, cancel_event=self.cancel_event
201
  )
202
  if self.cancel_event.is_set():
203
  return "Model loading cancelled"
204
-
205
  # Get the actual models
206
  sam2_predictor = sam2_loaded.model if sam2_loaded else None
207
- mat_model = mat_loaded.model if mat_loaded else None # NOTE: in our MatAnyone loader this is a stateful adapter (callable)
208
-
209
  # Initialize core processor
210
  self.core_processor = CoreVideoProcessor(config=self.config, models=self.model_loader)
211
-
212
  # Initialize two-stage processor if available
213
  self.two_stage_processor = None
214
  if TWO_STAGE_AVAILABLE and TwoStageProcessor and (sam2_predictor or mat_model):
@@ -220,24 +167,22 @@ def load_models(self, progress_callback: Optional[Callable] = None) -> str:
220
  except Exception as e:
221
  logger.warning(f"Two-stage init failed: {e}")
222
  self.two_stage_processor = None
223
-
224
  self.models_loaded = True
225
  msg = self.model_loader.get_load_summary()
226
-
227
  # Add status about processors
228
  if self.two_stage_processor:
229
  msg += "\nβœ… Two-stage processor ready"
230
  else:
231
  msg += "\n⚠️ Two-stage processor not available"
232
-
233
  if mat_model:
234
  msg += "\nβœ… MatAnyone refinement active"
235
  else:
236
  msg += "\n⚠️ MatAnyone not loaded (edges may be rough)"
237
-
238
  logger.info(msg)
239
  return msg
240
-
241
  except (AttributeError, ModelLoadingError) as e:
242
  self.models_loaded = False
243
  err = f"Model loading failed: {e}"
@@ -248,7 +193,6 @@ def load_models(self, progress_callback: Optional[Callable] = None) -> str:
248
  err = f"Unexpected error during model loading: {e}"
249
  logger.error(f"{err}\n{traceback.format_exc()}")
250
  return err
251
-
252
  # ── Public entry – process video ─────────────────────────────────────────
253
  def process_video(
254
  self,
@@ -261,30 +205,25 @@ def process_video(
261
  key_color_mode: str = "auto",
262
  preview_mask: bool = False,
263
  preview_greenscreen: bool = False,
264
- ) -> Tuple[Optional[str], str]:
265
  if not self.models_loaded or not self.core_processor:
266
- return None, "Models not loaded. Please click 'Load Models' first."
267
  if self.cancel_event.is_set():
268
- return None, "Processing cancelled"
269
-
270
  self._init_progress(video_path, progress_callback)
271
-
272
  ok, why = validate_video_file(video_path)
273
  if not ok:
274
- return None, f"Invalid video: {why}"
275
-
276
  try:
277
  # Log which mode we're using
278
  mode = "two-stage" if use_two_stage else "single-stage"
279
  matanyone_status = "enabled" if self.model_loader.get_matanyone() else "disabled"
280
  logger.info(f"Processing video in {mode} mode, MatAnyone: {matanyone_status}")
281
-
282
  # IMPORTANT: start each video with a clean MatAnyone memory
283
  self._reset_matanyone_session()
284
-
285
  if use_two_stage:
286
  if not TWO_STAGE_AVAILABLE or self.two_stage_processor is None:
287
- return None, "Two-stage processing not available"
288
  return self._process_two_stage(
289
  video_path,
290
  background_choice,
@@ -302,14 +241,12 @@ def process_video(
302
  preview_mask,
303
  preview_greenscreen,
304
  )
305
-
306
  except VideoProcessingError as e:
307
  logger.error(f"Processing failed: {e}")
308
- return None, f"Processing failed: {e}"
309
  except Exception as e:
310
  logger.error(f"Unexpected processing error: {e}\n{traceback.format_exc()}")
311
- return None, f"Unexpected error: {e}"
312
-
313
  # ── Private – per-video MatAnyone reset ──────────────────────────────────
314
  def _reset_matanyone_session(self):
315
  """
@@ -326,7 +263,6 @@ def _reset_matanyone_session(self):
326
  logger.info("MatAnyone session reset for new video")
327
  except Exception as e:
328
  logger.warning(f"MatAnyone session reset failed (continuing): {e}")
329
-
330
  # ── Private – single-stage ───────────────────────────────────────────────
331
  def _process_single_stage(
332
  self,
@@ -336,13 +272,12 @@ def _process_single_stage(
336
  progress_callback: Optional[Callable],
337
  preview_mask: bool,
338
  preview_greenscreen: bool,
339
- ) -> Tuple[Optional[str], str]:
340
  import time
341
  ts = int(time.time())
342
  out_dir = Path(self.config.output_dir) / "single_stage"
343
  out_dir.mkdir(parents=True, exist_ok=True)
344
  out_path = str(out_dir / f"processed_{ts}.mp4")
345
-
346
  # Process video via your CoreVideoProcessor
347
  result = self.core_processor.process_video(
348
  input_path=video_path,
@@ -353,10 +288,9 @@ def _process_single_stage(
353
  },
354
  progress_callback=progress_callback,
355
  )
356
-
357
  if not result:
358
- return None, "Video processing failed"
359
-
360
  # Mux audio unless preview-only
361
  if not (preview_mask or preview_greenscreen):
362
  try:
@@ -368,7 +302,6 @@ def _process_single_stage(
368
  final_path = out_path
369
  else:
370
  final_path = out_path
371
-
372
  # Build status message
373
  try:
374
  mat_loaded = bool(self.model_loader.get_matanyone())
@@ -383,8 +316,7 @@ def _process_single_stage(
383
  f"MatAnyone: {matanyone_status}\n"
384
  f"Device: {self.device_manager.get_optimal_device()}"
385
  )
386
- return final_path, msg
387
-
388
  # ── Private – two-stage ─────────────────────────────────────────────────
389
  def _process_two_stage(
390
  self,
@@ -394,18 +326,16 @@ def _process_two_stage(
394
  progress_callback: Optional[Callable],
395
  chroma_preset: str,
396
  key_color_mode: str,
397
- ) -> Tuple[Optional[str], str]:
398
  if self.two_stage_processor is None:
399
- return None, "Two-stage processor not available"
400
-
401
  import cv2, time
402
  cap = cv2.VideoCapture(video_path)
403
  if not cap.isOpened():
404
- return None, "Could not open input video"
405
  w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) or 1280
406
  h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) or 720
407
  cap.release()
408
-
409
  # Prepare background
410
  try:
411
  background = self.core_processor.prepare_background(
@@ -413,20 +343,17 @@ def _process_two_stage(
413
  )
414
  except Exception as e:
415
  logger.error(f"Background preparation failed: {e}")
416
- return None, f"Failed to prepare background: {e}"
417
  if background is None:
418
- return None, "Failed to prepare background"
419
-
420
  ts = int(time.time())
421
  out_dir = Path(self.config.output_dir) / "two_stage"
422
  out_dir.mkdir(parents=True, exist_ok=True)
423
  final_out = str(out_dir / f"final_{ts}.mp4")
424
-
425
  chroma_cfg = CHROMA_PRESETS.get(chroma_preset, CHROMA_PRESETS.get("standard", {}))
426
  logger.info(f"Two-stage with preset: {chroma_preset} | key_color: {key_color_mode}")
427
-
428
  # (Per-video reset already called in process_video)
429
- result, message = self.two_stage_processor.process_full_pipeline(
430
  video_path,
431
  background,
432
  final_out,
@@ -434,18 +361,16 @@ def _process_two_stage(
434
  chroma_settings=chroma_cfg,
435
  progress_callback=progress_callback,
436
  )
437
- if result is None:
438
- return None, message
439
-
440
  # Mux audio
441
  try:
442
- final_path = self.audio_processor.add_audio_to_video(
443
- original_video=video_path, processed_video=result
444
  )
445
  except Exception as e:
446
  logger.warning(f"Audio mux failed: {e}")
447
- final_path = result
448
-
449
  try:
450
  mat_loaded = bool(self.model_loader.get_matanyone())
451
  except Exception:
@@ -458,8 +383,7 @@ def _process_two_stage(
458
  f"MatAnyone: {matanyone_status}\n"
459
  f"Device: {self.device_manager.get_optimal_device()}"
460
  )
461
- return final_path, msg
462
-
463
  # ── Status helpers ───────────────────────────────────────────────────────
464
  def get_status(self) -> Dict[str, Any]:
465
  status = {
@@ -471,7 +395,7 @@ def get_status(self) -> Dict[str, Any]:
471
  "config": self._safe_config_dict(),
472
  "memory_usage": self._safe_memory_usage(),
473
  }
474
-
475
  try:
476
  status["sam2_loaded"] = self.model_loader.get_sam2() is not None
477
  status["matanyone_loaded"] = self.model_loader.get_matanyone() is not None
@@ -479,31 +403,26 @@ def get_status(self) -> Dict[str, Any]:
479
  except Exception:
480
  status["sam2_loaded"] = False
481
  status["matanyone_loaded"] = False
482
-
483
  if self.progress_tracker:
484
  status["progress"] = self.progress_tracker.get_all_progress()
485
-
486
  return status
487
-
488
  def _safe_config_dict(self) -> Dict[str, Any]:
489
  try:
490
  return self.config.to_dict()
491
  except Exception:
492
  keys = ["use_nvenc", "prefer_mp4", "video_codec", "audio_copy",
493
- "ffmpeg_path", "max_model_size", "max_model_size_bytes",
494
  "output_dir", "matanyone_enabled"]
495
  return {k: getattr(self.config, k, None) for k in keys}
496
-
497
  def _safe_memory_usage(self) -> Dict[str, Any]:
498
  try:
499
  return self.memory_manager.get_memory_usage()
500
  except Exception:
501
  return {}
502
-
503
  def cancel_processing(self):
504
  self.cancel_event.set()
505
  logger.info("Cancellation requested")
506
-
507
  def cleanup_resources(self):
508
  try:
509
  self.memory_manager.cleanup_aggressive()
@@ -514,14 +433,10 @@ def cleanup_resources(self):
514
  except Exception:
515
  pass
516
  logger.info("Resources cleaned up")
517
-
518
-
519
  # ── Singleton + thin wrappers (used by UI callbacks) ────────────────────────
520
  processor = VideoProcessor()
521
-
522
  def load_models_with_validation(progress_callback: Optional[Callable] = None) -> str:
523
  return processor.load_models(progress_callback)
524
-
525
  def process_video_fixed(
526
  video_path: str,
527
  background_choice: str,
@@ -532,7 +447,7 @@ def process_video_fixed(
532
  key_color_mode: str = "auto",
533
  preview_mask: bool = False,
534
  preview_greenscreen: bool = False,
535
- ) -> Tuple[Optional[str], str]:
536
  return processor.process_video(
537
  video_path,
538
  background_choice,
@@ -544,37 +459,29 @@ def process_video_fixed(
544
  preview_mask,
545
  preview_greenscreen,
546
  )
547
-
548
  def get_model_status() -> Dict[str, Any]:
549
  return processor.get_status()
550
-
551
  def get_cache_status() -> Dict[str, Any]:
552
  return processor.get_status()
553
-
554
  PROCESS_CANCELLED = processor.cancel_event
555
-
556
-
557
  # ── CLI entrypoint (must exist; app.py imports main) ─────────────────────────
558
  def main():
559
  try:
560
  logger.info("Starting BackgroundFX Pro")
561
  logger.info(f"Device: {processor.device_manager.get_optimal_device()}")
562
  logger.info(f"Two-stage available: {TWO_STAGE_AVAILABLE}")
563
-
564
  # πŸ”Ή Quiet model self-check (defaults to async; set SELF_CHECK_MODE=sync to block)
565
  if schedule_startup_selfcheck is not None:
566
  try:
567
  schedule_startup_selfcheck(mode=os.getenv("SELF_CHECK_MODE", "async"))
568
  except Exception as e:
569
  logger.error(f"Startup self-check skipped: {e}", exc_info=True)
570
-
571
  # Log model loader type
572
  try:
573
  from models.loaders.model_loader import ModelLoader
574
  logger.info("Using split loader architecture")
575
  except Exception:
576
  logger.info("Using legacy loader")
577
-
578
  from ui.ui_components import create_interface
579
  demo = create_interface()
580
  demo.queue().launch(
@@ -585,7 +492,5 @@ def main():
585
  )
586
  finally:
587
  processor.cleanup_resources()
588
-
589
-
590
  if __name__ == "__main__":
591
- main()
 
3
  BackgroundFX Pro – Main Application Entry Point
4
  Refactored modular architecture – orchestrates specialised components
5
  """
 
6
  from __future__ import annotations
 
7
  # ── Early env/threading hygiene (safe default to silence libgomp) ────────────
8
  import os
9
  if not os.environ.get("OMP_NUM_THREADS", "").isdigit():
10
  os.environ["OMP_NUM_THREADS"] = "2"
 
11
  # If you use early_env in your project, keep this import (harmless if absent)
12
  try:
13
+ import early_env # sets OMP/MKL/OPENBLAS + torch threads safely
14
  except Exception:
15
  pass
 
16
  import logging
17
  import threading
18
  import traceback
19
  import sys
20
  from pathlib import Path
21
+ from utils.cv_processing import validate_video_file
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
  # ── Gradio schema patch (HF quirk) ───────────────────────────────────────────
23
  try:
24
  import gradio_client.utils as gc_utils
25
  _orig_get_type = gc_utils.get_type
26
  def _patched_get_type(schema):
27
  if not isinstance(schema, dict):
28
+ if isinstance(schema, bool): return "boolean"
29
+ if isinstance(schema, str): return "string"
30
  if isinstance(schema, (int, float)): return "number"
31
  return "string"
32
  return _orig_get_type(schema)
 
34
  logger.info("Gradio schema patch applied")
35
  except Exception as e:
36
  logger.warning(f"Gradio patch failed: {e}")
 
37
  # ── Core config + components ─────────────────────────────────────────────────
 
 
38
  from utils.hardware.device_manager import DeviceManager
39
  from utils.system.memory_manager import MemoryManager
 
40
  # Try to import the new split loaders first, fall back to old if needed
41
  try:
42
  from models.loaders.model_loader import ModelLoader
 
44
  except ImportError:
45
  logger.warning("Split loaders not found, using legacy loader")
46
  # Fall back to old loader if split architecture isn't available yet
47
+ from models.model_loader import ModelLoader # type: ignore
 
48
  from processing.video.video_processor import CoreVideoProcessor
49
  from processing.audio.audio_processor import AudioProcessor
50
  from utils.monitoring.progress_tracker import ProgressTracker
 
 
51
  # ── Optional Two-Stage import ────────────────────────────────────────────────
52
  TWO_STAGE_AVAILABLE = False
53
  TWO_STAGE_IMPORT_ORIGIN = ""
54
  TWO_STAGE_IMPORT_ERROR = ""
55
  CHROMA_PRESETS: Dict[str, Dict[str, Any]] = {"standard": {}}
56
+ TwoStageProcessor = None # type: ignore
 
57
  # Try multiple import paths for two-stage processor
58
  two_stage_paths = [
59
+ "processors.two_stage", # Your fixed version
60
  "processing.two_stage.two_stage_processor",
61
  "processing.two_stage",
62
  ]
 
63
  for import_path in two_stage_paths:
64
  try:
65
  if "processors" in import_path:
66
+ from processors.two_stage import TwoStageProcessor, CHROMA_PRESETS # type: ignore
67
  else:
68
+ from processing.two_stage.two_stage_processor import TwoStageProcessor, CHROMA_PRESETS # type: ignore
69
  TWO_STAGE_AVAILABLE = True
70
  TWO_STAGE_IMPORT_ORIGIN = import_path
71
  logger.info(f"Two-stage import OK ({import_path})")
72
  break
73
  except Exception:
74
  continue
 
75
  if not TWO_STAGE_AVAILABLE:
76
  logger.warning("Two-stage import FAILED from all paths")
 
77
  # ── Quiet startup self-check (async by default) ──────────────────────────────
78
  # Place the helper in tools/startup_selfcheck.py (with tools/__init__.py present)
79
  try:
80
  from tools.startup_selfcheck import schedule_startup_selfcheck
81
  except Exception:
82
+ schedule_startup_selfcheck = None # graceful if the helper isn't shipped
 
83
  # ╔══════════════════════════════════════════════════════════════════════════╗
84
+ # β•‘ VideoProcessor class β•‘
85
  # β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•
86
  class VideoProcessor:
87
  """
88
  Main orchestrator – coordinates all specialised components.
89
  """
 
90
  def __init__(self):
91
  self.config = get_config()
92
+ self._patch_config_defaults(self.config) # avoid AttributeError on older configs
 
93
  self.device_manager = DeviceManager()
94
  self.memory_manager = MemoryManager(self.device_manager.get_optimal_device())
95
  self.model_loader = ModelLoader(self.device_manager, self.memory_manager)
 
96
  self.audio_processor = AudioProcessor()
97
  self.core_processor: Optional[CoreVideoProcessor] = None
98
  self.two_stage_processor: Optional[Any] = None
 
99
  self.models_loaded = False
100
  self.loading_lock = threading.Lock()
101
  self.cancel_event = threading.Event()
102
  self.progress_tracker: Optional[ProgressTracker] = None
 
103
  logger.info(f"VideoProcessor on device: {self.device_manager.get_optimal_device()}")
 
104
  # ── Config hardening: add missing fields safely ───────────────────────────
105
  @staticmethod
106
  def _patch_config_defaults(cfg: Any) -> None:
 
124
  if not hasattr(cfg, k):
125
  setattr(cfg, k, v)
126
  Path(cfg.output_dir).mkdir(parents=True, exist_ok=True)
 
127
  # ── Progress helper ───────────────────────────────────────────────────────
128
  def _init_progress(self, video_path: str, cb: Optional[Callable] = None):
129
  try:
 
137
  except Exception as e:
138
  logger.warning(f"Progress init failed: {e}")
139
  self.progress_tracker = ProgressTracker(100, cb)
 
140
  # ── Model loading ─────────────────────────────────────────────────────────
141
  def load_models(self, progress_callback: Optional[Callable] = None) -> str:
142
  with self.loading_lock:
143
  if self.models_loaded:
144
  return "Models already loaded and validated"
 
145
  try:
146
  self.cancel_event.clear()
147
  if progress_callback:
148
  progress_callback(0.0, f"Loading on {self.device_manager.get_optimal_device()}")
 
149
  sam2_loaded, mat_loaded = self.model_loader.load_all_models(
150
  progress_callback=progress_callback, cancel_event=self.cancel_event
151
  )
152
  if self.cancel_event.is_set():
153
  return "Model loading cancelled"
 
154
  # Get the actual models
155
  sam2_predictor = sam2_loaded.model if sam2_loaded else None
156
+ mat_model = mat_loaded.model if mat_loaded else None # NOTE: in our MatAnyone loader this is a stateful adapter (callable)
 
157
  # Initialize core processor
158
  self.core_processor = CoreVideoProcessor(config=self.config, models=self.model_loader)
 
159
  # Initialize two-stage processor if available
160
  self.two_stage_processor = None
161
  if TWO_STAGE_AVAILABLE and TwoStageProcessor and (sam2_predictor or mat_model):
 
167
  except Exception as e:
168
  logger.warning(f"Two-stage init failed: {e}")
169
  self.two_stage_processor = None
 
170
  self.models_loaded = True
171
  msg = self.model_loader.get_load_summary()
172
+
173
  # Add status about processors
174
  if self.two_stage_processor:
175
  msg += "\nβœ… Two-stage processor ready"
176
  else:
177
  msg += "\n⚠️ Two-stage processor not available"
178
+
179
  if mat_model:
180
  msg += "\nβœ… MatAnyone refinement active"
181
  else:
182
  msg += "\n⚠️ MatAnyone not loaded (edges may be rough)"
183
+
184
  logger.info(msg)
185
  return msg
 
186
  except (AttributeError, ModelLoadingError) as e:
187
  self.models_loaded = False
188
  err = f"Model loading failed: {e}"
 
193
  err = f"Unexpected error during model loading: {e}"
194
  logger.error(f"{err}\n{traceback.format_exc()}")
195
  return err
 
196
  # ── Public entry – process video ─────────────────────────────────────────
197
  def process_video(
198
  self,
 
205
  key_color_mode: str = "auto",
206
  preview_mask: bool = False,
207
  preview_greenscreen: bool = False,
208
+ ) -> Tuple[Optional[str], Optional[str], str]:
209
  if not self.models_loaded or not self.core_processor:
210
+ return None, None, "Models not loaded. Please click 'Load Models' first."
211
  if self.cancel_event.is_set():
212
+ return None, None, "Processing cancelled"
 
213
  self._init_progress(video_path, progress_callback)
 
214
  ok, why = validate_video_file(video_path)
215
  if not ok:
216
+ return None, None, f"Invalid video: {why}"
 
217
  try:
218
  # Log which mode we're using
219
  mode = "two-stage" if use_two_stage else "single-stage"
220
  matanyone_status = "enabled" if self.model_loader.get_matanyone() else "disabled"
221
  logger.info(f"Processing video in {mode} mode, MatAnyone: {matanyone_status}")
 
222
  # IMPORTANT: start each video with a clean MatAnyone memory
223
  self._reset_matanyone_session()
 
224
  if use_two_stage:
225
  if not TWO_STAGE_AVAILABLE or self.two_stage_processor is None:
226
+ return None, None, "Two-stage processing not available"
227
  return self._process_two_stage(
228
  video_path,
229
  background_choice,
 
241
  preview_mask,
242
  preview_greenscreen,
243
  )
 
244
  except VideoProcessingError as e:
245
  logger.error(f"Processing failed: {e}")
246
+ return None, None, f"Processing failed: {e}"
247
  except Exception as e:
248
  logger.error(f"Unexpected processing error: {e}\n{traceback.format_exc()}")
249
+ return None, None, f"Unexpected error: {e}"
 
250
  # ── Private – per-video MatAnyone reset ──────────────────────────────────
251
  def _reset_matanyone_session(self):
252
  """
 
263
  logger.info("MatAnyone session reset for new video")
264
  except Exception as e:
265
  logger.warning(f"MatAnyone session reset failed (continuing): {e}")
 
266
  # ── Private – single-stage ───────────────────────────────────────────────
267
  def _process_single_stage(
268
  self,
 
272
  progress_callback: Optional[Callable],
273
  preview_mask: bool,
274
  preview_greenscreen: bool,
275
+ ) -> Tuple[Optional[str], Optional[str], str]:
276
  import time
277
  ts = int(time.time())
278
  out_dir = Path(self.config.output_dir) / "single_stage"
279
  out_dir.mkdir(parents=True, exist_ok=True)
280
  out_path = str(out_dir / f"processed_{ts}.mp4")
 
281
  # Process video via your CoreVideoProcessor
282
  result = self.core_processor.process_video(
283
  input_path=video_path,
 
288
  },
289
  progress_callback=progress_callback,
290
  )
291
+
292
  if not result:
293
+ return None, None, "Video processing failed"
 
294
  # Mux audio unless preview-only
295
  if not (preview_mask or preview_greenscreen):
296
  try:
 
302
  final_path = out_path
303
  else:
304
  final_path = out_path
 
305
  # Build status message
306
  try:
307
  mat_loaded = bool(self.model_loader.get_matanyone())
 
316
  f"MatAnyone: {matanyone_status}\n"
317
  f"Device: {self.device_manager.get_optimal_device()}"
318
  )
319
+ return final_path, None, msg # No green in single-stage
 
320
  # ── Private – two-stage ─────────────────────────────────────────────────
321
  def _process_two_stage(
322
  self,
 
326
  progress_callback: Optional[Callable],
327
  chroma_preset: str,
328
  key_color_mode: str,
329
+ ) -> Tuple[Optional[str], Optional[str], str]:
330
  if self.two_stage_processor is None:
331
+ return None, None, "Two-stage processor not available"
 
332
  import cv2, time
333
  cap = cv2.VideoCapture(video_path)
334
  if not cap.isOpened():
335
+ return None, None, "Could not open input video"
336
  w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) or 1280
337
  h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) or 720
338
  cap.release()
 
339
  # Prepare background
340
  try:
341
  background = self.core_processor.prepare_background(
 
343
  )
344
  except Exception as e:
345
  logger.error(f"Background preparation failed: {e}")
346
+ return None, None, f"Failed to prepare background: {e}"
347
  if background is None:
348
+ return None, None, "Failed to prepare background"
 
349
  ts = int(time.time())
350
  out_dir = Path(self.config.output_dir) / "two_stage"
351
  out_dir.mkdir(parents=True, exist_ok=True)
352
  final_out = str(out_dir / f"final_{ts}.mp4")
 
353
  chroma_cfg = CHROMA_PRESETS.get(chroma_preset, CHROMA_PRESETS.get("standard", {}))
354
  logger.info(f"Two-stage with preset: {chroma_preset} | key_color: {key_color_mode}")
 
355
  # (Per-video reset already called in process_video)
356
+ final_path, green_path, stage2_msg = self.two_stage_processor.process_full_pipeline(
357
  video_path,
358
  background,
359
  final_out,
 
361
  chroma_settings=chroma_cfg,
362
  progress_callback=progress_callback,
363
  )
364
+ if final_path is None:
365
+ return None, None, stage2_msg
 
366
  # Mux audio
367
  try:
368
+ final_with_audio = self.audio_processor.add_audio_to_video(
369
+ original_video=video_path, processed_video=final_path
370
  )
371
  except Exception as e:
372
  logger.warning(f"Audio mux failed: {e}")
373
+ final_with_audio = final_path
 
374
  try:
375
  mat_loaded = bool(self.model_loader.get_matanyone())
376
  except Exception:
 
383
  f"MatAnyone: {matanyone_status}\n"
384
  f"Device: {self.device_manager.get_optimal_device()}"
385
  )
386
+ return final_with_audio, green_path, msg
 
387
  # ── Status helpers ───────────────────────────────────────────────────────
388
  def get_status(self) -> Dict[str, Any]:
389
  status = {
 
395
  "config": self._safe_config_dict(),
396
  "memory_usage": self._safe_memory_usage(),
397
  }
398
+
399
  try:
400
  status["sam2_loaded"] = self.model_loader.get_sam2() is not None
401
  status["matanyone_loaded"] = self.model_loader.get_matanyone() is not None
 
403
  except Exception:
404
  status["sam2_loaded"] = False
405
  status["matanyone_loaded"] = False
 
406
  if self.progress_tracker:
407
  status["progress"] = self.progress_tracker.get_all_progress()
408
+
409
  return status
 
410
  def _safe_config_dict(self) -> Dict[str, Any]:
411
  try:
412
  return self.config.to_dict()
413
  except Exception:
414
  keys = ["use_nvenc", "prefer_mp4", "video_codec", "audio_copy",
415
+ "ffmpeg_path", "max_model_size", "max_model_size_bytes",
416
  "output_dir", "matanyone_enabled"]
417
  return {k: getattr(self.config, k, None) for k in keys}
 
418
  def _safe_memory_usage(self) -> Dict[str, Any]:
419
  try:
420
  return self.memory_manager.get_memory_usage()
421
  except Exception:
422
  return {}
 
423
def cancel_processing(self):
    """Request cooperative cancellation of any in-flight work.

    Sets the shared cancel event; loading/processing loops poll it and
    bail out at their next checkpoint.
    """
    self.cancel_event.set()
    logger.info("Cancellation requested")
 
426
  def cleanup_resources(self):
427
  try:
428
  self.memory_manager.cleanup_aggressive()
 
433
  except Exception:
434
  pass
435
  logger.info("Resources cleaned up")
 
 
436
  # ── Singleton + thin wrappers (used by UI callbacks) ────────────────────────
437
  processor = VideoProcessor()
 
438
def load_models_with_validation(progress_callback: Optional[Callable] = None) -> str:
    """UI-facing wrapper: load models on the singleton processor.

    Returns the loader's human-readable status message.
    """
    status_message = processor.load_models(progress_callback)
    return status_message
 
440
  def process_video_fixed(
441
  video_path: str,
442
  background_choice: str,
 
447
  key_color_mode: str = "auto",
448
  preview_mask: bool = False,
449
  preview_greenscreen: bool = False,
450
+ ) -> Tuple[Optional[str], Optional[str], str]:
451
  return processor.process_video(
452
  video_path,
453
  background_choice,
 
459
  preview_mask,
460
  preview_greenscreen,
461
  )
 
462
def get_model_status() -> Dict[str, Any]:
    """UI-facing wrapper: return the singleton processor's status snapshot."""
    return processor.get_status()
 
464
def get_cache_status() -> Dict[str, Any]:
    """UI-facing wrapper for cache status.

    NOTE(review): currently returns the same processor status snapshot as
    get_model_status(); no separate cache statistics are reported yet.
    """
    return processor.get_status()
 
466
  PROCESS_CANCELLED = processor.cancel_event
 
 
467
  # ── CLI entrypoint (must exist; app.py imports main) ─────────────────────────
468
  def main():
469
  try:
470
  logger.info("Starting BackgroundFX Pro")
471
  logger.info(f"Device: {processor.device_manager.get_optimal_device()}")
472
  logger.info(f"Two-stage available: {TWO_STAGE_AVAILABLE}")
 
473
  # πŸ”Ή Quiet model self-check (defaults to async; set SELF_CHECK_MODE=sync to block)
474
  if schedule_startup_selfcheck is not None:
475
  try:
476
  schedule_startup_selfcheck(mode=os.getenv("SELF_CHECK_MODE", "async"))
477
  except Exception as e:
478
  logger.error(f"Startup self-check skipped: {e}", exc_info=True)
 
479
  # Log model loader type
480
  try:
481
  from models.loaders.model_loader import ModelLoader
482
  logger.info("Using split loader architecture")
483
  except Exception:
484
  logger.info("Using legacy loader")
 
485
  from ui.ui_components import create_interface
486
  demo = create_interface()
487
  demo.queue().launch(
 
492
  )
493
  finally:
494
  processor.cleanup_resources()
 
 
495
  if __name__ == "__main__":
496
+ main()