MogensR committed on
Commit
062de48
·
1 Parent(s): d4b7ec0

Update core/app.py

Browse files
Files changed (1) hide show
  1. core/app.py +132 -74
core/app.py CHANGED
@@ -1,10 +1,12 @@
1
  #!/usr/bin/env python3
2
  """
3
- BackgroundFX Pro - Main Application Entry Point
4
- Refactored modular architecture - orchestrates specialized components
5
  """
6
 
7
- # 0) Early env/threading hygiene (must be first)
 
 
8
  import early_env # sets OMP/MKL/OPENBLAS + torch threads safely
9
 
10
  import logging
@@ -12,16 +14,21 @@
12
  from pathlib import Path
13
  from typing import Optional, Tuple, Dict, Any, Callable
14
 
 
15
  # 1) Logging
 
16
  logging.basicConfig(
17
  level=logging.INFO,
18
- format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
19
  )
20
  logger = logging.getLogger("core.app")
21
 
 
22
  # 2) Patch Gradio schema early (HF Spaces quirk)
 
23
  try:
24
  import gradio_client.utils as gc_utils
 
25
  _orig_get_type = gc_utils.get_type
26
 
27
  def _patched_get_type(schema):
@@ -40,7 +47,9 @@ def _patched_get_type(schema):
40
  except Exception as e:
41
  logger.warning(f"Gradio patch failed: {e}")
42
 
 
43
  # 3) Core config + components
 
44
  from config.app_config import get_config
45
  from core.exceptions import ModelLoadingError, VideoProcessingError
46
  from utils.hardware.device_manager import DeviceManager
@@ -52,35 +61,41 @@ def _patched_get_type(schema):
52
 
53
  # Optional two-stage processor
54
  try:
55
- from processing.two_stage.two_stage_processor import TwoStageProcessor, CHROMA_PRESETS
 
 
 
 
56
  TWO_STAGE_AVAILABLE = True
57
  except Exception:
58
  TWO_STAGE_AVAILABLE = False
59
  CHROMA_PRESETS = {"standard": {}}
60
 
61
- # Validation helper for inputs (lives with CV utils)
62
  from utils.cv_processing import validate_video_file
63
 
64
 
 
 
 
65
  class VideoProcessor:
66
  """
67
- Main video processing orchestrator - coordinates all specialized components.
68
  """
 
 
 
 
69
  def __init__(self):
70
- self.config = get_config() # singleton-style app config
71
  self.device_manager = DeviceManager()
72
-
73
- # Memory manager now requires a device object/string
74
  self.memory_manager = MemoryManager(self.device_manager.get_optimal_device())
75
-
76
- # Model loader takes device + memory managers
77
  self.model_loader = ModelLoader(self.device_manager, self.memory_manager)
78
 
79
  self.audio_processor = AudioProcessor()
80
  self.core_processor: CoreVideoProcessor | None = None
81
  self.two_stage_processor: TwoStageProcessor | None = None
82
 
83
- # State
84
  self.models_loaded = False
85
  self.loading_lock = threading.Lock()
86
  self.cancel_event = threading.Event()
@@ -88,10 +103,13 @@ def __init__(self):
88
 
89
  logger.info(f"VideoProcessor on device: {self.device_manager.get_optimal_device()}")
90
 
91
- # ---------- Progress ----------
 
 
92
  def _init_progress(self, video_path: str, cb: Optional[Callable] = None):
93
  try:
94
  import cv2
 
95
  cap = cv2.VideoCapture(video_path)
96
  total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
97
  cap.release()
@@ -102,7 +120,9 @@ def _init_progress(self, video_path: str, cb: Optional[Callable] = None):
102
  logger.warning(f"Progress init failed: {e}")
103
  self.progress_tracker = ProgressTracker(100, cb)
104
 
105
- # ---------- Load Models ----------
 
 
106
  def load_models(self, progress_callback: Optional[Callable] = None) -> str:
107
  with self.loading_lock:
108
  if self.models_loaded:
@@ -111,57 +131,44 @@ def load_models(self, progress_callback: Optional[Callable] = None) -> str:
111
  try:
112
  self.cancel_event.clear()
113
  if progress_callback:
114
- progress_callback(0.0, f"Loading on {self.device_manager.get_optimal_device()}")
115
-
116
- try:
117
- sam2_loaded, mat_loaded = self.model_loader.load_all_models(
118
- progress_callback=progress_callback,
119
- cancel_event=self.cancel_event
120
  )
121
- except IndexError as e:
122
- import traceback
123
- tb = traceback.extract_tb(e.__traceback__)
124
- where = f"{tb[-1].filename}:{tb[-1].lineno}" if tb else "unknown"
125
- logger.error(f"IndexError in load_all_models at {where}: {e}")
126
- raise ModelLoadingError(f"Model loading failed (IndexError @ {where}): {e}")
127
- except Exception as e:
128
- import traceback
129
- logger.error(f"Unexpected error in load_all_models: {e}\n{traceback.format_exc()}")
130
- raise
131
 
132
  if self.cancel_event.is_set():
133
  return "Model loading cancelled"
134
 
135
- # Unwrap actual model refs for two-stage
136
  sam2_predictor = sam2_loaded.model if sam2_loaded else None
137
  mat_model = mat_loaded.model if mat_loaded else None
138
- if (sam2_predictor is None) and (mat_model is None):
139
- return "Model loading failed - see logs"
140
 
141
- # Core processor expects a "models" provider (we pass the loader itself)
142
  self.core_processor = CoreVideoProcessor(
143
- config=self.config,
144
- models=self.model_loader
145
  )
146
 
147
- # Optional 2-stage
148
  if TWO_STAGE_AVAILABLE and (sam2_predictor or mat_model):
149
  try:
150
  self.two_stage_processor = TwoStageProcessor(
151
- sam2_predictor=sam2_predictor,
152
- matanyone_model=mat_model
153
  )
154
- logger.info("Two-stage processor initialized")
155
  except Exception as e:
156
  logger.warning(f"Two-stage init failed: {e}")
157
  self.two_stage_processor = None
158
 
159
  self.models_loaded = True
160
  msg = self.model_loader.get_load_summary()
161
- if self.two_stage_processor:
162
- msg += "\n✅ Two-stage processor ready"
163
- else:
164
- msg += "\n⚠️ Two-stage processor not available"
 
165
  logger.info(msg)
166
  return msg
167
 
@@ -176,7 +183,9 @@ def load_models(self, progress_callback: Optional[Callable] = None) -> str:
176
  logger.error(err)
177
  return err
178
 
179
- # ---------- Process ----------
 
 
180
  def process_video(
181
  self,
182
  video_path: str,
@@ -185,13 +194,15 @@ def process_video(
185
  progress_callback: Optional[Callable] = None,
186
  use_two_stage: bool = False,
187
  chroma_preset: str = "standard",
 
188
  preview_mask: bool = False,
189
  preview_greenscreen: bool = False,
190
  ) -> Tuple[Optional[str], str]:
191
-
 
 
192
  if not self.models_loaded or not self.core_processor:
193
  return None, "Models not loaded. Please click β€œLoad Models” first."
194
-
195
  if self.cancel_event.is_set():
196
  return None, "Processing cancelled"
197
 
@@ -206,10 +217,24 @@ def process_video(
206
  if not TWO_STAGE_AVAILABLE:
207
  return None, "Two-stage processing not available on this build"
208
  if not self.two_stage_processor:
209
- return None, "Two-stage processor not initialized (models not ready?)"
210
- return self._process_two_stage(video_path, background_choice, custom_background_path, progress_callback, chroma_preset)
 
 
 
 
 
 
 
211
  else:
212
- return self._process_single_stage(video_path, background_choice, custom_background_path, progress_callback, preview_mask, preview_greenscreen)
 
 
 
 
 
 
 
213
 
214
  except VideoProcessingError as e:
215
  logger.error(f"Processing failed: {e}")
@@ -218,6 +243,9 @@ def process_video(
218
  logger.error(f"Unexpected processing error: {e}")
219
  return None, f"Unexpected error: {e}"
220
 
 
 
 
221
  def _process_single_stage(
222
  self,
223
  video_path: str,
@@ -227,8 +255,8 @@ def _process_single_stage(
227
  preview_mask: bool,
228
  preview_greenscreen: bool,
229
  ) -> Tuple[Optional[str], str]:
230
-
231
  import time
 
232
  ts = int(time.time())
233
  out_dir = Path(self.config.output_dir) / "single_stage"
234
  out_dir.mkdir(parents=True, exist_ok=True)
@@ -237,13 +265,18 @@ def _process_single_stage(
237
  result = self.core_processor.process_video(
238
  input_path=video_path,
239
  output_path=out_path,
240
- bg_config={"background_choice": background_choice, "custom_path": custom_background_path}
 
 
 
241
  )
242
  if not result:
243
  return None, "Video processing failed"
244
 
245
  if not (preview_mask or preview_greenscreen):
246
- final_path = self.audio_processor.add_audio_to_video(original_video=video_path, processed_video=out_path)
 
 
247
  else:
248
  final_path = out_path
249
 
@@ -256,6 +289,9 @@ def _process_single_stage(
256
  )
257
  return final_path, msg
258
 
 
 
 
259
  def _process_two_stage(
260
  self,
261
  video_path: str,
@@ -263,17 +299,21 @@ def _process_two_stage(
263
  custom_background_path: Optional[str],
264
  progress_callback: Optional[Callable],
265
  chroma_preset: str,
 
266
  ) -> Tuple[Optional[str], str]:
267
  if self.two_stage_processor is None:
268
  return None, "Two-stage processor not available"
269
 
270
  import cv2, time
 
271
  cap = cv2.VideoCapture(video_path)
272
  w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
273
  h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
274
  cap.release()
275
 
276
- background = self.core_processor.prepare_background(background_choice, custom_background_path, w, h)
 
 
277
  if background is None:
278
  return None, "Failed to prepare background"
279
 
@@ -282,11 +322,16 @@ def _process_two_stage(
282
  out_dir.mkdir(parents=True, exist_ok=True)
283
  final_out = str(out_dir / f"final_{ts}.mp4")
284
 
285
- chroma = CHROMA_PRESETS.get(chroma_preset, CHROMA_PRESETS["standard"])
286
- logger.info(f"Two-stage with preset: {chroma_preset}")
287
 
288
  result, message = self.two_stage_processor.process_full_pipeline(
289
- video_path, background, final_out, chroma_settings=chroma, progress_callback=progress_callback
 
 
 
 
 
290
  )
291
  if result is None:
292
  return None, message
@@ -299,11 +344,14 @@ def _process_two_stage(
299
  )
300
  return result, msg
301
 
302
- # ---------- Status / Control ----------
 
 
303
  def get_status(self) -> Dict[str, Any]:
304
  status = {
305
  "models_loaded": self.models_loaded,
306
- "two_stage_available": TWO_STAGE_AVAILABLE and (self.two_stage_processor is not None),
 
307
  "device": str(self.device_manager.get_optimal_device()),
308
  "memory_usage": self.memory_manager.get_memory_usage(),
309
  "config": self.config.to_dict(),
@@ -311,7 +359,9 @@ def get_status(self) -> Dict[str, Any]:
311
  }
312
  try:
313
  status["sam2_loaded"] = self.model_loader.get_sam2() is not None
314
- status["matanyone_loaded"] = self.model_loader.get_matanyone() is not None
 
 
315
  except Exception:
316
  status["sam2_loaded"] = False
317
  status["matanyone_loaded"] = False
@@ -326,15 +376,15 @@ def cancel_processing(self):
326
 
327
  def cleanup_resources(self):
328
  self.memory_manager.cleanup_aggressive()
329
- if self.model_loader:
330
- self.model_loader.cleanup()
331
  logger.info("Resources cleaned up")
332
 
333
 
334
- # Singleton for UI callbacks
 
 
335
  processor = VideoProcessor()
336
 
337
- # Back-compat wrappers used by ui/callbacks.py
338
  def load_models_with_validation(progress_callback: Optional[Callable] = None) -> str:
339
  return processor.load_models(progress_callback)
340
 
@@ -345,43 +395,51 @@ def process_video_fixed(
345
  progress_callback: Optional[Callable] = None,
346
  use_two_stage: bool = False,
347
  chroma_preset: str = "standard",
 
348
  preview_mask: bool = False,
349
  preview_greenscreen: bool = False,
350
  ) -> Tuple[Optional[str], str]:
351
  return processor.process_video(
352
- video_path, background_choice, custom_background_path,
353
- progress_callback, use_two_stage, chroma_preset, preview_mask, preview_greenscreen
 
 
 
 
 
 
 
354
  )
355
 
356
  def get_model_status() -> Dict[str, Any]:
357
  return processor.get_status()
358
 
359
  def get_cache_status() -> Dict[str, Any]:
360
- # For now same as status; can add cache metrics later
361
  return processor.get_status()
362
 
363
  PROCESS_CANCELLED = processor.cancel_event
364
 
365
 
 
 
 
366
  def main():
367
  try:
368
  logger.info("Starting BackgroundFX Pro")
369
  logger.info(f"Device: {processor.device_manager.get_optimal_device()}")
370
  logger.info(f"Two-stage available: {TWO_STAGE_AVAILABLE}")
371
 
372
- # NOTE: UI was split into ui/components.py
373
  from ui.components import create_interface
374
- demo = create_interface()
375
 
 
376
  demo.queue().launch(
377
  server_name="0.0.0.0",
378
  server_port=7860,
379
  show_error=True,
380
- debug=False
381
  )
382
- except Exception as e:
383
- logger.error(f"Startup failed: {e}")
384
- raise
385
  finally:
386
  processor.cleanup_resources()
387
 
 
1
  #!/usr/bin/env python3
2
  """
3
+ BackgroundFX Pro – Main Application Entry Point
4
+ Refactored modular architecture – orchestrates specialised components
5
  """
6
 
7
+ # ─────────────────────────────────────────────────────────────────────────────
8
+ # 0) Early env/threading hygiene (must run first)
9
+ # ─────────────────────────────────────────────────────────────────────────────
10
  import early_env # sets OMP/MKL/OPENBLAS + torch threads safely
11
 
12
  import logging
 
14
  from pathlib import Path
15
  from typing import Optional, Tuple, Dict, Any, Callable
16
 
17
+ # ─────────────────────────────────────────────────────────────────────────────
18
  # 1) Logging
19
+ # ─────────────────────────────────────────────────────────────────────────────
20
  logging.basicConfig(
21
  level=logging.INFO,
22
+ format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
23
  )
24
  logger = logging.getLogger("core.app")
25
 
26
+ # ─────────────────────────────────────────────────────────────────────────────
27
  # 2) Patch Gradio schema early (HF Spaces quirk)
28
+ # ─────────────────────────────────────────────────────────────────────────────
29
  try:
30
  import gradio_client.utils as gc_utils
31
+
32
  _orig_get_type = gc_utils.get_type
33
 
34
  def _patched_get_type(schema):
 
47
  except Exception as e:
48
  logger.warning(f"Gradio patch failed: {e}")
49
 
50
+ # ─────────────────────────────────────────────────────────────────────────────
51
  # 3) Core config + components
52
+ # ─────────────────────────────────────────────────────────────────────────────
53
  from config.app_config import get_config
54
  from core.exceptions import ModelLoadingError, VideoProcessingError
55
  from utils.hardware.device_manager import DeviceManager
 
61
 
62
  # Optional two-stage processor
63
  try:
64
+ from processing.two_stage.two_stage_processor import (
65
+ TwoStageProcessor,
66
+ CHROMA_PRESETS,
67
+ )
68
+
69
  TWO_STAGE_AVAILABLE = True
70
  except Exception:
71
  TWO_STAGE_AVAILABLE = False
72
  CHROMA_PRESETS = {"standard": {}}
73
 
74
+ # Validation helper
75
  from utils.cv_processing import validate_video_file
76
 
77
 
78
+ # ╔══════════════════════════════════════════════════════════════════════════╗
79
+ # β•‘ VideoProcessor class β•‘
80
+ # β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•
81
  class VideoProcessor:
82
  """
83
+ Main orchestrator – coordinates all specialised components.
84
  """
85
+
86
+ # ─────────────────────────────────────────────────────────────────────
87
+ # Init
88
+ # ─────────────────────────────────────────────────────────────────────
89
  def __init__(self):
90
+ self.config = get_config()
91
  self.device_manager = DeviceManager()
 
 
92
  self.memory_manager = MemoryManager(self.device_manager.get_optimal_device())
 
 
93
  self.model_loader = ModelLoader(self.device_manager, self.memory_manager)
94
 
95
  self.audio_processor = AudioProcessor()
96
  self.core_processor: CoreVideoProcessor | None = None
97
  self.two_stage_processor: TwoStageProcessor | None = None
98
 
 
99
  self.models_loaded = False
100
  self.loading_lock = threading.Lock()
101
  self.cancel_event = threading.Event()
 
103
 
104
  logger.info(f"VideoProcessor on device: {self.device_manager.get_optimal_device()}")
105
 
106
+ # ─────────────────────────────────────────────────────────────────────
107
+ # Progress helper
108
+ # ─────────────────────────────────────────────────────────────────────
109
  def _init_progress(self, video_path: str, cb: Optional[Callable] = None):
110
  try:
111
  import cv2
112
+
113
  cap = cv2.VideoCapture(video_path)
114
  total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
115
  cap.release()
 
120
  logger.warning(f"Progress init failed: {e}")
121
  self.progress_tracker = ProgressTracker(100, cb)
122
 
123
+ # ─────────────────────────────────────────────────────────────────────
124
+ # Model loading
125
+ # ─────────────────────────────────────────────────────────────────────
126
  def load_models(self, progress_callback: Optional[Callable] = None) -> str:
127
  with self.loading_lock:
128
  if self.models_loaded:
 
131
  try:
132
  self.cancel_event.clear()
133
  if progress_callback:
134
+ progress_callback(
135
+ 0.0, f"Loading on {self.device_manager.get_optimal_device()}"
 
 
 
 
136
  )
137
+
138
+ sam2_loaded, mat_loaded = self.model_loader.load_all_models(
139
+ progress_callback=progress_callback, cancel_event=self.cancel_event
140
+ )
 
 
 
 
 
 
141
 
142
  if self.cancel_event.is_set():
143
  return "Model loading cancelled"
144
 
145
+ # Unwrap actual predictor / model objects
146
  sam2_predictor = sam2_loaded.model if sam2_loaded else None
147
  mat_model = mat_loaded.model if mat_loaded else None
 
 
148
 
149
+ # Core single-stage processor
150
  self.core_processor = CoreVideoProcessor(
151
+ config=self.config, models=self.model_loader
 
152
  )
153
 
154
+ # Two-stage processor (optional)
155
  if TWO_STAGE_AVAILABLE and (sam2_predictor or mat_model):
156
  try:
157
  self.two_stage_processor = TwoStageProcessor(
158
+ sam2_predictor=sam2_predictor, matanyone_model=mat_model
 
159
  )
160
+ logger.info("Two-stage processor initialised")
161
  except Exception as e:
162
  logger.warning(f"Two-stage init failed: {e}")
163
  self.two_stage_processor = None
164
 
165
  self.models_loaded = True
166
  msg = self.model_loader.get_load_summary()
167
+ msg += (
168
+ "\n✅ Two-stage processor ready"
169
+ if self.two_stage_processor
170
+ else "\n⚠️ Two-stage processor not available"
171
+ )
172
  logger.info(msg)
173
  return msg
174
 
 
183
  logger.error(err)
184
  return err
185
 
186
+ # ─────────────────────────────────────────────────────────────────────
187
+ # Public entry – process video
188
+ # ─────────────────────────────────────────────────────────────────────
189
  def process_video(
190
  self,
191
  video_path: str,
 
194
  progress_callback: Optional[Callable] = None,
195
  use_two_stage: bool = False,
196
  chroma_preset: str = "standard",
197
+ key_color_mode: str = "auto", # NEW
198
  preview_mask: bool = False,
199
  preview_greenscreen: bool = False,
200
  ) -> Tuple[Optional[str], str]:
201
+ """
202
+ Dispatch to single-stage or two-stage pipeline.
203
+ """
204
  if not self.models_loaded or not self.core_processor:
205
  return None, "Models not loaded. Please click β€œLoad Models” first."
 
206
  if self.cancel_event.is_set():
207
  return None, "Processing cancelled"
208
 
 
217
  if not TWO_STAGE_AVAILABLE:
218
  return None, "Two-stage processing not available on this build"
219
  if not self.two_stage_processor:
220
+ return None, "Two-stage processor not initialised"
221
+ return self._process_two_stage(
222
+ video_path,
223
+ background_choice,
224
+ custom_background_path,
225
+ progress_callback,
226
+ chroma_preset,
227
+ key_color_mode, # NEW
228
+ )
229
  else:
230
+ return self._process_single_stage(
231
+ video_path,
232
+ background_choice,
233
+ custom_background_path,
234
+ progress_callback,
235
+ preview_mask,
236
+ preview_greenscreen,
237
+ )
238
 
239
  except VideoProcessingError as e:
240
  logger.error(f"Processing failed: {e}")
 
243
  logger.error(f"Unexpected processing error: {e}")
244
  return None, f"Unexpected error: {e}"
245
 
246
+ # ─────────────────────────────────────────────────────────────────────
247
+ # Private – single-stage
248
+ # ─────────────────────────────────────────────────────────────────────
249
  def _process_single_stage(
250
  self,
251
  video_path: str,
 
255
  preview_mask: bool,
256
  preview_greenscreen: bool,
257
  ) -> Tuple[Optional[str], str]:
 
258
  import time
259
+
260
  ts = int(time.time())
261
  out_dir = Path(self.config.output_dir) / "single_stage"
262
  out_dir.mkdir(parents=True, exist_ok=True)
 
265
  result = self.core_processor.process_video(
266
  input_path=video_path,
267
  output_path=out_path,
268
+ bg_config={
269
+ "background_choice": background_choice,
270
+ "custom_path": custom_background_path,
271
+ },
272
  )
273
  if not result:
274
  return None, "Video processing failed"
275
 
276
  if not (preview_mask or preview_greenscreen):
277
+ final_path = self.audio_processor.add_audio_to_video(
278
+ original_video=video_path, processed_video=out_path
279
+ )
280
  else:
281
  final_path = out_path
282
 
 
289
  )
290
  return final_path, msg
291
 
292
+ # ─────────────────────────────────────────────────────────────────────
293
+ # Private – two-stage
294
+ # ─────────────────────────────────────────────────────────────────────
295
  def _process_two_stage(
296
  self,
297
  video_path: str,
 
299
  custom_background_path: Optional[str],
300
  progress_callback: Optional[Callable],
301
  chroma_preset: str,
302
+ key_color_mode: str, # NEW
303
  ) -> Tuple[Optional[str], str]:
304
  if self.two_stage_processor is None:
305
  return None, "Two-stage processor not available"
306
 
307
  import cv2, time
308
+
309
  cap = cv2.VideoCapture(video_path)
310
  w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
311
  h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
312
  cap.release()
313
 
314
+ background = self.core_processor.prepare_background(
315
+ background_choice, custom_background_path, w, h
316
+ )
317
  if background is None:
318
  return None, "Failed to prepare background"
319
 
 
322
  out_dir.mkdir(parents=True, exist_ok=True)
323
  final_out = str(out_dir / f"final_{ts}.mp4")
324
 
325
+ chroma_cfg = CHROMA_PRESETS.get(chroma_preset, CHROMA_PRESETS["standard"])
326
+ logger.info(f"Two-stage with preset: {chroma_preset} and key_color_mode={key_color_mode}")
327
 
328
  result, message = self.two_stage_processor.process_full_pipeline(
329
+ video_path,
330
+ background,
331
+ final_out,
332
+ key_color_mode=key_color_mode, # NEW
333
+ chroma_settings=chroma_cfg,
334
+ progress_callback=progress_callback,
335
  )
336
  if result is None:
337
  return None, message
 
344
  )
345
  return result, msg
346
 
347
+ # ─────────────────────────────────────────────────────────────────────
348
+ # Status helpers
349
+ # ─────────────────────────────────────────────────────────────────────
350
  def get_status(self) -> Dict[str, Any]:
351
  status = {
352
  "models_loaded": self.models_loaded,
353
+ "two_stage_available": TWO_STAGE_AVAILABLE
354
+ and (self.two_stage_processor is not None),
355
  "device": str(self.device_manager.get_optimal_device()),
356
  "memory_usage": self.memory_manager.get_memory_usage(),
357
  "config": self.config.to_dict(),
 
359
  }
360
  try:
361
  status["sam2_loaded"] = self.model_loader.get_sam2() is not None
362
+ status["matanyone_loaded"] = (
363
+ self.model_loader.get_matanyone() is not None
364
+ )
365
  except Exception:
366
  status["sam2_loaded"] = False
367
  status["matanyone_loaded"] = False
 
376
 
377
  def cleanup_resources(self):
378
  self.memory_manager.cleanup_aggressive()
379
+ self.model_loader.cleanup()
 
380
  logger.info("Resources cleaned up")
381
 
382
 
383
+ # ╔══════════════════════════════════════════════════════════════════════════╗
384
+ # β•‘ Singleton instance + wrappers β•‘
385
+ # β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•
386
  processor = VideoProcessor()
387
 
 
388
  def load_models_with_validation(progress_callback: Optional[Callable] = None) -> str:
389
  return processor.load_models(progress_callback)
390
 
 
395
  progress_callback: Optional[Callable] = None,
396
  use_two_stage: bool = False,
397
  chroma_preset: str = "standard",
398
+ key_color_mode: str = "auto", # NEW
399
  preview_mask: bool = False,
400
  preview_greenscreen: bool = False,
401
  ) -> Tuple[Optional[str], str]:
402
  return processor.process_video(
403
+ video_path,
404
+ background_choice,
405
+ custom_background_path,
406
+ progress_callback,
407
+ use_two_stage,
408
+ chroma_preset,
409
+ key_color_mode, # NEW
410
+ preview_mask,
411
+ preview_greenscreen,
412
  )
413
 
414
  def get_model_status() -> Dict[str, Any]:
415
  return processor.get_status()
416
 
417
  def get_cache_status() -> Dict[str, Any]:
418
+ # Placeholder – could expose FS cache size, etc.
419
  return processor.get_status()
420
 
421
  PROCESS_CANCELLED = processor.cancel_event
422
 
423
 
424
+ # ╔══════════════════════════════════════════════════════════════════════════╗
425
+ # β•‘ CLI β•‘
426
+ # β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•
427
  def main():
428
  try:
429
  logger.info("Starting BackgroundFX Pro")
430
  logger.info(f"Device: {processor.device_manager.get_optimal_device()}")
431
  logger.info(f"Two-stage available: {TWO_STAGE_AVAILABLE}")
432
 
433
+ # UI lives in ui/components.py
434
  from ui.components import create_interface
 
435
 
436
+ demo = create_interface()
437
  demo.queue().launch(
438
  server_name="0.0.0.0",
439
  server_port=7860,
440
  show_error=True,
441
+ debug=False,
442
  )
 
 
 
443
  finally:
444
  processor.cleanup_resources()
445