MogensR committed on
Commit
ed6612e
Β·
1 Parent(s): 06c1a6b

Update ui/ui_components.py

Browse files
Files changed (1) hide show
  1. ui/ui_components.py +582 -383
ui/ui_components.py CHANGED
@@ -1,398 +1,597 @@
1
  #!/usr/bin/env python3
2
  """
3
- UI Components for BackgroundFX Pro (forced Two-Stage)
4
- -----------------------------------------------------
5
- * Pure layout (tiny wrapper to set env for quality)
6
- * All heavy logic stays in ui/callbacks.py
7
- * Two-stage mode is always active (checkbox removed)
8
  """
9
-
10
  from __future__ import annotations
 
11
  import os
12
- import gradio as gr
 
 
 
 
 
13
  import logging
14
-
15
- from ui.callbacks import (
16
- cb_load_models,
17
- cb_process_video,
18
- cb_cancel,
19
- cb_status,
20
- cb_clear,
21
- cb_generate_bg,
22
- cb_use_gen_bg,
23
- cb_preset_bg_preview,
 
 
24
  )
25
-
26
- logger = logging.getLogger(__name__)
27
-
28
- # Typography & UI polish: sharper text + cleaner cards
29
- CSS = """
30
- :root {
31
- --radius: 16px;
32
- --font-sans: 'Inter', system-ui, -apple-system, 'Segoe UI', Roboto,
33
- 'Helvetica Neue', Arial, sans-serif;
34
- }
35
-
36
- /* Global crisp text */
37
- html, body, .gradio-container, .gradio-container * {
38
- font-family: var(--font-sans) !important;
39
- -webkit-font-smoothing: antialiased !important;
40
- -moz-osx-font-smoothing: grayscale !important;
41
- text-rendering: optimizeLegibility !important;
42
- font-synthesis-weight: none;
43
- }
44
-
45
- /* Headings tighter & bolder */
46
- .gradio-container h1, .gradio-container h2, .gradio-container h3 {
47
- letter-spacing: -0.01em;
48
- font-weight: 700;
49
- }
50
-
51
- /* Body copy slightly tighter */
52
- #hero .prose, .gr-markdown, .gr-text {
53
- letter-spacing: -0.003em;
54
- }
55
-
56
- /* Card look */
57
- .card {
58
- border-radius: var(--radius);
59
- border: 1px solid rgba(0,0,0,.08);
60
- padding: 16px;
61
- background: linear-gradient(180deg, rgba(255,255,255,.94), rgba(248,250,252,.94));
62
- box-shadow: 0 10px 30px rgba(0,0,0,.06);
63
- }
64
-
65
- .footer-note { opacity: 0.7; font-size: 12px; }
66
- .sm { font-size: 13px; opacity: 0.85; }
67
- #statusbox { min-height: 120px; }
68
- .preview-img { border-radius: var(--radius); border: 1px solid rgba(0,0,0,.08); }
69
-
70
- /* Buttons get a tiny weight bump for clarity */
71
- button, .gr-button { font-weight: 600; }
72
-
73
- /* Inline Quality select between buttons */
74
- .inline-quality .wrap-inner { min-width: 170px; }
75
- """
76
-
77
- # Keep in sync with utils/cv_processing.PROFESSIONAL_BACKGROUNDS
78
- _BG_CHOICES = [
79
- "minimalist",
80
- "office_modern",
81
- "studio_blue",
82
- "studio_green",
83
- "warm_gradient",
84
- "tech_dark",
85
  ]
86
- PRO_IMAGE_CHOICES = ["minimalist", "office_modern", "studio_blue", "studio_green"]
87
- GRADIENT_COLOR_CHOICES = ["warm_gradient", "tech_dark"]
88
-
89
-
90
def get_background_status(bg_method, bg_style, custom_bg_path=None, pro_image=None, gradient=None):
    """Return a one-line, human-readable summary of the active background source.

    Args:
        bg_method: Radio-button selection naming the background source method.
        bg_style: Fallback preset key (value of the hidden style dropdown).
        custom_bg_path: Path of an uploaded background image, if any.
        pro_image: Selected professional-image preset name, if any.
        gradient: Selected gradient/color preset name, if any.

    Returns:
        A "Background: ..." status string for display in the UI.
    """
    if bg_method == "Upload file" and custom_bg_path:
        # FIX: the original computed `filename` but never interpolated it,
        # returning the literal garbage "custom((unknown))".
        filename = os.path.basename(custom_bg_path)
        return f"Background: custom ({filename})"
    elif bg_method == "Pre-loaded professional images" and pro_image:
        return f"Background: {pro_image}"
    elif bg_method == "Pre-loaded Gradients / Colors" and gradient:
        return f"Background: {gradient}"
    elif bg_method == "AI generated background":
        return "Background: AI-generated"
    elif bg_style:
        # Fall back to the hidden preset key when no explicit source matched.
        return f"Background: {bg_style}"
    else:
        return "Background: none"
105
-
106
-
107
def on_quality_change(quality_value):
    """Handle the Quality dropdown: persist the choice and return a status line.

    Stores the lower-cased value in the BFX_QUALITY environment variable so
    the processing pipeline picks it up.

    Args:
        quality_value: "speed" | "balanced" | "max" (or None from Gradio).

    Returns:
        A status string describing the active quality mode.
    """
    # FIX: Gradio can deliver None; default like _run_with_quality does
    # instead of crashing on .lower().
    quality = quality_value or "balanced"
    os.environ["BFX_QUALITY"] = quality.lower()
    logging.getLogger(__name__).info(f"UI: Quality preference set to {quality}")
    # refine_every: max -> every frame, balanced -> every 3rd, else every 10th.
    refine_every = {"max": "1", "balanced": "3"}.get(quality, "10")
    return f"Quality mode: {quality} (refine_every={refine_every} frames)"
112
-
113
-
114
- def create_interface() -> gr.Blocks:
115
- with gr.Blocks(
116
- title="🎬 BackgroundFX Pro",
117
- css=CSS,
118
- analytics_enabled=False,
119
- theme=gr.themes.Soft()
120
- ) as demo:
121
-
122
- # ------------------------------------------------------------------
123
- # HERO
124
- # ------------------------------------------------------------------
125
- with gr.Row(elem_id="hero"):
126
- gr.Markdown(
127
- "## 🎬 BackgroundFX Pro (CSP-Safe)\n"
128
- "Replace your video background with cinema-quality AI matting. "
129
- "Built for Hugging Face Spaces CSP.\n\n"
130
- "_Tip: press **Load Models** once after the Space spins up._"
131
- )
132
-
133
- # ------------------------------------------------------------------
134
- # TAB – Quick Start
135
- # ------------------------------------------------------------------
136
- with gr.Tab("🏁 Quick Start"):
137
-
138
- with gr.Row():
139
- # ── Left column ────────────────────────────────────────────
140
- with gr.Column(scale=1):
141
- video = gr.Video(label="Upload Video", interactive=True)
142
-
143
- # Hidden: effective preset key (still used by callbacks / defaults)
144
- bg_style = gr.Dropdown(
145
- label="Background Style (hidden)",
146
- choices=_BG_CHOICES,
147
- value="minimalist",
148
- visible=False,
149
- )
150
-
151
- # =======================
152
- # Background Source block
153
- # =======================
154
- gr.Markdown("### πŸ–ΌοΈ Background Source")
155
-
156
- bg_method = gr.Radio(
157
- label="Choose method",
158
- choices=[
159
- "Upload file",
160
- "Pre-loaded professional images",
161
- "Pre-loaded Gradients / Colors",
162
- "AI generated background",
163
- ],
164
- value="Upload file",
165
- )
166
-
167
- # a) Upload file option
168
- with gr.Group(visible=True) as grp_upload:
169
- custom_bg = gr.Image(
170
- label="Upload Background Image",
171
- interactive=True,
172
- type="filepath", # returns file path
173
- elem_classes=["preview-img"]
174
- )
175
-
176
- # b) Pre-loaded professional images
177
- with gr.Group(visible=False) as grp_pro_images:
178
- pro_image_dd = gr.Dropdown(
179
- label="Professional Images",
180
- choices=PRO_IMAGE_CHOICES,
181
- value=PRO_IMAGE_CHOICES[0],
182
- info="Pre-defined photo-like backgrounds",
183
- )
184
- gr.Markdown(
185
- "<span class='sm'>Selecting a preset updates the preview below.</span>"
186
- )
187
-
188
- # c) Pre-loaded gradients & full colors
189
- with gr.Group(visible=False) as grp_gradients:
190
- gradient_dd = gr.Dropdown(
191
- label="Gradients & Full Colors",
192
- choices=GRADIENT_COLOR_CHOICES,
193
- value=GRADIENT_COLOR_CHOICES[0],
194
- info="Clean gradients or solid color styles",
195
- )
196
- gr.Markdown(
197
- "<span class='sm'>Selecting a preset updates the preview below.</span>"
198
- )
199
-
200
- # d) AI-generated background (inline, lightweight)
201
- with gr.Group(visible=False) as grp_ai:
202
- prompt = gr.Textbox(
203
- label="Describe vibe",
204
- value="modern office",
205
- info="e.g. 'soft sunset studio', 'cool tech dark', 'forest ambience'"
206
- )
207
- with gr.Row():
208
- gen_width = gr.Slider(640, 1920, 1280, step=10, label="Width")
209
- gen_height = gr.Slider(360, 1080, 720, step=10, label="Height")
210
- with gr.Row():
211
- bokeh = gr.Slider(0, 30, 8, step=1, label="Bokeh Blur")
212
- vignette = gr.Slider(0, 0.6, 0.15, step=0.01, label="Vignette")
213
- contrast = gr.Slider(0.8, 1.4, 1.05, step=0.01, label="Contrast")
214
- with gr.Row():
215
- btn_gen_bg_inline = gr.Button("✨ Generate Background", variant="primary")
216
- use_gen_as_custom_inline = gr.Button("πŸ“Œ Use as Custom Background", variant="secondary")
217
- gen_preview = gr.Image(
218
- label="Generated Background",
219
- interactive=False,
220
- elem_classes=["preview-img"]
221
- )
222
- gen_path = gr.Textbox(label="Saved Path", interactive=False)
223
-
224
- # ── Advanced options accordion ───────────────────────
225
- with gr.Accordion("Advanced", open=False):
226
- chroma_preset = gr.Dropdown(
227
- label="Chroma Preset",
228
- choices=["standard"], # can add 'studio', 'outdoor' later
229
- value="standard"
230
- )
231
- key_color_mode = gr.Dropdown(
232
- label="Key-Colour Mode",
233
- choices=["auto", "green", "blue", "cyan", "magenta"],
234
- value="auto",
235
- info="Auto picks a colour far from your clothes; override if needed."
236
- )
237
- preview_mask = gr.Checkbox(
238
- label="Preview Mask only (mute audio)",
239
- value=False
240
  )
241
- preview_greenscreen = gr.Checkbox(
242
- label="Preview Green-screen only (mute audio)",
243
- value=False
244
- )
245
-
246
- # ── Controls row: Load β†’ Quality β†’ Process / Cancel ──
247
- with gr.Row():
248
- btn_load = gr.Button("πŸ”„ Load Models", variant="secondary")
249
-
250
- quality = gr.Dropdown(
251
- label="Quality",
252
- choices=["speed", "balanced", "max"],
253
- value=os.getenv("BFX_QUALITY", "balanced"),
254
- info="Speed = fastest; Max = best edges & spill control.",
255
- elem_classes=["inline-quality"],
256
- )
257
-
258
- btn_run = gr.Button("🎬 Process Video", variant="primary")
259
- btn_cancel = gr.Button("⏹️ Cancel", variant="secondary")
260
-
261
- # ── Right column ──────────────────────────────────────────
262
- with gr.Column(scale=1):
263
- out_video = gr.Video(label="Processed Output", interactive=False)
264
- statusbox = gr.Textbox(label="Status", lines=8, elem_id="statusbox")
265
- quality_status = gr.Textbox(label="Quality Settings", lines=1, value="Quality mode: balanced")
266
- with gr.Row():
267
- btn_refresh = gr.Button("πŸ” Refresh Status", variant="secondary")
268
- btn_clear = gr.Button("🧹 Clear", variant="secondary")
269
-
270
- # ------------------------------------------------------------------
271
- # TAB – Status & settings
272
- # ------------------------------------------------------------------
273
- with gr.Tab("πŸ“ˆ Status & Settings"):
274
- with gr.Row():
275
- with gr.Column(scale=1, elem_classes=["card"]):
276
- model_status = gr.JSON(label="Model Status")
277
- with gr.Column(scale=1, elem_classes=["card"]):
278
- cache_status = gr.JSON(label="Cache / System Status")
279
-
280
- gr.Markdown(
281
- "<div class='footer-note'>If models fail to load, fallbacks keep the UI responsive. "
282
- "Check the runtime log for details.</div>"
283
- )
284
-
285
- # ------------------------------------------------------------------
286
- # Callback wiring
287
- # ------------------------------------------------------------------
288
-
289
- # Toggle which background sub-section is visible
290
- def _toggle_bg_sections(choice: str):
291
- return (
292
- gr.update(visible=(choice == "Upload file")),
293
- gr.update(visible=(choice == "Pre-loaded professional images")),
294
- gr.update(visible=(choice == "Pre-loaded Gradients / Colors")),
295
- gr.update(visible=(choice == "AI generated background")),
296
- )
297
-
298
- bg_method.change(
299
- _toggle_bg_sections,
300
- inputs=[bg_method],
301
- outputs=[grp_upload, grp_pro_images, grp_gradients, grp_ai],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
302
  )
303
-
304
- # Load models
305
- btn_load.click(cb_load_models, outputs=statusbox)
306
-
307
- # Quality dropdown handler - updates environment and status
308
- quality.change(
309
- on_quality_change,
310
- inputs=[quality],
311
- outputs=[quality_status]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
312
  )
313
-
314
- # Tiny wrapper to set env for quality before calling the existing callback
315
- def _run_with_quality(video_pth, bg_style_val, custom_bg_pth,
316
- use_two_stage_state, chroma_p, key_mode,
317
- prev_mask, prev_gs, quality_val, bg_method_val,
318
- pro_image_val, gradient_val):
319
- os.environ["BFX_QUALITY"] = (quality_val or "balanced")
320
-
321
- # Update status with background info
322
- bg_status = get_background_status(
323
- bg_method_val, bg_style_val, custom_bg_pth,
324
- pro_image_val, gradient_val
325
- )
326
-
327
- result = cb_process_video(
328
- video_pth, bg_style_val, custom_bg_pth,
329
- use_two_stage_state, chroma_p, key_mode, prev_mask, prev_gs
 
 
 
 
 
 
 
 
 
 
 
 
330
  )
331
-
332
- # Append background status to the result message
333
- if isinstance(result, tuple) and len(result) >= 2:
334
- video_out, msg = result[0], result[1]
335
- return video_out, f"{msg}\n{bg_status}"
336
- return result
337
-
338
- # Always two-stage: pass use_two_stage=True to callback via State
339
- btn_run.click(
340
- _run_with_quality,
341
- inputs=[
342
- video,
343
- bg_style,
344
- custom_bg,
345
- gr.State(value=True), # Always two-stage
346
- chroma_preset,
347
- key_color_mode,
348
- preview_mask,
349
- preview_greenscreen,
350
- quality, # <-- the new control
351
- bg_method, # Added for status
352
- pro_image_dd, # Added for status
353
- gradient_dd, # Added for status
354
- ],
355
- outputs=[out_video, statusbox],
356
  )
357
-
358
- # Cancel / Status / Clear
359
- btn_cancel.click(cb_cancel, outputs=statusbox)
360
- btn_refresh.click(cb_status, outputs=[model_status, cache_status])
361
-
362
- btn_clear.click(
363
- cb_clear,
364
- outputs=[out_video, statusbox, gen_preview, gen_path, custom_bg]
365
- )
366
-
367
- # Preloaded presets β†’ update preview (write into custom_bg)
368
- pro_image_dd.change(
369
- cb_preset_bg_preview,
370
- inputs=[pro_image_dd],
371
- outputs=[custom_bg],
372
- )
373
- gradient_dd.change(
374
- cb_preset_bg_preview,
375
- inputs=[gradient_dd],
376
- outputs=[custom_bg],
377
- )
378
-
379
- # AI background generation (inline)
380
- btn_gen_bg_inline.click(
381
- cb_generate_bg,
382
- inputs=[prompt, gen_width, gen_height, bokeh, vignette, contrast],
383
- outputs=[gen_preview, gen_path],
384
- )
385
- use_gen_as_custom_inline.click(
386
- cb_use_gen_bg,
387
- inputs=[gen_path],
388
- outputs=[custom_bg],
389
  )
390
-
391
- # Initialize with a default preset preview on load
392
- demo.load(
393
- cb_preset_bg_preview,
394
- inputs=[bg_style],
395
- outputs=[custom_bg]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
396
  )
397
-
398
- return demo
 
 
 
1
  #!/usr/bin/env python3
2
  """
3
+ BackgroundFX Pro – Main Application Entry Point
4
+ Refactored modular architecture – orchestrates specialised components
 
 
 
5
  """
 
6
  from __future__ import annotations
7
+ # ── Early env/threading hygiene (safe default to silence libgomp) ────────────
8
  import os
9
+ os.environ["OMP_NUM_THREADS"] = "2" # Force valid value early
10
+ # If you use early_env in your project, keep this import (harmless if absent)
11
+ try:
12
+ import early_env # sets OMP/MKL/OPENBLAS + torch threads safely
13
+ except Exception:
14
+ pass
15
  import logging
16
+ import threading
17
+ import traceback
18
+ import sys
19
+ from pathlib import Path
20
+ from typing import Optional, Tuple, Dict, Any, Callable
21
+ # Mitigate CUDA fragmentation (must be set before importing torch)
22
+ if "PYTORCH_CUDA_ALLOC_CONF" not in os.environ:
23
+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True,max_split_size_mb:128"
24
+ # ── Logging ──────────────────────────────────────────────────────────────────
25
+ logging.basicConfig(
26
+ level=logging.INFO,
27
+ format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
28
  )
29
+ logger = logging.getLogger("core.app")
30
+ # ── Ensure project root importable ───────────────────────────────────────────
31
+ PROJECT_FILE = Path(__file__).resolve()
32
+ CORE_DIR = PROJECT_FILE.parent
33
+ ROOT = CORE_DIR.parent
34
+ if str(ROOT) not in sys.path:
35
+ sys.path.insert(0, str(ROOT))
36
+ # Create loader directories if they don't exist
37
+ loaders_dir = ROOT / "models" / "loaders"
38
+ loaders_dir.mkdir(parents=True, exist_ok=True)
39
+ # ── Gradio schema patch (HF quirk) ───────────────────────────────────────────
40
+ try:
41
+ import gradio_client.utils as gc_utils
42
+ _orig_get_type = gc_utils.get_type
43
def _patched_get_type(schema):
    """Map a JSON-schema node to a Gradio type name, tolerating non-dict nodes.

    Hugging Face sometimes emits bare booleans/strings/numbers where a
    schema dict is expected; coerce those instead of raising, and defer
    to the original implementation for real dict schemas.
    """
    if isinstance(schema, dict):
        return _orig_get_type(schema)
    # bool must be tested before int/float: bool is a subclass of int.
    if isinstance(schema, bool):
        return "boolean"
    if isinstance(schema, str):
        return "string"
    if isinstance(schema, (int, float)):
        return "number"
    return "string"
50
+ gc_utils.get_type = _patched_get_type
51
+ logger.info("Gradio schema patch applied")
52
+ except Exception as e:
53
+ logger.warning(f"Gradio patch failed: {e}")
54
+ # ── Core config + components ─────────────────────────────────────────────────
55
+ try:
56
+ from config.app_config import get_config
57
+ except ImportError:
58
+ # Dummy if missing
59
+ class DummyConfig:
60
+ def to_dict(self):
61
+ return {}
62
+ get_config = lambda: DummyConfig()
63
+ from utils.hardware.device_manager import DeviceManager
64
+ from utils.system.memory_manager import MemoryManager
65
+ # Try to import the new split loaders first, fall back to old if needed
66
+ try:
67
+ from models.loaders.model_loader import ModelLoader
68
+ logger.info("Using split loader architecture")
69
+ except ImportError:
70
+ logger.warning("Split loaders not found, using legacy loader")
71
+ # Fall back to old loader if split architecture isn't available yet
72
+ from models.model_loader import ModelLoader # type: ignore
73
+ from processing.video.video_processor import CoreVideoProcessor
74
+ from processing.audio.audio_processor import AudioProcessor
75
+ from utils.monitoring.progress_tracker import ProgressTracker
76
+ from utils.cv_processing import validate_video_file
77
+ # ── Optional Two-Stage import ────────────────────────────────────────────────
78
+ TWO_STAGE_AVAILABLE = False
79
+ TWO_STAGE_IMPORT_ORIGIN = ""
80
+ TWO_STAGE_IMPORT_ERROR = ""
81
+ CHROMA_PRESETS: Dict[str, Dict[str, Any]] = {"standard": {}}
82
+ TwoStageProcessor = None # type: ignore
83
+ # Try multiple import paths for two-stage processor
84
+ two_stage_paths = [
85
+ "processors.two_stage", # Your fixed version
86
+ "processing.two_stage.two_stage_processor",
87
+ "processing.two_stage",
 
88
  ]
89
+ for import_path in two_stage_paths:
90
+ try:
91
+ exec(f"from {import_path} import TwoStageProcessor, CHROMA_PRESETS")
92
+ TWO_STAGE_AVAILABLE = True
93
+ TWO_STAGE_IMPORT_ORIGIN = import_path
94
+ logger.info(f"Two-stage import OK ({import_path})")
95
+ break
96
+ except Exception as e:
97
+ TWO_STAGE_IMPORT_ERROR = str(e)
98
+ continue
99
+ if not TWO_STAGE_AVAILABLE:
100
+ logger.warning(f"Two-stage import FAILED from all paths: {TWO_STAGE_IMPORT_ERROR}")
101
+ # ── Quiet startup self-check (async by default) ──────────────────────────────
102
+ # Place the helper in tools/startup_selfcheck.py (with tools/__init__.py present)
103
+ try:
104
+ from tools.startup_selfcheck import schedule_startup_selfcheck
105
+ except Exception:
106
+ schedule_startup_selfcheck = None # graceful if the helper isn't shipped
107
+ # Dummy exceptions if core.exceptions not available
108
+ class ModelLoadingError(Exception):
109
+ pass
110
+ class VideoProcessingError(Exception):
111
+ pass
112
+ # ╔══════════════════════════════════════════════════════════════════════════╗
113
+ # β•‘ VideoProcessor class β•‘
114
+ # β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•
115
+ class VideoProcessor:
116
+ """
117
+ Main orchestrator – coordinates all specialised components.
118
+ """
119
def __init__(self):
    """Wire together configuration, hardware, model, and A/V components."""
    # Configuration (patched so older config objects gain missing fields
    # instead of raising AttributeError later).
    self.config = get_config()
    self._patch_config_defaults(self.config)

    # Hardware / memory management.
    self.device_manager = DeviceManager()
    device = self.device_manager.get_optimal_device()
    self.memory_manager = MemoryManager(device)

    # Model loading and audio muxing.
    self.model_loader = ModelLoader(self.device_manager, self.memory_manager)
    self.audio_processor = AudioProcessor()

    # Processors are created later, in load_models().
    self.core_processor: Optional[CoreVideoProcessor] = None
    self.two_stage_processor: Optional[Any] = None
    self.models_loaded = False

    # Concurrency and progress state.
    self.loading_lock = threading.Lock()
    self.cancel_event = threading.Event()
    self.progress_tracker: Optional[ProgressTracker] = None

    logger.info(f"VideoProcessor on device: {device}")
133
+ # ── Config hardening: add missing fields safely ───────────────────────────
134
@staticmethod
def _patch_config_defaults(cfg: Any) -> None:
    """Backfill config attributes that older config objects may lack.

    Mutates *cfg* in place (only attributes that are missing are added),
    then makes sure the output directory exists.
    """
    defaults = {
        # video / i/o
        "use_nvenc": False,
        "prefer_mp4": True,
        "video_codec": "mp4v",
        "audio_copy": True,
        "ffmpeg_path": "ffmpeg",
        # model/resource guards
        "max_model_size": 0,
        "max_model_size_bytes": 0,
        # housekeeping
        "output_dir": str(Path(__file__).resolve().parent.parent / "outputs"),
        # MatAnyone settings to ensure it's enabled
        "matanyone_enabled": True,
        "use_matanyone": True,
    }
    for name, value in defaults.items():
        if not hasattr(cfg, name):
            setattr(cfg, name, value)
    Path(cfg.output_dir).mkdir(parents=True, exist_ok=True)
156
+ # ── Progress helper ───────────────────────────────────────────────────────
157
def _init_progress(self, video_path: str, cb: Optional[Callable] = None):
    """Create a ProgressTracker sized to the video's frame count.

    Falls back to a 100-step tracker when the frame count cannot be read
    (missing cv2, unreadable file, or a zero/negative frame count).
    """
    try:
        import cv2
        cap = cv2.VideoCapture(video_path)
        total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        cap.release()
        self.progress_tracker = ProgressTracker(total if total > 0 else 100, cb)
    except Exception as e:
        logger.warning(f"Progress init failed: {e}")
        self.progress_tracker = ProgressTracker(100, cb)
169
+ # ── Model loading ─────────────────────────────────────────────────────────
170
def load_models(self, progress_callback: Optional[Callable] = None) -> str:
    """Load SAM2 + MatAnyone and build the processing pipelines.

    Thread-safe (guarded by self.loading_lock) and idempotent: once models
    are loaded, subsequent calls return immediately.

    Args:
        progress_callback: optional (fraction, message) progress sink.

    Returns:
        A human-readable status/summary string (also written to the log).
    """
    with self.loading_lock:
        if self.models_loaded:
            return "Models already loaded and validated"
        try:
            self.cancel_event.clear()
            if progress_callback:
                progress_callback(0.0, f"Loading on {self.device_manager.get_optimal_device()}")
            sam2_loaded, mat_loaded = self.model_loader.load_all_models(
                progress_callback=progress_callback, cancel_event=self.cancel_event
            )
            if self.cancel_event.is_set():
                return "Model loading cancelled"

            # Unwrap the load results. NOTE: with our MatAnyone loader the
            # "model" is a stateful callable adapter, not a bare nn.Module.
            sam2_predictor = sam2_loaded.model if sam2_loaded else None
            mat_model = mat_loaded.model if mat_loaded else None

            # The single-stage core processor is always available.
            self.core_processor = CoreVideoProcessor(config=self.config, models=self.model_loader)

            # Two-stage is optional: requires the import to have succeeded
            # and at least one of the two models to be present.
            self.two_stage_processor = None
            if TWO_STAGE_AVAILABLE and TwoStageProcessor and (sam2_predictor or mat_model):
                try:
                    self.two_stage_processor = TwoStageProcessor(
                        sam2_predictor=sam2_predictor, matanyone_model=mat_model
                    )
                    logger.info("Two-stage processor initialised")
                except Exception as e:
                    logger.warning(f"Two-stage init failed: {e}")
                    self.two_stage_processor = None

            self.models_loaded = True

            # Assemble the status message shown in the UI.
            msg = self.model_loader.get_load_summary()
            msg += ("\n✅ Two-stage processor ready" if self.two_stage_processor
                    else "\n⚠️ Two-stage processor not available")
            msg += ("\n✅ MatAnyone refinement active" if mat_model
                    else "\n⚠️ MatAnyone not loaded (edges may be rough)")
            logger.info(msg)
            return msg
        except (AttributeError, ModelLoadingError) as e:
            self.models_loaded = False
            err = f"Model loading failed: {e}"
            logger.error(err)
            return err
        except Exception as e:
            self.models_loaded = False
            err = f"Unexpected error during model loading: {e}"
            logger.error(f"{err}\n{traceback.format_exc()}")
            return err
225
+ # ── Public entry – process video ─────────────────────────────────────────
226
def process_video(
    self,
    video_path: str,
    background_choice: str,
    custom_background_path: Optional[str] = None,
    progress_callback: Optional[Callable] = None,
    use_two_stage: bool = False,
    chroma_preset: str = "standard",
    key_color_mode: str = "auto",
    preview_mask: bool = False,
    preview_greenscreen: bool = False,
) -> Tuple[Optional[str], Optional[str], str]:
    """Process a video, replacing its background.

    Normalizes whatever Gradio handed us as the custom background
    (dict payloads, PIL images, stale temp paths), validates inputs,
    then dispatches to the single- or two-stage pipeline.

    Returns:
        (final_video_path, greenscreen_path_or_None, status_message);
        the first two are None on failure.
    """
    # ===== BACKGROUND PATH DEBUG & FIX =====
    logger.info("=" * 60)
    logger.info("BACKGROUND PATH DEBUGGING")
    logger.info(f"background_choice: {background_choice}")
    logger.info(f"custom_background_path type: {type(custom_background_path)}")
    logger.info(f"custom_background_path value: {custom_background_path}")

    # Fix 1: Handle if Gradio sends a dict
    if isinstance(custom_background_path, dict):
        original = custom_background_path
        custom_background_path = custom_background_path.get('name') or custom_background_path.get('path')
        logger.info(f"Extracted path from dict: {original} -> {custom_background_path}")

    # Fix 2: Handle PIL Image objects (save to a temp file so downstream
    # code only ever sees a path).
    try:
        from PIL import Image
        if isinstance(custom_background_path, Image.Image):
            import tempfile
            with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as tmp:
                custom_background_path.save(tmp.name)
                custom_background_path = tmp.name
            logger.info(f"Saved PIL Image to: {custom_background_path}")
    except ImportError:
        pass

    # Fix 3: Verify file exists when using custom background
    if background_choice == "custom" or custom_background_path:
        if custom_background_path:
            if Path(custom_background_path).exists():
                logger.info(f"✅ Background file exists: {custom_background_path}")
            else:
                logger.warning(f"⚠️ Background file does not exist: {custom_background_path}")
                # Try to find it in Gradio temp directories
                import glob
                import time  # FIX: time.time() below raised NameError — `time` was never imported in this scope
                patterns = [
                    "/tmp/gradio*/**/*.jpg",
                    "/tmp/gradio*/**/*.jpeg",
                    "/tmp/gradio*/**/*.png",
                    "/tmp/**/*.jpg",
                    "/tmp/**/*.jpeg",
                    "/tmp/**/*.png",
                ]
                for pattern in patterns:
                    files = glob.glob(pattern, recursive=True)
                    if files:
                        # Get the most recent file
                        newest = max(files, key=os.path.getmtime)
                        logger.info(f"Found potential background: {newest}")
                        # Only use it if it was created in the last 5 minutes
                        if (time.time() - os.path.getmtime(newest)) < 300:
                            custom_background_path = newest
                            logger.info(f"✅ Using recent temp file: {custom_background_path}")
                            break
        else:
            logger.error("❌ Custom background mode but path is None!")

    logger.info(f"Final custom_background_path: {custom_background_path}")
    logger.info("=" * 60)

    # Guards: models must be loaded and the job not already cancelled.
    if not self.models_loaded or not self.core_processor:
        return None, None, "Models not loaded. Please click 'Load Models' first."
    if self.cancel_event.is_set():
        return None, None, "Processing cancelled"
    self._init_progress(video_path, progress_callback)
    ok, why = validate_video_file(video_path)
    if not ok:
        return None, None, f"Invalid video: {why}"
    try:
        # Log which mode we're using
        mode = "two-stage" if use_two_stage else "single-stage"
        matanyone_status = "enabled" if self.model_loader.get_matanyone() else "disabled"
        logger.info(f"Processing video in {mode} mode, MatAnyone: {matanyone_status}")
        # IMPORTANT: start each video with a clean MatAnyone memory
        self._reset_matanyone_session()
        if use_two_stage:
            if not TWO_STAGE_AVAILABLE or self.two_stage_processor is None:
                return None, None, "Two-stage processing not available"
            final, green, msg = self._process_two_stage(
                video_path,
                background_choice,
                custom_background_path,
                progress_callback,
                chroma_preset,
                key_color_mode,
            )
            return final, green, msg
        else:
            final, green, msg = self._process_single_stage(
                video_path,
                background_choice,
                custom_background_path,
                progress_callback,
                preview_mask,
                preview_greenscreen,
            )
            return final, green, msg
    except VideoProcessingError as e:
        logger.error(f"Processing failed: {e}")
        return None, None, f"Processing failed: {e}"
    except Exception as e:
        logger.error(f"Unexpected processing error: {e}\n{traceback.format_exc()}")
        return None, None, f"Unexpected error: {e}"
341
+ # ── Private – per-video MatAnyone reset ──────────────────────────────────
342
def _reset_matanyone_session(self):
    """Clear MatAnyone's memory so each new video starts from a clean state.

    The MatAnyone loader we use returns a callable *stateful adapter*;
    when it exposes reset(), calling it clears its InferenceCore memory.
    Best-effort: any failure is logged and processing continues.
    """
    try:
        mat = self.model_loader.get_matanyone()
    except Exception:
        mat = None
    reset_fn = getattr(mat, "reset", None) if mat is not None else None
    if callable(reset_fn):
        try:
            reset_fn()
            logger.info("MatAnyone session reset for new video")
        except Exception as e:
            logger.warning(f"MatAnyone session reset failed (continuing): {e}")
357
+ # ── Private – single-stage ───────────────────────────────────────────────
358
def _process_single_stage(
    self,
    video_path: str,
    background_choice: str,
    custom_background_path: Optional[str],
    progress_callback: Optional[Callable],
    preview_mask: bool,
    preview_greenscreen: bool,
) -> Tuple[Optional[str], Optional[str], str]:
    """Run the single-pass pipeline and mux the original audio back in.

    Returns (final_path, None, status) — single-stage never produces a
    green-screen intermediate, hence the None middle element.
    """
    import time

    logger.info(f"[Single-stage] background_choice: {background_choice}")
    logger.info(f"[Single-stage] custom_background_path: {custom_background_path}")

    # Timestamped output file under <output_dir>/single_stage/.
    out_dir = Path(self.config.output_dir) / "single_stage"
    out_dir.mkdir(parents=True, exist_ok=True)
    out_path = str(out_dir / f"processed_{int(time.time())}.mp4")

    # Delegate the per-frame work to CoreVideoProcessor.
    result = self.core_processor.process_video(
        input_path=video_path,
        output_path=out_path,
        bg_config={
            "background_choice": background_choice,
            "custom_path": custom_background_path,
        },
        progress_callback=progress_callback,
    )
    if not result:
        return None, None, "Video processing failed"

    # Mux audio back in unless a silent preview mode was requested.
    final_path = out_path
    if not (preview_mask or preview_greenscreen):
        try:
            final_path = self.audio_processor.add_audio_to_video(
                original_video=video_path, processed_video=out_path
            )
        except Exception as e:
            logger.warning(f"Audio mux failed, returning video without audio: {e}")
            final_path = out_path

    # Build the status message.
    try:
        mat_loaded = bool(self.model_loader.get_matanyone())
    except Exception:
        mat_loaded = False
    msg = (
        "Processing completed.\n"
        f"Frames: {result.get('frames', 'unknown')}\n"
        f"Background: {background_choice}\n"
        f"Mode: Single-stage\n"
        f"MatAnyone: {'✓' if mat_loaded else '✗'}\n"
        f"Device: {self.device_manager.get_optimal_device()}"
    )
    return final_path, None, msg  # No green in single-stage
416
+ # ── Private – two-stage ─────────────────────────────────────────────────
417
+ def _process_two_stage(
418
+ self,
419
+ video_path: str,
420
+ background_choice: str,
421
+ custom_background_path: Optional[str],
422
+ progress_callback: Optional[Callable],
423
+ chroma_preset: str,
424
+ key_color_mode: str,
425
+ ) -> Tuple[Optional[str], Optional[str], str]:
426
+ if self.two_stage_processor is None:
427
+ return None, None, "Two-stage processor not available"
428
+
429
+ # Additional debug logging for two-stage
430
+ logger.info(f"[Two-stage] background_choice: {background_choice}")
431
+ logger.info(f"[Two-stage] custom_background_path: {custom_background_path}")
432
+
433
+ import cv2, time
434
+ cap = cv2.VideoCapture(video_path)
435
+ if not cap.isOpened():
436
+ return None, None, "Could not open input video"
437
+ w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) or 1280
438
+ h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) or 720
439
+ cap.release()
440
+ # Prepare background
441
+ try:
442
+ background = self.core_processor.prepare_background(
443
+ background_choice, custom_background_path, w, h
444
  )
445
+ except Exception as e:
446
+ logger.error(f"Background preparation failed: {e}")
447
+ return None, None, f"Failed to prepare background: {e}"
448
+ if background is None:
449
+ return None, None, "Failed to prepare background"
450
+ ts = int(time.time())
451
+ out_dir = Path(self.config.output_dir) / "two_stage"
452
+ out_dir.mkdir(parents=True, exist_ok=True)
453
+ final_out = str(out_dir / f"final_{ts}.mp4")
454
+ chroma_cfg = CHROMA_PRESETS.get(chroma_preset, CHROMA_PRESETS.get("standard", {}))
455
+ logger.info(f"Two-stage with preset: {chroma_preset} | key_color: {key_color_mode}")
456
+ # (Per-video reset already called in process_video)
457
+ final_path, green_path, stage2_msg = self.two_stage_processor.process_full_pipeline(
458
+ video_path,
459
+ background,
460
+ final_out,
461
+ key_color_mode=key_color_mode,
462
+ chroma_settings=chroma_cfg,
463
+ progress_callback=progress_callback,
 
 
 
 
 
 
464
  )
465
+ if final_path is None:
466
+ return None, None, stage2_msg
467
+ # Mux audio
468
+ try:
469
+ final_with_audio = self.audio_processor.add_audio_to_video(
470
+ original_video=video_path, processed_video=final_path
471
+ )
472
+ except Exception as e:
473
+ logger.warning(f"Audio mux failed: {e}")
474
+ final_with_audio = final_path
475
+ try:
476
+ mat_loaded = bool(self.model_loader.get_matanyone())
477
+ except Exception:
478
+ mat_loaded = False
479
+ matanyone_status = "βœ“" if mat_loaded else "βœ—"
480
+ msg = (
481
+ "Two-stage processing completed.\n"
482
+ f"Background: {background_choice}\n"
483
+ f"Chroma Preset: {chroma_preset}\n"
484
+ f"MatAnyone: {matanyone_status}\n"
485
+ f"Device: {self.device_manager.get_optimal_device()}"
 
 
 
 
 
 
 
 
 
 
 
486
  )
487
+ return final_with_audio, green_path, msg
488
+ # ── Status helpers ───────────────────────────────────────────────────────
489
+ def get_status(self) -> Dict[str, Any]:
490
+ status = {
491
+ "models_loaded": self.models_loaded,
492
+ "two_stage_available": bool(TWO_STAGE_AVAILABLE and self.two_stage_processor),
493
+ "two_stage_origin": TWO_STAGE_IMPORT_ORIGIN or "",
494
+ "device": str(self.device_manager.get_optimal_device()),
495
+ "core_processor_loaded": self.core_processor is not None,
496
+ "config": self._safe_config_dict(),
497
+ "memory_usage": self._safe_memory_usage(),
498
+ }
499
+
500
+ try:
501
+ status["sam2_loaded"] = self.model_loader.get_sam2() is not None
502
+ status["matanyone_loaded"] = self.model_loader.get_matanyone() is not None
503
+ status["model_info"] = self.model_loader.get_model_info()
504
+ except Exception:
505
+ status["sam2_loaded"] = False
506
+ status["matanyone_loaded"] = False
507
+ if self.progress_tracker:
508
+ status["progress"] = self.progress_tracker.get_all_progress()
509
+
510
+ return status
511
+ def _safe_config_dict(self) -> Dict[str, Any]:
512
+ try:
513
+ return self.config.to_dict()
514
+ except Exception:
515
+ keys = ["use_nvenc", "prefer_mp4", "video_codec", "audio_copy",
516
+ "ffmpeg_path", "max_model_size", "max_model_size_bytes",
517
+ "output_dir", "matanyone_enabled"]
518
+ return {k: getattr(self.config, k, None) for k in keys}
519
+ def _safe_memory_usage(self) -> Dict[str, Any]:
520
+ try:
521
+ return self.memory_manager.get_memory_usage()
522
+ except Exception:
523
+ return {}
524
    def cancel_processing(self):
        """Signal any in-flight processing loop to stop at its next
        cancellation checkpoint (cooperative, via the shared event)."""
        self.cancel_event.set()
        logger.info("Cancellation requested")
527
+ def cleanup_resources(self):
528
+ try:
529
+ self.memory_manager.cleanup_aggressive()
530
+ except Exception:
531
+ pass
532
+ try:
533
+ self.model_loader.cleanup()
534
+ except Exception:
535
+ pass
536
+ logger.info("Resources cleaned up")
537
# ── Singleton + thin wrappers (used by UI callbacks) ────────────────────────
# Module-level singleton: UI callbacks share one processor (and one cancel
# event) across the whole app lifetime.
processor = VideoProcessor()
def load_models_with_validation(progress_callback: Optional[Callable] = None) -> str:
    """Load models on the shared processor; returns a status string."""
    return processor.load_models(progress_callback)
def process_video_fixed(
    video_path: str,
    background_choice: str,
    custom_background_path: Optional[str],
    progress_callback: Optional[Callable] = None,
    use_two_stage: bool = False,
    chroma_preset: str = "standard",
    key_color_mode: str = "auto",
    preview_mask: bool = False,
    preview_greenscreen: bool = False,
) -> Tuple[Optional[str], Optional[str], str]:
    """Delegate to ``processor.process_video`` with identical arguments.

    Returns ``(final_path, greenscreen_path, status_message)``; the first two
    are ``None`` on failure.
    """
    return processor.process_video(
        video_path,
        background_choice,
        custom_background_path,
        progress_callback,
        use_two_stage,
        chroma_preset,
        key_color_mode,
        preview_mask,
        preview_greenscreen,
    )
def get_model_status() -> Dict[str, Any]:
    """Snapshot of processor/model state (see ``VideoProcessor.get_status``)."""
    return processor.get_status()
def get_cache_status() -> Dict[str, Any]:
    """Alias of ``get_model_status`` — kept for backward compatibility."""
    return processor.get_status()
# Shared cancellation event, exposed for UI callbacks that poll/clear it.
PROCESS_CANCELLED = processor.cancel_event
568
# ── CLI entrypoint (must exist; app.py imports main) ─────────────────────────
def main():
    """Launch the Gradio app; always releases processor resources on exit.

    Order matters here: startup logging, then the optional self-check,
    loader probing, interface construction, and the blocking ``launch()``.
    The ``finally`` guarantees cleanup even if launch raises.
    """
    try:
        logger.info("Starting BackgroundFX Pro")
        logger.info(f"Device: {processor.device_manager.get_optimal_device()}")
        logger.info(f"Two-stage available: {TWO_STAGE_AVAILABLE}")
        # πŸ”Ή Quiet model self-check (defaults to async; set SELF_CHECK_MODE=sync to block)
        if schedule_startup_selfcheck is not None:
            try:
                schedule_startup_selfcheck(mode=os.getenv("SELF_CHECK_MODE", "async"))
            except Exception as e:
                logger.error(f"Startup self-check skipped: {e}", exc_info=True)
        # Log model loader type
        try:
            # Capability probe only: the imported name is intentionally unused;
            # success/failure of the import selects which log line is emitted.
            from models.loaders.model_loader import ModelLoader
            logger.info("Using split loader architecture")
        except Exception:
            logger.info("Using legacy loader")
        # Import here (not at module top) to avoid a circular import with the
        # UI module, which imports callbacks from this file.
        from ui.ui_components import create_interface
        demo = create_interface()
        demo.queue().launch(
            server_name="0.0.0.0",
            server_port=7860,
            show_error=True,
            debug=False,
        )
    finally:
        processor.cleanup_resources()
if __name__ == "__main__":
    main()