MogensR committed on
Commit
39171fb
·
1 Parent(s): 7c2f655

Update processing/two_stage/two_stage_processor.py

Browse files
processing/two_stage/two_stage_processor.py CHANGED
@@ -1,346 +1,677 @@
1
  #!/usr/bin/env python3
2
  """
3
- UI Components for BackgroundFX Pro (forced Two-Stage)
4
- -----------------------------------------------------
5
- * Pure layout (tiny wrapper to set env for quality)
6
- * All heavy logic stays in ui/callbacks.py
7
- * Two-stage mode is always active (checkbox removed)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  """
9
-
10
  from __future__ import annotations
11
- import os
12
- import gradio as gr
13
-
14
- from ui.callbacks import (
15
- cb_load_models,
16
- cb_process_video,
17
- cb_cancel,
18
- cb_status,
19
- cb_clear,
20
- cb_generate_bg,
21
- cb_use_gen_bg,
22
- cb_preset_bg_preview,
23
- )
24
-
25
- # Typography & UI polish: sharper text + cleaner cards
26
- CSS = """
27
- :root {
28
- --radius: 16px;
29
- --font-sans: 'Inter', system-ui, -apple-system, 'Segoe UI', Roboto,
30
- 'Helvetica Neue', Arial, sans-serif;
31
- }
32
-
33
- /* Global crisp text */
34
- html, body, .gradio-container, .gradio-container * {
35
- font-family: var(--font-sans) !important;
36
- -webkit-font-smoothing: antialiased !important;
37
- -moz-osx-font-smoothing: grayscale !important;
38
- text-rendering: optimizeLegibility !important;
39
- font-synthesis-weight: none;
40
- }
41
 
42
- /* Headings tighter & bolder */
43
- .gradio-container h1, .gradio-container h2, .gradio-container h3 {
44
- letter-spacing: -0.01em;
45
- font-weight: 700;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
  }
47
 
48
- /* Body copy slightly tighter */
49
- #hero .prose, .gr-markdown, .gr-text {
50
- letter-spacing: -0.003em;
 
 
 
 
51
  }
52
 
53
- /* Card look */
54
- .card {
55
- border-radius: var(--radius);
56
- border: 1px solid rgba(0,0,0,.08);
57
- padding: 16px;
58
- background: linear-gradient(180deg, rgba(255,255,255,.94), rgba(248,250,252,.94));
59
- box-shadow: 0 10px 30px rgba(0,0,0,.06);
60
- }
61
-
62
- .footer-note { opacity: 0.7; font-size: 12px; }
63
- .sm { font-size: 13px; opacity: 0.85; }
64
- #statusbox { min-height: 120px; }
65
- .preview-img { border-radius: var(--radius); border: 1px solid rgba(0,0,0,.08); }
66
-
67
- /* Buttons get a tiny weight bump for clarity */
68
- button, .gr-button { font-weight: 600; }
69
-
70
- /* Inline Quality select between buttons */
71
- .inline-quality .wrap-inner { min-width: 170px; }
72
- """
73
-
74
- # Keep in sync with utils/cv_processing.PROFESSIONAL_BACKGROUNDS
75
- _BG_CHOICES = [
76
- "minimalist",
77
- "office_modern",
78
- "studio_blue",
79
- "studio_green",
80
- "warm_gradient",
81
- "tech_dark",
82
- ]
83
- PRO_IMAGE_CHOICES = ["minimalist", "office_modern", "studio_blue", "studio_green"]
84
- GRADIENT_COLOR_CHOICES = ["warm_gradient", "tech_dark"]
85
-
86
-
87
- def create_interface() -> gr.Blocks:
88
- with gr.Blocks(
89
- title="🎬 BackgroundFX Pro",
90
- css=CSS,
91
- analytics_enabled=False,
92
- theme=gr.themes.Soft()
93
- ) as demo:
94
-
95
- # ------------------------------------------------------------------
96
- # HERO
97
- # ------------------------------------------------------------------
98
- with gr.Row(elem_id="hero"):
99
- gr.Markdown(
100
- "## 🎬 BackgroundFX Pro (CSP-Safe)\n"
101
- "Replace your video background with cinema-quality AI matting. "
102
- "Built for Hugging Face Spaces CSP.\n\n"
103
- "_Tip: press **Load Models** once after the Space spins up._"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
  )
105
 
106
- # ------------------------------------------------------------------
107
- # TAB Quick Start
108
- # ------------------------------------------------------------------
109
- with gr.Tab("🏁 Quick Start"):
110
-
111
- with gr.Row():
112
- # ── Left column ────────────────────────────────────────────
113
- with gr.Column(scale=1):
114
- video = gr.Video(label="Upload Video", interactive=True)
115
-
116
- # Hidden: effective preset key (still used by callbacks / defaults)
117
- bg_style = gr.Dropdown(
118
- label="Background Style (hidden)",
119
- choices=_BG_CHOICES,
120
- value="minimalist",
121
- visible=False,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
  )
123
-
124
- # =======================
125
- # Background Source block
126
- # =======================
127
- gr.Markdown("### 🖼️ Background Source")
128
-
129
- bg_method = gr.Radio(
130
- label="Choose method",
131
- choices=[
132
- "Upload file",
133
- "Pre-loaded professional images",
134
- "Pre-loaded Gradients / Colors",
135
- "AI generated background",
136
- ],
137
- value="Upload file",
138
  )
139
 
140
- # a) Upload file option
141
- with gr.Group(visible=True) as grp_upload:
142
- custom_bg = gr.Image(
143
- label="Upload Background Image",
144
- interactive=True,
145
- type="filepath", # returns file path
146
- elem_classes=["preview-img"]
147
- )
148
-
149
- # b) Pre-loaded professional images
150
- with gr.Group(visible=False) as grp_pro_images:
151
- pro_image_dd = gr.Dropdown(
152
- label="Professional Images",
153
- choices=PRO_IMAGE_CHOICES,
154
- value=PRO_IMAGE_CHOICES[0],
155
- info="Pre-defined photo-like backgrounds",
156
- )
157
- gr.Markdown(
158
- "<span class='sm'>Selecting a preset updates the preview below.</span>"
159
- )
160
-
161
- # c) Pre-loaded gradients & full colors
162
- with gr.Group(visible=False) as grp_gradients:
163
- gradient_dd = gr.Dropdown(
164
- label="Gradients & Full Colors",
165
- choices=GRADIENT_COLOR_CHOICES,
166
- value=GRADIENT_COLOR_CHOICES[0],
167
- info="Clean gradients or solid color styles",
168
- )
169
- gr.Markdown(
170
- "<span class='sm'>Selecting a preset updates the preview below.</span>"
171
- )
172
-
173
- # d) AI-generated background (inline, lightweight)
174
- with gr.Group(visible=False) as grp_ai:
175
- prompt = gr.Textbox(
176
- label="Describe vibe",
177
- value="modern office",
178
- info="e.g. 'soft sunset studio', 'cool tech dark', 'forest ambience'"
179
- )
180
- with gr.Row():
181
- gen_width = gr.Slider(640, 1920, 1280, step=10, label="Width")
182
- gen_height = gr.Slider(360, 1080, 720, step=10, label="Height")
183
- with gr.Row():
184
- bokeh = gr.Slider(0, 30, 8, step=1, label="Bokeh Blur")
185
- vignette = gr.Slider(0, 0.6, 0.15, step=0.01, label="Vignette")
186
- contrast = gr.Slider(0.8, 1.4, 1.05, step=0.01, label="Contrast")
187
- with gr.Row():
188
- btn_gen_bg_inline = gr.Button("✨ Generate Background", variant="primary")
189
- use_gen_as_custom_inline = gr.Button("📌 Use as Custom Background", variant="secondary")
190
- gen_preview = gr.Image(
191
- label="Generated Background",
192
- interactive=False,
193
- elem_classes=["preview-img"]
194
- )
195
- gen_path = gr.Textbox(label="Saved Path", interactive=False)
196
-
197
- # ── Advanced options accordion ───────────────────────
198
- with gr.Accordion("Advanced", open=False):
199
- chroma_preset = gr.Dropdown(
200
- label="Chroma Preset",
201
- choices=["standard"], # can add 'studio', 'outdoor' later
202
- value="standard"
203
- )
204
- key_color_mode = gr.Dropdown(
205
- label="Key-Colour Mode",
206
- choices=["auto", "green", "blue", "cyan", "magenta"],
207
- value="auto",
208
- info="Auto picks a colour far from your clothes; override if needed."
209
- )
210
- preview_mask = gr.Checkbox(
211
- label="Preview Mask only (mute audio)",
212
- value=False
213
- )
214
- preview_greenscreen = gr.Checkbox(
215
- label="Preview Green-screen only (mute audio)",
216
- value=False
217
- )
218
-
219
- # ── Controls row: Load → Quality → Process / Cancel ──
220
- with gr.Row():
221
- btn_load = gr.Button("🔄 Load Models", variant="secondary")
222
-
223
- quality = gr.Dropdown(
224
- label="Quality",
225
- choices=["speed", "balanced", "max"],
226
- value=os.getenv("BFX_QUALITY", "balanced"),
227
- info="Speed = fastest; Max = best edges & spill control.",
228
- elem_classes=["inline-quality"],
229
- )
230
-
231
- btn_run = gr.Button("🎬 Process Video", variant="primary")
232
- btn_cancel = gr.Button("⏹️ Cancel", variant="secondary")
233
-
234
- # ── Right column ──────────────────────────────────────────
235
- with gr.Column(scale=1):
236
- out_video = gr.Video(label="Processed Output", interactive=False)
237
- statusbox = gr.Textbox(label="Status", lines=8, elem_id="statusbox")
238
- with gr.Row():
239
- btn_refresh = gr.Button("🔍 Refresh Status", variant="secondary")
240
- btn_clear = gr.Button("🧹 Clear", variant="secondary")
241
-
242
- # ------------------------------------------------------------------
243
- # TAB – Status & settings
244
- # ------------------------------------------------------------------
245
- with gr.Tab("📈 Status & Settings"):
246
- with gr.Row():
247
- with gr.Column(scale=1, elem_classes=["card"]):
248
- model_status = gr.JSON(label="Model Status")
249
- with gr.Column(scale=1, elem_classes=["card"]):
250
- cache_status = gr.JSON(label="Cache / System Status")
251
-
252
- gr.Markdown(
253
- "<div class='footer-note'>If models fail to load, fallbacks keep the UI responsive. "
254
- "Check the runtime log for details.</div>"
255
- )
256
-
257
- # ------------------------------------------------------------------
258
- # Callback wiring
259
- # ------------------------------------------------------------------
260
-
261
- # Toggle which background sub-section is visible
262
- def _toggle_bg_sections(choice: str):
263
- return (
264
- gr.update(visible=(choice == "Upload file")),
265
- gr.update(visible=(choice == "Pre-loaded professional images")),
266
- gr.update(visible=(choice == "Pre-loaded Gradients / Colors")),
267
- gr.update(visible=(choice == "AI generated background")),
268
- )
269
-
270
- bg_method.change(
271
- _toggle_bg_sections,
272
- inputs=[bg_method],
273
- outputs=[grp_upload, grp_pro_images, grp_gradients, grp_ai],
274
  )
275
-
276
- # Load models
277
- btn_load.click(cb_load_models, outputs=statusbox)
278
-
279
- # Tiny wrapper to set env for quality before calling the existing callback
280
- def _run_with_quality(video_pth, bg_style_val, custom_bg_pth,
281
- use_two_stage_state, chroma_p, key_mode,
282
- prev_mask, prev_gs, quality_val):
283
- os.environ["BFX_QUALITY"] = (quality_val or "balanced")
284
- return cb_process_video(
285
- video_pth, bg_style_val, custom_bg_pth,
286
- use_two_stage_state, chroma_p, key_mode, prev_mask, prev_gs
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
287
  )
288
 
289
- # Always two-stage: pass use_two_stage=True to callback via State
290
- btn_run.click(
291
- _run_with_quality,
292
- inputs=[
293
- video,
294
- bg_style,
295
- custom_bg,
296
- gr.State(value=True), # Always two-stage
297
- chroma_preset,
298
- key_color_mode,
299
- preview_mask,
300
- preview_greenscreen,
301
- quality, # <-- the new control
302
- ],
303
- outputs=[out_video, statusbox],
304
- )
305
-
306
- # Cancel / Status / Clear
307
- btn_cancel.click(cb_cancel, outputs=statusbox)
308
- btn_refresh.click(cb_status, outputs=[model_status, cache_status])
309
-
310
- btn_clear.click(
311
- cb_clear,
312
- outputs=[out_video, statusbox, gen_preview, gen_path, custom_bg]
313
- )
314
-
315
- # Preloaded presets → update preview (write into custom_bg)
316
- pro_image_dd.change(
317
- cb_preset_bg_preview,
318
- inputs=[pro_image_dd],
319
- outputs=[custom_bg],
320
- )
321
- gradient_dd.change(
322
- cb_preset_bg_preview,
323
- inputs=[gradient_dd],
324
- outputs=[custom_bg],
325
- )
326
 
327
- # AI background generation (inline)
328
- btn_gen_bg_inline.click(
329
- cb_generate_bg,
330
- inputs=[prompt, gen_width, gen_height, bokeh, vignette, contrast],
331
- outputs=[gen_preview, gen_path],
332
- )
333
- use_gen_as_custom_inline.click(
334
- cb_use_gen_bg,
335
- inputs=[gen_path],
336
- outputs=[custom_bg],
337
- )
338
-
339
- # Initialize with a default preset preview on load
340
- demo.load(
341
- cb_preset_bg_preview,
342
- inputs=[bg_style],
343
- outputs=[custom_bg]
344
- )
345
 
346
- return demo
 
 
 
1
  #!/usr/bin/env python3
2
  """
3
+ Two-Stage Green-Screen Processing System ✅ 2025-08-29
4
+ Stage 1: Original → keyed background (auto-selected colour)
5
+ Stage 2: Keyed video final composite (hybrid chroma + segmentation rescue)
6
+
7
+ Aligned with current project layout:
8
+ * uses helpers from utils.cv_processing (segment_person_hq, refine_mask_hq)
9
+ * safe local create_video_writer (no core.app dependency)
10
+ * cancel support via stop_event
11
+ * progress_callback(pct, desc)
12
+ * fully self-contained – just drop in and import TwoStageProcessor
13
+
14
+ Additional safety:
15
+ * Ensures MatAnyone receives a valid first-frame mask (bootstraps the session
16
+ with the first SAM2 mask). This prevents "First frame arrived without a mask"
17
+ warnings and shape mismatches inside the stateful refiner.
18
+
19
+ Quality profiles (set via env BFX_QUALITY = speed | balanced | max):
20
+ * refine cadence, spill suppression, edge softness
21
+ * hybrid matte mix (segmentation vs chroma), small dilate/blur on mask
22
+ * optional tiny background blur to hide seams on very flat backgrounds
23
  """
 
24
  from __future__ import annotations
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
+ import cv2, numpy as np, os, gc, pickle, logging, tempfile, traceback, threading
27
+ from pathlib import Path
28
+ from typing import Optional, Dict, Any, Callable, Tuple, List
29
+
30
+ from utils.cv_processing import segment_person_hq, refine_mask_hq
31
+
32
+ # Project logger if available
33
+ try:
34
+ from utils.logger import get_logger
35
+ logger = get_logger(__name__)
36
+ except Exception:
37
+ logger = logging.getLogger(__name__)
38
+
39
+ # ---------------------------------------------------------------------------
40
+ # Local video-writer helper
41
+ # ---------------------------------------------------------------------------
42
def create_video_writer(output_path: str, fps: float, width: int, height: int, prefer_mp4: bool = True):
    """Create a cv2.VideoWriter, falling back to the alternate codec if needed.

    Args:
        output_path: Desired output path. Empty/None → a fresh temp file.
            Unrecognised extensions are rewritten to .mp4 / .avi.
        fps: Output frame rate.
        width, height: Frame size in pixels.
        prefer_mp4: Try .mp4/mp4v first, else .avi/XVID; the other pair is
            used as a fallback when the first backend refuses to open.

    Returns:
        (writer, path) on success, or (None, path) when no backend opened.
    """
    try:
        ext = ".mp4" if prefer_mp4 else ".avi"
        if not output_path:
            # mkstemp instead of the deprecated, race-prone tempfile.mktemp:
            # it atomically creates the file; close the fd so OpenCV can own it.
            fd, output_path = tempfile.mkstemp(suffix=ext)
            os.close(fd)
        else:
            base, curr_ext = os.path.splitext(output_path)
            if curr_ext.lower() not in [".mp4", ".avi", ".mov", ".mkv"]:
                output_path = base + ext

        fourcc = cv2.VideoWriter_fourcc(*("mp4v" if prefer_mp4 else "XVID"))
        writer = cv2.VideoWriter(output_path, fourcc, float(fps), (int(width), int(height)))
        if writer is None or not writer.isOpened():
            # Primary codec/container failed — try the alternate pair once.
            alt_ext = ".avi" if prefer_mp4 else ".mp4"
            alt_fourcc = cv2.VideoWriter_fourcc(*("XVID" if prefer_mp4 else "mp4v"))
            alt_path = os.path.splitext(output_path)[0] + alt_ext
            writer = cv2.VideoWriter(alt_path, alt_fourcc, float(fps), (int(width), int(height)))
            if writer is None or not writer.isOpened():
                return None, output_path
            return writer, alt_path
        return writer, output_path
    except Exception as e:
        logger.error(f"create_video_writer failed: {e}")
        return None, output_path
66
+
67
+ # ---------------------------------------------------------------------------
68
+ # Key-colour helpers (fast, no external deps)
69
+ # ---------------------------------------------------------------------------
70
def _bgr_to_hsv_hue_deg(bgr: np.ndarray) -> np.ndarray:
    """Return the hue channel of a BGR image in degrees [0, 360) as float32."""
    hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
    # OpenCV H is 0-180; scale to degrees 0-360
    return hsv[..., 0].astype(np.float32) * 2.0
74
+
75
+
76
+ def _hue_distance(a_deg: float, b_deg: float) -> float:
77
+ """Circular distance on the hue wheel (degrees)."""
78
+ d = abs(a_deg - b_deg) % 360.0
79
+ return min(d, 360.0 - d)
80
+
81
+
82
+ def _key_candidates_bgr() -> dict:
83
+ return {
84
+ "green": {"bgr": np.array([ 0,255, 0], dtype=np.uint8), "hue": 120.0},
85
+ "blue": {"bgr": np.array([255, 0, 0], dtype=np.uint8), "hue": 240.0},
86
+ "cyan": {"bgr": np.array([255,255, 0], dtype=np.uint8), "hue": 180.0},
87
+ "magenta": {"bgr": np.array([255, 0,255], dtype=np.uint8), "hue": 300.0},
88
+ }
89
+
90
+
91
def _choose_best_key_color(frame_bgr: np.ndarray, mask_uint8: np.ndarray) -> dict:
    """Pick the candidate colour farthest from the actor's dominant hues.

    Args:
        frame_bgr: Full BGR frame.
        mask_uint8: Foreground mask, uint8 0..255 (subject where > 127).

    Returns:
        One candidate dict ({"bgr": ..., "hue": ...}) from _key_candidates_bgr().
        Defaults to green when the subject region is too small or on any error.
    """
    try:
        candidates = _key_candidates_bgr()
        fg = frame_bgr[mask_uint8 > 127]
        if fg.size < 1_000:
            # Too few subject pixels for a meaningful hue histogram.
            return candidates["green"]

        fg_hue = _bgr_to_hsv_hue_deg(fg.reshape(-1, 1, 3)).reshape(-1)
        hist, edges = np.histogram(fg_hue, bins=36, range=(0.0, 360.0))
        top_idx = np.argsort(hist)[-3:]
        top_hues = [(edges[i] + edges[i + 1]) * 0.5 for i in top_idx]

        # Score each candidate by its circular distance to the subject's
        # *closest* dominant hue; the best key is the farthest one.
        # (Uses the shared _hue_distance helper instead of re-deriving the math.)
        best_name, best_score = "green", -1.0
        for name, info in candidates.items():
            score = min(_hue_distance(info["hue"], th) for th in top_hues)
            if score > best_score:
                best_name, best_score = name, score
        return candidates[best_name]
    except Exception:
        # Defensive default: green is the safest, most common key colour.
        return _key_candidates_bgr()["green"]
112
+
113
+
114
+ # ---------------------------------------------------------------------------
115
+ # Chroma presets
116
+ # ---------------------------------------------------------------------------
117
# Named chroma-key tuning presets.
# key_color: key colour as [B, G, R]; tolerance: CbCr distance threshold below
# which a pixel reads as key; edge_softness: matte blur radius (pixels);
# spill_suppression: strength (0..1) of green-spill desaturation.
CHROMA_PRESETS: Dict[str, Dict[str, Any]] = {
    'standard': {'key_color': [0,255,0], 'tolerance': 38, 'edge_softness': 2, 'spill_suppression': 0.35},
    'studio': {'key_color': [0,255,0], 'tolerance': 30, 'edge_softness': 1, 'spill_suppression': 0.45},
    'outdoor': {'key_color': [0,255,0], 'tolerance': 50, 'edge_softness': 3, 'spill_suppression': 0.25},
}
122
 
123
+ # ---------------------------------------------------------------------------
124
+ # Quality profiles (env: BFX_QUALITY = speed | balanced | max)
125
+ # ---------------------------------------------------------------------------
126
# Per-quality knobs (selected via env BFX_QUALITY = speed | balanced | max):
# refine_stride: run MatAnyone refinement every N frames (1 = every frame);
# spill: green-spill suppression strength (0..1); edge_softness: matte blur
# radius; bg_sigma: Gaussian sigma applied to the background to hide seams.
# mix / dilate / blur: presumably consumed by the hybrid compositor in
# stage 2 — not visible in this view; confirm against _hybrid_composite.
QUALITY_PROFILES: Dict[str, Dict[str, Any]] = {
    "speed": dict(refine_stride=4, spill=0.30, edge_softness=2, mix=0.60, dilate=0, blur=0, bg_sigma=0.0),
    "balanced": dict(refine_stride=2, spill=0.40, edge_softness=2, mix=0.75, dilate=1, blur=1, bg_sigma=0.6),
    "max": dict(refine_stride=1, spill=0.45, edge_softness=3, mix=0.85, dilate=2, blur=2, bg_sigma=1.0),
}
131
 
132
+ # ---------------------------------------------------------------------------
133
+ # Two-Stage Processor
134
+ # ---------------------------------------------------------------------------
135
+ class TwoStageProcessor:
136
    def __init__(self, sam2_predictor=None, matanyone_model=None):
        """Build the two-stage processor.

        Args:
            sam2_predictor: SAM2 predictor, possibly a wrapper object
                (unwrapped via _unwrap_sam2). None → threshold fallback.
            matanyone_model: Optional MatAnyone refiner session.
        """
        self.sam2 = self._unwrap_sam2(sam2_predictor)
        self.matanyone = matanyone_model
        # Stage-1 masks are pickled here and re-used by stage 2.
        self.mask_cache_dir = Path("/tmp/mask_cache")
        self.mask_cache_dir.mkdir(parents=True, exist_ok=True)

        # Internal flags/state
        self._mat_bootstrapped = False  # True once MatAnyone saw a first-frame mask
        self._alpha_prev: Optional[np.ndarray] = None  # temporal smoothing

        # Quality selection at construction (env BFX_QUALITY; invalid → balanced)
        qname = os.getenv("BFX_QUALITY", "balanced").strip().lower()
        if qname not in QUALITY_PROFILES:
            qname = "balanced"
        self.quality = qname
        self.q = QUALITY_PROFILES[qname]
        logger.info(f"TwoStageProcessor quality='{self.quality}' ⇒ {self.q}")

        logger.info(f"TwoStageProcessor init – SAM2: {self.sam2 is not None} | MatAnyOne: {self.matanyone is not None}")
155
+
156
+ # --------------------------- internal utils ---------------------------
157
+
158
+ def _unwrap_sam2(self, predictor):
159
+ """Unwrap the SAM2 predictor if needed."""
160
+ if predictor is None:
161
+ return None
162
+ if hasattr(predictor, 'sam_predictor'):
163
+ return predictor.sam_predictor
164
+ return predictor
165
+
166
+ def _refresh_quality_from_env(self):
167
+ """Pick up UI changes to BFX_QUALITY without rebuilding models."""
168
+ qname = os.getenv("BFX_QUALITY", self.quality).strip().lower()
169
+ if qname not in QUALITY_PROFILES:
170
+ qname = "balanced"
171
+ if qname != getattr(self, "quality", None) or not hasattr(self, "q"):
172
+ self.quality = qname
173
+ self.q = QUALITY_PROFILES[qname]
174
+ logger.info(f"Quality switched to '{self.quality}' ⇒ {self.q}")
175
+
176
    def _get_mask(self, frame: np.ndarray) -> np.ndarray:
        """Get segmentation mask using SAM2 (delegates to project helper).

        Falls back to a crude global luminance threshold when SAM2 is missing
        or raises — poor quality, but keeps the pipeline alive for callers.

        Args:
            frame: BGR frame.

        Returns:
            Mask; dtype may be uint8 (0..255) or float (0..1) depending on the
            segment_person_hq build — callers normalise via _to_binary_mask.
        """
        if self.sam2 is None:
            # Fallback: simple luminance threshold (kept to avoid breaking callers)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            _, mask = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
            return mask

        try:
            mask = segment_person_hq(frame, self.sam2)
            # segment_person_hq returns either uint8(0..255) or float(0..1) in most builds
            return mask
        except Exception as e:
            logger.warning(f"SAM2 segmentation failed: {e}")
            # Same threshold fallback as the no-model path above.
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            _, mask = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
            return mask
193
+
194
+ @staticmethod
195
+ def _to_binary_mask(mask: np.ndarray) -> Optional[np.ndarray]:
196
+ """Convert mask to uint8(0..255)."""
197
+ if mask is None:
198
+ return None
199
+ if mask.dtype == bool:
200
+ return mask.astype(np.uint8) * 255
201
+ if np.issubdtype(mask.dtype, np.floating):
202
+ m = np.clip(mask, 0.0, 1.0)
203
+ return (m * 255.0 + 0.5).astype(np.uint8)
204
+ return mask
205
+
206
+ @staticmethod
207
+ def _to_float01(mask: np.ndarray, h: int = None, w: int = None) -> Optional[np.ndarray]:
208
+ """Float [0,1] mask, optionally resized to (h,w)."""
209
+ if mask is None:
210
+ return None
211
+ m = mask.astype(np.float32)
212
+ if m.max() > 1.0:
213
+ m = m / 255.0
214
+ if h is not None and w is not None and (m.shape[0] != h or m.shape[1] != w):
215
+ m = cv2.resize(m, (w, h), interpolation=cv2.INTER_LINEAR)
216
+ return np.clip(m, 0.0, 1.0)
217
+
218
+ def _apply_greenscreen_hard(self, frame: np.ndarray, mask: np.ndarray, bg: np.ndarray) -> np.ndarray:
219
+ """Apply hard greenscreen compositing."""
220
+ mask_3ch = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR) if mask.ndim == 2 else mask
221
+ mask_norm = mask_3ch.astype(np.float32) / 255.0
222
+ result = frame * mask_norm + bg * (1 - mask_norm)
223
+ return result.astype(np.uint8)
224
+
225
+ # -------- improved spill suppression (preserves luminance & skin) --------
226
    def _suppress_green_spill(self, frame: np.ndarray, amount: float = 0.35) -> np.ndarray:
        """
        Desaturate green dominance while preserving luminance and red skin hues.

        Args:
            frame: BGR uint8 frame.
            amount: Suppression strength in 0..1 (0 leaves green untouched).

        Returns:
            BGR uint8 frame; green-dominant pixels have their G channel pulled
            toward the mean of R and B, except where skin is detected.
        """
        b, g, r = cv2.split(frame.astype(np.float32))
        # A pixel "spills" when green exceeds both red and blue.
        green_dom = (g > r) & (g > b)
        avg_rb = (r + b) * 0.5
        # Blend spilled green toward the red/blue average by `amount`.
        g2 = np.where(green_dom, g*(1.0-amount) + avg_rb*amount, g)
        skin = (r > g + 12) # protect skin tones
        # Skin override runs last so it wins over the spill correction.
        g2 = np.where(skin, g, g2)
        out = cv2.merge([np.clip(b,0,255), np.clip(g2,0,255), np.clip(r,0,255)]).astype(np.uint8)
        return out
239
+
240
+ # -------- edge-aware alpha refinement (guided-like) --------
241
    def _refine_alpha_edges(self, frame_bgr: np.ndarray, alpha_u8: np.ndarray, radius: int = 3, iters: int = 1) -> np.ndarray:
        """
        Fast, dependency-free, guided-like refinement on the alpha border.

        Softens the matte only where it matters: along Canny edges of the
        matte itself, and in semi-transparent green-dominant pixels (likely
        spill halos). Interior and background alpha stays untouched.

        Args:
            frame_bgr: Source BGR frame (used to find green-dominant pixels).
            alpha_u8: Alpha matte, uint8 0..255.
            radius: Blur radius; <= 0 returns the matte unchanged.
            iters: Number of blur/merge passes (minimum 1).

        Returns: uint8 alpha
        """
        a = alpha_u8.astype(np.uint8)
        if radius <= 0:
            return a

        # Edge band of the current matte; a flat matte needs no refinement.
        band = cv2.Canny(a, 32, 64)
        if band.max() == 0:
            return a

        for _ in range(max(1, iters)):
            a_blur = cv2.GaussianBlur(a, (radius*2+1, radius*2+1), 0)
            b,g,r = cv2.split(frame_bgr.astype(np.float32))
            green_dom = (g > r) & (g > b)
            # Mid-alpha (96..224) green pixels are treated as spill border.
            spill_mask = (green_dom & (a > 96) & (a < 224)).astype(np.uint8)*255
            u = cv2.bitwise_or(band, spill_mask)
            # Swap in the blurred alpha only at the flagged pixels.
            a = np.where(u>0, a_blur, a).astype(np.uint8)

        return a
263
+
264
+ # -------- soft key based on chosen color (robust to blue/cyan/magenta) --------
265
    def _soft_key_mask(self, frame_bgr: np.ndarray, key_bgr: np.ndarray, tol: int = 40) -> np.ndarray:
        """
        Soft chroma mask (uint8 0..255, 255=keep subject) using CbCr distance.

        Args:
            frame_bgr: BGR frame to key.
            key_bgr: Key colour as a BGR triplet; None keeps everything (255).
            tol: Distance threshold; chroma within `tol` of the key maps to 0,
                ramping linearly to 255 over a further tol*1.7.
        """
        if key_bgr is None:
            return np.full(frame_bgr.shape[:2], 255, np.uint8)

        # Compare in YCrCb chroma (Cr/Cb) so luminance does not affect the key.
        ycbcr = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2YCrCb).astype(np.float32)
        kycbcr = cv2.cvtColor(key_bgr.reshape(1,1,3).astype(np.uint8), cv2.COLOR_BGR2YCrCb).astype(np.float32)[0,0]
        d = np.linalg.norm((ycbcr[...,1:] - kycbcr[1:]), axis=-1)
        # Small blur on the distance field softens the matte transition.
        d = cv2.GaussianBlur(d, (5,5), 0)
        alpha = 255.0 * np.clip((d - tol) / (tol*1.7), 0.0, 1.0) # far from key = keep (255)
        return alpha.astype(np.uint8)
278
+
279
+ # --------------------- NEW: MatAnyone bootstrap ----------------------
280
    def _bootstrap_matanyone_if_needed(self, frame_bgr: np.ndarray, coarse_mask: np.ndarray) -> None:
        """
        Call the MatAnyone session ONCE with the first coarse mask to initialize
        its memory. This guarantees downstream calls never hit "first frame without a mask".

        Args:
            frame_bgr: First BGR frame of the clip.
            coarse_mask: Initial (SAM2) mask for that frame; any dtype — it is
                normalised to float [0,1] at the frame's size before the call.

        No-op when there is no MatAnyone model or when already bootstrapped;
        failures are logged and swallowed (refinement simply stays off).
        """
        if self.matanyone is None or self._mat_bootstrapped:
            return
        try:
            h, w = frame_bgr.shape[:2]
            mask_f = self._to_float01(coarse_mask, h, w)
            # MatAnyone expects RGB input, not OpenCV's BGR.
            rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
            _ = self.matanyone(rgb, mask_f) # boot only; ignore returned alpha
            self._mat_bootstrapped = True
            logger.info("MatAnyone session bootstrapped with first-frame mask.")
        except Exception as e:
            logger.warning(f"MatAnyone bootstrap failed (continuing without): {e}")
296
+
297
+ # ---------------------------------------------------------------------
298
+ # Stage 1 – Original → keyed (green/blue/…) -- chooses colour on 1st frame
299
+ # ---------------------------------------------------------------------
300
    def stage1_extract_to_greenscreen(
        self,
        video_path: str,
        output_path: str,
        *,
        key_color_mode: str = "auto", # "auto" | "green" | "blue" | "cyan" | "magenta"
        progress_callback: Optional[Callable[[float, str], None]] = None,
        stop_event: Optional["threading.Event"] = None,
    ) -> Tuple[Optional[dict], str]:
        """
        Stage 1: segment the subject frame-by-frame and composite it onto a
        solid key colour, writing the keyed video and caching per-frame masks.

        Args:
            video_path: Input video path.
            output_path: Desired output path (may be adjusted by the writer).
            key_color_mode: "auto" picks the colour farthest from the subject's
                hues on the first frame; otherwise a named colour is forced.
            progress_callback: Optional (pct: float, desc: str) hook;
                exceptions from it are swallowed.
            stop_event: Optional threading.Event; when set, the loop stops and
                the partial output so far is finalised.

        Returns:
            ({"path", "frames", "key_bgr"}, message) on success,
            (None, error message) on failure.
        """

        def _prog(p, d):
            # Best-effort progress reporting — never let a UI hook kill a render.
            if progress_callback:
                try:
                    progress_callback(float(p), str(d))
                except Exception:
                    pass

        try:
            # pick up any new quality selection
            self._refresh_quality_from_env()

            _prog(0.0, "Stage 1: opening video…")
            cap = cv2.VideoCapture(video_path)
            if not cap.isOpened():
                return None, "Could not open input video"

            fps = cap.get(cv2.CAP_PROP_FPS) or 25.0  # 0/NaN-safe default
            total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) or 0
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            writer, out_path = create_video_writer(output_path, fps, w, h)
            if writer is None:
                cap.release()
                return None, "Could not create output writer"

            key_info: dict | None = None
            chosen_bgr = np.array([0, 255, 0], np.uint8) # default
            probe_done = False  # key colour decided on the first frame only
            masks: List[np.ndarray] = []
            frame_idx = 0

            solid_bg = np.zeros((h, w, 3), np.uint8) # overwritten per-frame

            while True:
                if stop_event and stop_event.is_set():
                    _prog(1.0, "Stage 1: cancelled")
                    break

                ok, frame = cap.read()
                if not ok:
                    break

                # --- SAM2 segmentation ---
                mask = self._get_mask(frame)

                # --- MatAnyone bootstrap exactly once (first frame) ---
                if frame_idx == 0 and self.matanyone is not None:
                    try:
                        self._bootstrap_matanyone_if_needed(frame, mask)
                    except Exception as e:
                        logger.warning(f"Bootstrap error (non-fatal): {e}")

                # --- Decide key colour once ---
                if not probe_done:
                    if key_color_mode.lower() == "auto":
                        key_info = _choose_best_key_color(frame, self._to_binary_mask(mask))
                        chosen_bgr = key_info["bgr"]
                    else:
                        # Unknown names silently keep the green default.
                        cand = _key_candidates_bgr().get(key_color_mode.lower())
                        if cand is not None:
                            chosen_bgr = cand["bgr"]
                    probe_done = True
                    logger.info(f"[TwoStage] Using key colour: {key_color_mode} → {chosen_bgr.tolist()}")

                # --- Optional refinement via MatAnyone (profile cadence) ---
                stride = int(self.q.get("refine_stride", 3))
                if self.matanyone and (frame_idx % max(1, stride) == 0):
                    try:
                        mask = refine_mask_hq(frame, mask, self.matanyone, fallback_enabled=True)
                    except Exception as e:
                        logger.warning(f"MatAnyOne refine fail f={frame_idx}: {e}")

                # --- Composite onto solid key colour ---
                solid_bg[:] = chosen_bgr
                mask_u8 = self._to_binary_mask(mask)
                gs = self._apply_greenscreen_hard(frame, mask_u8, solid_bg)
                writer.write(gs)
                masks.append(mask_u8)  # cached for stage 2

                frame_idx += 1
                # With an unknown frame count, creep toward 95% instead.
                pct = 0.05 + 0.9 * (frame_idx / total) if total else min(0.95, 0.05 + frame_idx * 0.002)
                _prog(pct, f"Stage 1: {frame_idx}/{total or '?'}")

            cap.release()
            writer.release()

            # save mask cache (keyed by the *actual* output path's stem so
            # stage 2 can find it; failure is non-fatal)
            try:
                cache_file = self.mask_cache_dir / (Path(out_path).stem + "_masks.pkl")
                with open(cache_file, "wb") as f:
                    pickle.dump(masks, f)
            except Exception as e:
                logger.warning(f"mask cache save fail: {e}")

            _prog(1.0, "Stage 1: complete")
            return (
                {"path": out_path, "frames": frame_idx, "key_bgr": chosen_bgr.tolist()},
                f"Green-screen video created ({frame_idx} frames)"
            )

        except Exception as e:
            logger.error(f"Stage 1 error: {e}\n{traceback.format_exc()}")
            return None, f"Stage 1 failed: {e}"
414
+
415
+ # ---------------------------------------------------------------------
416
+ # Stage 2 – keyed video → final composite (hybrid matte)
417
+ # ---------------------------------------------------------------------
418
    def stage2_greenscreen_to_final(
        self,
        gs_path: str,
        background: np.ndarray | str,
        output_path: str,
        *,
        chroma_settings: Optional[Dict[str, Any]] = None,
        progress_callback: Optional[Callable[[float, str], None]] = None,
        stop_event: Optional["threading.Event"] = None,
        key_bgr: Optional[np.ndarray] = None,  # pass chosen key color
    ) -> Tuple[Optional[str], str]:
        """Stage 2: composite a keyed (green-screen) video over a new background.

        Reads `gs_path` frame by frame and replaces the key color with
        `background` (an image path or a BGR array; resized to the video size).
        If Stage 1 left a pickled mask cache next to the video, a hybrid
        chroma+segmentation composite is used; otherwise pure chroma keying.

        Args:
            gs_path: Path to the keyed video produced by Stage 1.
            background: Background image path or BGR ndarray.
            output_path: Desired output video path (the actual path may be
                adjusted by `create_video_writer` — its return value is used).
            chroma_settings: Optional overrides; falls back to
                CHROMA_PRESETS['standard']. Profile values in `self.q` take
                precedence for edge softness and spill suppression.
            progress_callback: Optional (fraction, description) callback;
                exceptions from it are swallowed.
            stop_event: Optional threading.Event; when set, the loop stops and
                the partial output is still returned as success.
            key_bgr: Key color as a BGR uint8 triple. If None, taken from the
                preset's 'key_color' (default pure green).

        Returns:
            (output_path, message) on success, (None, error_message) on failure.
        """

        # Small shim so a broken/absent progress callback can never crash the render.
        def _prog(p, d):
            if progress_callback:
                try:
                    progress_callback(float(p), str(d))
                except Exception:
                    pass

        try:
            # pick up any new quality selection (profile may have changed via env)
            self._refresh_quality_from_env()

            _prog(0.0, "Stage 2: opening keyed video…")
            cap = cv2.VideoCapture(gs_path)
            if not cap.isOpened():
                return None, "Could not open keyed video"

            # `or 25.0` guards against backends reporting 0 fps.
            fps = cap.get(cv2.CAP_PROP_FPS) or 25.0
            total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) or 0
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            # Load or prepare background (path → imread; array → resize in place)
            if isinstance(background, str):
                bg = cv2.imread(background)
                if bg is None:
                    cap.release()
                    return None, "Could not load background image"
                bg = cv2.resize(bg, (w, h))
            else:
                bg = cv2.resize(background, (w, h))

            # Optional tiny BG blur per profile to hide seams on flat BGs
            sigma = float(self.q.get("bg_sigma", 0.0))
            if sigma > 0:
                bg = cv2.GaussianBlur(bg, (0, 0), sigmaX=sigma, sigmaY=sigma)

            # NOTE(review): create_video_writer may normalize the path/codec;
            # `out_path` (its return), not `output_path`, is what we report back.
            writer, out_path = create_video_writer(output_path, fps, w, h)
            if writer is None:
                cap.release()
                return None, "Could not create output writer"

            # Load cached masks if available — presumably written by Stage 1 under
            # "<stem>_masks.pkl"; verify the writer uses the same naming. Missing
            # or corrupt cache degrades gracefully to pure chroma keying.
            masks = None
            try:
                cache_file = self.mask_cache_dir / (Path(gs_path).stem + "_masks.pkl")
                if cache_file.exists():
                    with open(cache_file, "rb") as f:
                        masks = pickle.load(f)
                    logger.info(f"Loaded {len(masks)} cached masks")
            except Exception as e:
                logger.warning(f"Could not load mask cache: {e}")

            # Get chroma settings and override with profile
            settings = chroma_settings or CHROMA_PRESETS.get('standard', {})
            tolerance = int(settings.get('tolerance', 38))  # keep user tolerance
            edge_softness = int(self.q.get('edge_softness', settings.get('edge_softness', 2)))
            spill_suppression = float(self.q.get('spill', settings.get('spill_suppression', 0.35)))

            # If caller didn't pass key_bgr, try preset or default green
            if key_bgr is None:
                key_bgr = np.array(settings.get('key_color', [0,255,0]), dtype=np.uint8)

            self._alpha_prev = None  # reset temporal smoothing per render

            frame_idx = 0
            while True:
                # Cancellation returns the partial file as a normal result.
                if stop_event and stop_event.is_set():
                    _prog(1.0, "Stage 2: cancelled")
                    break

                ok, frame = cap.read()
                if not ok:
                    break

                # Apply chroma keying with optional mask assistance; frames past
                # the end of the cached mask list fall back to pure chroma.
                if masks and frame_idx < len(masks):
                    mask = masks[frame_idx]
                    final_frame = self._hybrid_composite(
                        frame, bg, mask,
                        tolerance=tolerance,
                        edge_softness=edge_softness,
                        spill_suppression=spill_suppression,
                        key_bgr=key_bgr
                    )
                else:
                    # Pure chroma key
                    final_frame = self._chroma_key_composite(
                        frame, bg,
                        tolerance=tolerance,
                        edge_softness=edge_softness,
                        spill_suppression=spill_suppression,
                        key_bgr=key_bgr
                    )

                writer.write(final_frame)
                frame_idx += 1
                # Progress: 5%..95% across frames; unknown frame count uses a
                # slow linear ramp capped at 95%.
                pct = 0.05 + 0.9 * (frame_idx / total) if total else min(0.95, 0.05 + frame_idx * 0.002)
                _prog(pct, f"Stage 2: {frame_idx}/{total or '?'}")

            cap.release()
            writer.release()

            _prog(1.0, "Stage 2: complete")
            return out_path, f"Final composite created ({frame_idx} frames)"

        except Exception as e:
            logger.error(f"Stage 2 error: {e}\n{traceback.format_exc()}")
            return None, f"Stage 2 failed: {e}"
538
+
539
+ # ---------------- chroma + hybrid compositors (polished) ----------------
540
+ def _chroma_key_composite(self, frame, bg, *, tolerance=38, edge_softness=2, spill_suppression=0.35, key_bgr: Optional[np.ndarray] = None):
541
+ """Apply chroma key compositing with soft color distance + edge refinement."""
542
+ # 1) spill first
543
+ if spill_suppression > 0:
544
+ frame = self._suppress_green_spill(frame, spill_suppression)
545
+
546
+ # 2) build alpha
547
+ if key_bgr is not None:
548
+ alpha = self._soft_key_mask(frame, key_bgr, tol=int(tolerance))
549
+ else:
550
+ # Fallback: HSV green range
551
+ hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
552
+ lower_green = np.array([40, 40, 40])
553
+ upper_green = np.array([80, 255, 255])
554
+ alpha = cv2.bitwise_not(cv2.inRange(hsv, lower_green, upper_green))
555
+
556
+ # 3) soft edges + refinement
557
+ if edge_softness > 0:
558
+ k = edge_softness * 2 + 1
559
+ alpha = cv2.GaussianBlur(alpha, (k, k), 0)
560
+ alpha = self._refine_alpha_edges(frame, alpha, radius=max(1, edge_softness), iters=1)
561
+
562
+ # 4) temporal smoothing
563
+ if self._alpha_prev is not None and self._alpha_prev.shape == alpha.shape:
564
+ alpha = cv2.addWeighted(alpha, 0.75, self._alpha_prev, 0.25, 0)
565
+ self._alpha_prev = alpha
566
+
567
+ # 5) composite
568
+ mask_3ch = cv2.cvtColor(alpha, cv2.COLOR_GRAY2BGR).astype(np.float32) / 255.0
569
+ out = frame.astype(np.float32) * mask_3ch + bg.astype(np.float32) * (1.0 - mask_3ch)
570
+ return np.clip(out, 0, 255).astype(np.uint8)
571
+
572
+ def _hybrid_composite(self, frame, bg, mask, *, tolerance=38, edge_softness=2, spill_suppression=0.35, key_bgr: Optional[np.ndarray] = None):
573
+ """Apply hybrid compositing using both chroma key and cached mask, with profile controls."""
574
+ chroma_result = self._chroma_key_composite(
575
+ frame, bg,
576
+ tolerance=tolerance,
577
+ edge_softness=edge_softness,
578
+ spill_suppression=spill_suppression,
579
+ key_bgr=key_bgr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
580
  )
581
+ if mask is None:
582
+ return chroma_result
583
+
584
+ # profile-driven dilate/feather on cached mask to close pinholes + soften edges
585
+ m = mask
586
+ d = int(self.q.get("dilate", 0))
587
+ if d > 0:
588
+ k = 2*d + 1
589
+ se = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k, k))
590
+ m = cv2.dilate(m, se, iterations=1)
591
+ b = int(self.q.get("blur", 0))
592
+ if b > 0:
593
+ m = cv2.GaussianBlur(m, (2*b+1, 2*b+1), 0)
594
+
595
+ m3 = cv2.cvtColor(m, cv2.COLOR_GRAY2BGR) if m.ndim == 2 else m
596
+ m3f = (m3.astype(np.float32) / 255.0)
597
+
598
+ seg_comp = frame.astype(np.float32) * m3f + bg.astype(np.float32) * (1.0 - m3f)
599
+
600
+ mix = float(self.q.get("mix", 0.7)) # weight towards segmentation on "max"
601
+ out = chroma_result.astype(np.float32) * (1.0 - mix) + seg_comp * mix
602
+ return np.clip(out, 0, 255).astype(np.uint8)
603
+
604
+ # ---------------------------------------------------------------------
605
+ # Combined pipeline
606
+ # ---------------------------------------------------------------------
607
+ def process_full_pipeline(
608
+ self,
609
+ video_path: str,
610
+ background: np.ndarray | str,
611
+ output_path: str,
612
+ *,
613
+ key_color_mode: str = "auto",
614
+ chroma_settings: Optional[Dict[str, Any]] = None,
615
+ progress_callback: Optional[Callable[[float, str], None]] = None,
616
+ stop_event: Optional["threading.Event"] = None,
617
+ ) -> Tuple[Optional[str], str]:
618
+ """Run both stages in sequence."""
619
+
620
+ def _combined_progress(pct, desc):
621
+ # Scale progress: Stage 1 is 0-50%, Stage 2 is 50-100%
622
+ if "Stage 1" in desc:
623
+ actual_pct = pct * 0.5
624
+ else: # Stage 2
625
+ actual_pct = 0.5 + pct * 0.5
626
+
627
+ if progress_callback:
628
+ try:
629
+ progress_callback(actual_pct, desc)
630
+ except Exception:
631
+ pass
632
+
633
+ try:
634
+ # pick up any new quality selection once per run
635
+ self._refresh_quality_from_env()
636
+
637
+ # Reset per-video state
638
+ self._mat_bootstrapped = False
639
+ self._alpha_prev = None
640
+ if self.matanyone is not None and hasattr(self.matanyone, "reset"):
641
+ try:
642
+ self.matanyone.reset()
643
+ except Exception:
644
+ pass
645
+
646
+ # Stage 1
647
+ temp_gs_path = tempfile.mktemp(suffix="_greenscreen.mp4")
648
+ stage1_result, stage1_msg = self.stage1_extract_to_greenscreen(
649
+ video_path, temp_gs_path,
650
+ key_color_mode=key_color_mode,
651
+ progress_callback=_combined_progress,
652
+ stop_event=stop_event
653
+ )
654
+ if stage1_result is None:
655
+ return None, stage1_msg
656
+
657
+ # Stage 2 (pass through chosen key color)
658
+ key_bgr = np.array(stage1_result.get("key_bgr", [0,255,0]), dtype=np.uint8)
659
+ final_path, stage2_msg = self.stage2_greenscreen_to_final(
660
+ stage1_result["path"], background, output_path,
661
+ chroma_settings=chroma_settings,
662
+ progress_callback=_combined_progress,
663
+ stop_event=stop_event,
664
+ key_bgr=key_bgr,
665
  )
666
 
667
+ # Clean up temp file
668
+ try:
669
+ os.remove(temp_gs_path)
670
+ except Exception:
671
+ pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
672
 
673
+ return final_path, stage2_msg
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
674
 
675
+ except Exception as e:
676
+ logger.error(f"Full pipeline error: {e}\n{traceback.format_exc()}")
677
+ return None, f"Pipeline failed: {e}"