ElBeh committed on
Commit
b36a3c3
·
verified ·
1 Parent(s): dd039ce

Update tabs/tab_videoframes.py

Browse files
Files changed (1) hide show
  1. tabs/tab_videoframes.py +255 -72
tabs/tab_videoframes.py CHANGED
@@ -40,7 +40,7 @@ def merge_annotations(base_image, annotations, mode, current_frame_idx, global_a
40
 
41
  return img
42
 
43
- def apply_transformation(frame, transformation, process_image_func):
44
  """Applies selected transformation to frame"""
45
  if frame is None or transformation == "None":
46
  return frame
@@ -51,8 +51,8 @@ def apply_transformation(frame, transformation, process_image_func):
51
  else:
52
  pil_frame = frame
53
 
54
- # Call process_image with the frame
55
- result = process_image_func(pil_frame, transformation)
56
 
57
  # Extract transformed image from tuple
58
  if isinstance(result, tuple) and len(result) == 2:
@@ -60,19 +60,25 @@ def apply_transformation(frame, transformation, process_image_func):
60
  else:
61
  transformed = result
62
 
63
- # Convert back to numpy array
64
  if transformed is not None:
 
 
 
 
 
 
65
  return np.array(transformed)
66
 
67
  return frame
68
 
69
- def create_sketchpad_value(base_image, annotations, mode, current_frame_idx, global_annotation, transformation, process_image_func):
70
  """Creates Sketchpad value (Background + Layers)"""
71
  if base_image is None:
72
  return None
73
 
74
  # Apply transformation first
75
- transformed_frame = apply_transformation(base_image, transformation, process_image_func)
76
 
77
  # Prepare base image
78
  if isinstance(transformed_frame, np.ndarray):
@@ -120,7 +126,7 @@ def extract_annotation_from_sketch(sketch_data):
120
 
121
  return None
122
 
123
- def create_comparison_slider(frame, transformation, process_image_func):
124
  """Creates ImageSlider comparison between original and transformed frame"""
125
  if frame is None:
126
  return None
@@ -135,7 +141,7 @@ def create_comparison_slider(frame, transformation, process_image_func):
135
  return (original, original)
136
 
137
  # Apply transformation
138
- transformed_array = apply_transformation(frame, transformation, process_image_func)
139
 
140
  if isinstance(transformed_array, np.ndarray):
141
  transformed = Image.fromarray(transformed_array)
@@ -145,10 +151,15 @@ def create_comparison_slider(frame, transformation, process_image_func):
145
  return (original, transformed)
146
 
147
 
148
- def update_frame_display(frame_idx, frames, fps, annotations, global_annotation, annotation_mode, transformation, process_image_func):
149
  """Updates frame display"""
150
  if not frames or frame_idx >= len(frames):
151
- return {"background": None, "layers": []}, None, f"Frame {int(frame_idx)+1} / 0", "--:--"
 
 
 
 
 
152
 
153
  # Calculate video time
154
  if fps > 0:
@@ -163,31 +174,31 @@ def update_frame_display(frame_idx, frames, fps, annotations, global_annotation,
163
  frame = frames[int(frame_idx)]
164
 
165
  # Create Sketchpad value with transformation
166
- sketch_value = create_sketchpad_value(frame, annotations, annotation_mode, int(frame_idx), global_annotation, transformation, process_image_func)
167
 
168
  # Create comparison slider
169
- slider_value = create_comparison_slider(frame, transformation, process_image_func)
170
 
171
  return sketch_value, slider_value, f"Frame {int(frame_idx)+1} / {len(frames)}", time_str
172
 
173
 
174
- def go_to_prev_frame(current_idx, steps, frames, fps, annotations, global_annotation, annotation_mode, transformation, process_image_func):
175
  """Goes one frame back"""
176
  if not frames:
177
  return 0, {"background": None, "layers": []}, None, "No video loaded", "--:--"
178
 
179
  new_idx = max(0, int(current_idx) - steps)
180
- sketch_value, slider_value, info, time_str = update_frame_display(new_idx, frames, fps, annotations, global_annotation, annotation_mode, transformation, process_image_func)
181
  return new_idx, sketch_value, slider_value, info, time_str
182
 
183
 
184
- def go_to_next_frame(current_idx, steps, frames, fps, annotations, global_annotation, annotation_mode, transformation, process_image_func):
185
  """Goes one frame forward"""
186
  if not frames:
187
  return 0, {"background": None, "layers": []}, None, "No video loaded", "--:--"
188
 
189
  new_idx = min(len(frames) - 1, int(current_idx) + steps)
190
- sketch_value, slider_value, info, time_str = update_frame_display(new_idx, frames, fps, annotations, global_annotation, annotation_mode, transformation, process_image_func)
191
  return new_idx, sketch_value, slider_value, info, time_str
192
 
193
 
@@ -251,10 +262,45 @@ def clear_annotations(mode, annotations, global_annotation):
251
  return annotations, None
252
 
253
 
254
- def create_tab_videoframes(tab_label, process_image):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
255
  """Creates a tab for video frame processing"""
256
  with gr.TabItem(tab_label):
257
- video_frames = gr.State([])
 
 
 
 
 
258
  current_frame_idx = gr.State(0)
259
  video_duration = gr.State(0)
260
  video_fps = gr.State(0)
@@ -262,6 +308,7 @@ def create_tab_videoframes(tab_label, process_image):
262
  global_annotation = gr.State(None)
263
  annotation_mode = gr.State("A")
264
  selected_transformation = gr.State("None")
 
265
 
266
 
267
  # Row 1: raw video
@@ -306,36 +353,65 @@ def create_tab_videoframes(tab_label, process_image):
306
  scale=2
307
  )
308
  with gr.Column(scale=1, min_width=1):
309
-
310
-
311
- frame_info = gr.Textbox(label="Frame Info", value="No video loaded", interactive=False, scale=2,)
312
  video_time_display = gr.Textbox(label="Video Time", value="--:--", interactive=False, scale=1)
313
  gr.Markdown("---")
314
- radio_transformation = gr.Radio(
315
- choices=[
316
- "None",
317
- "Laplacian High-Pass",
318
- "FFT Spectrum",
319
- "Error Level Analysis",
320
- "Wavelet Decomposition",
321
- "Noise Extraction",
322
- "YCbCr Channels",
323
- "Gradient Magnitude",
324
- "Histogram Stretching"
325
- ],
326
- value="None",
327
- label="Frame Transformation",
328
- info="Apply analysis filters to the current frame"
329
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
330
 
331
- # with gr.Row():
332
- # with gr.Column():
333
- # frame_info = gr.Textbox(label="Frame Info", value="No video loaded", interactive=False, scale=2)
334
- # video_time_display = gr.Textbox(label="Video Time", value="--:--", interactive=False, scale=1)
335
-
336
  # Row: Frame navigation
337
  with gr.Row():
338
-
339
  gr.Markdown("---")
340
 
341
  with gr.Row():
@@ -355,47 +431,162 @@ def create_tab_videoframes(tab_label, process_image):
355
 
356
  with gr.Row():
357
  gr.Markdown("---")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
358
 
359
-
360
  # Video Upload
361
  video_input.change(
362
  fn=load_video_frames,
363
  inputs=[video_input],
364
  outputs=[video_frames, current_frame_idx, frame_slider, frame_info, video_duration, video_fps, frame_annotations, global_annotation]
365
  ).then(
366
- fn=lambda idx, frames, fps, annots, glob_annot, mode, trans: update_frame_display(idx, frames, fps, annots, glob_annot, mode, trans, process_image),
367
- inputs=[current_frame_idx, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation],
368
  outputs=[sketch_output, comparison_slider, frame_info, video_time_display]
369
  )
370
 
371
  # Frame Navigation
372
  frame_slider.release(
373
- fn=lambda idx, frames, fps, annots, glob_annot, mode, trans: update_frame_display(idx, frames, fps, annots, glob_annot, mode, trans, process_image),
374
- inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation],
375
  outputs=[sketch_output, comparison_slider, frame_info, video_time_display]
376
  )
377
 
378
  btn_prev_frame.click(
379
- fn=lambda idx, frames, fps, annots, glob_annot, mode, trans: go_to_prev_frame(idx, 1, frames, fps, annots, glob_annot, mode, trans, process_image),
380
- inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation],
381
  outputs=[frame_slider, sketch_output, comparison_slider, frame_info, video_time_display]
382
  )
383
 
384
  btn_next_frame.click(
385
- fn=lambda idx, frames, fps, annots, glob_annot, mode, trans: go_to_next_frame(idx, 1, frames, fps, annots, glob_annot, mode, trans, process_image),
386
- inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation],
387
  outputs=[frame_slider, sketch_output, comparison_slider, frame_info, video_time_display]
388
  )
389
 
390
  btn_prev10_frame.click(
391
- fn=lambda idx, frames, fps, annots, glob_annot, mode, trans: go_to_prev_frame(idx, 10, frames, fps, annots, glob_annot, mode, trans, process_image),
392
- inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation],
393
  outputs=[frame_slider, sketch_output, comparison_slider, frame_info, video_time_display]
394
  )
395
 
396
  btn_next10_frame.click(
397
- fn=lambda idx, frames, fps, annots, glob_annot, mode, trans: go_to_next_frame(idx, 10, frames, fps, annots, glob_annot, mode, trans, process_image),
398
- inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation],
399
  outputs=[frame_slider, sketch_output, comparison_slider, frame_info, video_time_display]
400
  )
401
 
@@ -406,25 +597,14 @@ def create_tab_videoframes(tab_label, process_image):
406
  outputs=[frame_annotations, global_annotation]
407
  )
408
 
409
- # Transformation Change
410
- radio_transformation.change(
411
- fn=lambda new_trans: new_trans,
412
- inputs=[radio_transformation],
413
- outputs=[selected_transformation]
414
- ).then(
415
- fn=lambda idx, frames, fps, annots, glob_annot, mode, trans: update_frame_display(idx, frames, fps, annots, glob_annot, mode, trans, process_image),
416
- inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation],
417
- outputs=[sketch_output, comparison_slider, frame_info, video_time_display]
418
- )
419
-
420
  # Mode Change
421
  radio_mode.change(
422
  fn=lambda new_mode: new_mode,
423
  inputs=[radio_mode],
424
  outputs=[annotation_mode]
425
  ).then(
426
- fn=lambda idx, frames, fps, annots, glob_annot, mode, trans: update_frame_display(idx, frames, fps, annots, glob_annot, mode, trans, process_image),
427
- inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation],
428
  outputs=[sketch_output, comparison_slider, frame_info, video_time_display]
429
  )
430
 
@@ -434,7 +614,10 @@ def create_tab_videoframes(tab_label, process_image):
434
  inputs=[annotation_mode, frame_annotations, global_annotation],
435
  outputs=[frame_annotations, global_annotation]
436
  ).then(
437
- fn=lambda idx, frames, fps, annots, glob_annot, mode, trans: update_frame_display(idx, frames, fps, annots, glob_annot, mode, trans, process_image),
438
- inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation],
439
  outputs=[sketch_output, comparison_slider, frame_info, video_time_display]
440
- )
 
 
 
 
40
 
41
  return img
42
 
43
+ def apply_transformation(frame, transformation, quality, process_image_func):
44
  """Applies selected transformation to frame"""
45
  if frame is None or transformation == "None":
46
  return frame
 
51
  else:
52
  pil_frame = frame
53
 
54
+ # Call process_image with the frame and quality
55
+ result = process_image_func(pil_frame, transformation, quality)
56
 
57
  # Extract transformed image from tuple
58
  if isinstance(result, tuple) and len(result) == 2:
 
60
  else:
61
  transformed = result
62
 
63
+ # CRITICAL FOR GRADIO 6.x: Convert grayscale to RGB
64
  if transformed is not None:
65
+ if isinstance(transformed, Image.Image) and transformed.mode == 'L':
66
+ transformed = transformed.convert('RGB')
67
+ elif isinstance(transformed, np.ndarray) and len(transformed.shape) == 2:
68
+ transformed = Image.fromarray(cv2.cvtColor(transformed, cv2.COLOR_GRAY2RGB))
69
+
70
+ # Convert to numpy array
71
  return np.array(transformed)
72
 
73
  return frame
74
 
75
+ def create_sketchpad_value(base_image, annotations, mode, current_frame_idx, global_annotation, transformation, quality, process_image_func):
76
  """Creates Sketchpad value (Background + Layers)"""
77
  if base_image is None:
78
  return None
79
 
80
  # Apply transformation first
81
+ transformed_frame = apply_transformation(base_image, transformation, quality, process_image_func)
82
 
83
  # Prepare base image
84
  if isinstance(transformed_frame, np.ndarray):
 
126
 
127
  return None
128
 
129
+ def create_comparison_slider(frame, transformation, quality, process_image_func):
130
  """Creates ImageSlider comparison between original and transformed frame"""
131
  if frame is None:
132
  return None
 
141
  return (original, original)
142
 
143
  # Apply transformation
144
+ transformed_array = apply_transformation(frame, transformation, quality, process_image_func)
145
 
146
  if isinstance(transformed_array, np.ndarray):
147
  transformed = Image.fromarray(transformed_array)
 
151
  return (original, transformed)
152
 
153
 
154
+ def update_frame_display(frame_idx, frames, fps, annotations, global_annotation, annotation_mode, transformation, quality, process_image_func):
155
  """Updates frame display"""
156
  if not frames or frame_idx >= len(frames):
157
+ return (
158
+ {"background": None, "layers": [], "composite": None}, # Fixed: Added 'composite' key
159
+ None,
160
+ f"Frame {int(frame_idx)+1} / 0",
161
+ "--:--"
162
+ )
163
 
164
  # Calculate video time
165
  if fps > 0:
 
174
  frame = frames[int(frame_idx)]
175
 
176
  # Create Sketchpad value with transformation
177
+ sketch_value = create_sketchpad_value(frame, annotations, annotation_mode, int(frame_idx), global_annotation, transformation, quality, process_image_func)
178
 
179
  # Create comparison slider
180
+ slider_value = create_comparison_slider(frame, transformation, quality, process_image_func)
181
 
182
  return sketch_value, slider_value, f"Frame {int(frame_idx)+1} / {len(frames)}", time_str
183
 
184
 
185
+ def go_to_prev_frame(current_idx, steps, frames, fps, annotations, global_annotation, annotation_mode, transformation, quality, process_image_func):
186
  """Goes one frame back"""
187
  if not frames:
188
  return 0, {"background": None, "layers": []}, None, "No video loaded", "--:--"
189
 
190
  new_idx = max(0, int(current_idx) - steps)
191
+ sketch_value, slider_value, info, time_str = update_frame_display(new_idx, frames, fps, annotations, global_annotation, annotation_mode, transformation, quality, process_image_func)
192
  return new_idx, sketch_value, slider_value, info, time_str
193
 
194
 
195
+ def go_to_next_frame(current_idx, steps, frames, fps, annotations, global_annotation, annotation_mode, transformation, quality, process_image_func):
196
  """Goes one frame forward"""
197
  if not frames:
198
  return 0, {"background": None, "layers": []}, None, "No video loaded", "--:--"
199
 
200
  new_idx = min(len(frames) - 1, int(current_idx) + steps)
201
+ sketch_value, slider_value, info, time_str = update_frame_display(new_idx, frames, fps, annotations, global_annotation, annotation_mode, transformation, quality, process_image_func)
202
  return new_idx, sketch_value, slider_value, info, time_str
203
 
204
 
 
262
  return annotations, None
263
 
264
 
265
+ def toggle_accordion(accordion_name, current_active):
266
+ """Toggles accordion visibility and returns new transformation state with button variants"""
267
+ transformation_names = [
268
+ "Laplacian High-Pass",
269
+ "FFT Spectrum",
270
+ "Error Level Analysis",
271
+ "Wavelet Decomposition",
272
+ "Noise Extraction",
273
+ "YCbCr Channels",
274
+ "Gradient Magnitude",
275
+ "Histogram Stretching"
276
+ ]
277
+
278
+ if current_active == accordion_name:
279
+ # Clicking active accordion closes it -> None
280
+ new_transformation = "None"
281
+ visibility = [False] * 8
282
+ variants = ["secondary"] * 8 # All buttons secondary (gray)
283
+ else:
284
+ # Open clicked accordion, close all others
285
+ new_transformation = accordion_name
286
+ visibility = [accordion_name == name for name in transformation_names]
287
+ # Set clicked button to primary (highlighted), others to secondary
288
+ variants = ["primary" if accordion_name == name else "secondary" for name in transformation_names]
289
+
290
+ return (new_transformation,
291
+ *[gr.update(visible=v) for v in visibility],
292
+ *[gr.update(variant=var) for var in variants])
293
+
294
+
295
+ def create_tab_videoframes(tab_label, process_image, shared_video_frames=None):
296
  """Creates a tab for video frame processing"""
297
  with gr.TabItem(tab_label):
298
+ # Use shared state if provided, otherwise create local state
299
+ if shared_video_frames is None:
300
+ video_frames = gr.State([])
301
+ else:
302
+ video_frames = shared_video_frames
303
+
304
  current_frame_idx = gr.State(0)
305
  video_duration = gr.State(0)
306
  video_fps = gr.State(0)
 
308
  global_annotation = gr.State(None)
309
  annotation_mode = gr.State("A")
310
  selected_transformation = gr.State("None")
311
+ ela_quality = gr.State(90)
312
 
313
 
314
  # Row 1: raw video
 
353
  scale=2
354
  )
355
  with gr.Column(scale=1, min_width=1):
356
+ frame_info = gr.Textbox(label="Frame Info", value="No video loaded", interactive=False, scale=2)
 
 
357
  video_time_display = gr.Textbox(label="Video Time", value="--:--", interactive=False, scale=1)
358
  gr.Markdown("---")
359
+
360
+ # Accordion-based transformation selection
361
+ with gr.Column():
362
+ gr.Markdown("### Frame Transformation")
363
+ gr.Markdown("*Click to activate transformation*")
364
+
365
+ # Laplacian High-Pass
366
+ btn_laplacian = gr.Button("▶ Laplacian High-Pass", size="sm")
367
+ with gr.Column(visible=False) as content_laplacian:
368
+ gr.Markdown("Emphasizes high-frequency details and edges")
369
+
370
+ # FFT Spectrum
371
+ btn_fft = gr.Button("▶ FFT Spectrum", size="sm")
372
+ with gr.Column(visible=False) as content_fft:
373
+ gr.Markdown("Shows frequency domain representation")
374
+
375
+ # Error Level Analysis
376
+ btn_ela = gr.Button("▶ Error Level Analysis", size="sm")
377
+ with gr.Column(visible=False) as content_ela:
378
+ gr.Markdown("Detects JPEG compression artifacts")
379
+ quality_slider = gr.Slider(
380
+ minimum=1,
381
+ maximum=99,
382
+ value=90,
383
+ step=1,
384
+ label="JPEG Quality",
385
+ info="Higher = more subtle differences"
386
+ )
387
+
388
+ # Wavelet Decomposition
389
+ btn_wavelet = gr.Button("▶ Wavelet Decomposition", size="sm")
390
+ with gr.Column(visible=False) as content_wavelet:
391
+ gr.Markdown("Multi-scale frequency analysis")
392
+
393
+ # Noise Extraction
394
+ btn_noise = gr.Button("▶ Noise Extraction", size="sm")
395
+ with gr.Column(visible=False) as content_noise:
396
+ gr.Markdown("Isolates high-frequency noise")
397
+
398
+ # YCbCr Channels
399
+ btn_ycbcr = gr.Button("▶ YCbCr Channels", size="sm")
400
+ with gr.Column(visible=False) as content_ycbcr:
401
+ gr.Markdown("Separates luminance and chrominance")
402
+
403
+ # Gradient Magnitude
404
+ btn_gradient = gr.Button("▶ Gradient Magnitude", size="sm")
405
+ with gr.Column(visible=False) as content_gradient:
406
+ gr.Markdown("Visualizes edge strength via Sobel")
407
+
408
+ # Histogram Stretching
409
+ btn_histogram = gr.Button("▶ Histogram Stretching", size="sm")
410
+ with gr.Column(visible=False) as content_histogram:
411
+ gr.Markdown("Extreme contrast enhancement")
412
 
 
 
 
 
 
413
  # Row: Frame navigation
414
  with gr.Row():
 
415
  gr.Markdown("---")
416
 
417
  with gr.Row():
 
431
 
432
  with gr.Row():
433
  gr.Markdown("---")
434
+
435
+ # Collect all content columns for visibility updates
436
+ content_columns = [
437
+ content_laplacian,
438
+ content_fft,
439
+ content_ela,
440
+ content_wavelet,
441
+ content_noise,
442
+ content_ycbcr,
443
+ content_gradient,
444
+ content_histogram
445
+ ]
446
+
447
+ # Collect all buttons for variant updates
448
+ transformation_buttons = [
449
+ btn_laplacian,
450
+ btn_fft,
451
+ btn_ela,
452
+ btn_wavelet,
453
+ btn_noise,
454
+ btn_ycbcr,
455
+ btn_gradient,
456
+ btn_histogram
457
+ ]
458
+
459
+ # Accordion button clicks
460
+ btn_laplacian.click(
461
+ fn=lambda current: toggle_accordion("Laplacian High-Pass", current),
462
+ inputs=[selected_transformation],
463
+ outputs=[selected_transformation] + content_columns + transformation_buttons
464
+ ).then(
465
+ fn=lambda idx, frames, fps, annots, glob_annot, mode, trans, quality: update_frame_display(idx, frames, fps, annots, glob_annot, mode, trans, quality, process_image),
466
+ inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation, ela_quality],
467
+ outputs=[sketch_output, comparison_slider, frame_info, video_time_display]
468
+ )
469
+
470
+ btn_fft.click(
471
+ fn=lambda current: toggle_accordion("FFT Spectrum", current),
472
+ inputs=[selected_transformation],
473
+ outputs=[selected_transformation] + content_columns + transformation_buttons
474
+ ).then(
475
+ fn=lambda idx, frames, fps, annots, glob_annot, mode, trans, quality: update_frame_display(idx, frames, fps, annots, glob_annot, mode, trans, quality, process_image),
476
+ inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation, ela_quality],
477
+ outputs=[sketch_output, comparison_slider, frame_info, video_time_display]
478
+ )
479
+
480
+ btn_ela.click(
481
+ fn=lambda current: toggle_accordion("Error Level Analysis", current),
482
+ inputs=[selected_transformation],
483
+ outputs=[selected_transformation] + content_columns + transformation_buttons
484
+ ).then(
485
+ fn=lambda idx, frames, fps, annots, glob_annot, mode, trans, quality: update_frame_display(idx, frames, fps, annots, glob_annot, mode, trans, quality, process_image),
486
+ inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation, ela_quality],
487
+ outputs=[sketch_output, comparison_slider, frame_info, video_time_display]
488
+ )
489
+
490
+ btn_wavelet.click(
491
+ fn=lambda current: toggle_accordion("Wavelet Decomposition", current),
492
+ inputs=[selected_transformation],
493
+ outputs=[selected_transformation] + content_columns + transformation_buttons
494
+ ).then(
495
+ fn=lambda idx, frames, fps, annots, glob_annot, mode, trans, quality: update_frame_display(idx, frames, fps, annots, glob_annot, mode, trans, quality, process_image),
496
+ inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation, ela_quality],
497
+ outputs=[sketch_output, comparison_slider, frame_info, video_time_display]
498
+ )
499
+
500
+ btn_noise.click(
501
+ fn=lambda current: toggle_accordion("Noise Extraction", current),
502
+ inputs=[selected_transformation],
503
+ outputs=[selected_transformation] + content_columns + transformation_buttons
504
+ ).then(
505
+ fn=lambda idx, frames, fps, annots, glob_annot, mode, trans, quality: update_frame_display(idx, frames, fps, annots, glob_annot, mode, trans, quality, process_image),
506
+ inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation, ela_quality],
507
+ outputs=[sketch_output, comparison_slider, frame_info, video_time_display]
508
+ )
509
+
510
+ btn_ycbcr.click(
511
+ fn=lambda current: toggle_accordion("YCbCr Channels", current),
512
+ inputs=[selected_transformation],
513
+ outputs=[selected_transformation] + content_columns + transformation_buttons
514
+ ).then(
515
+ fn=lambda idx, frames, fps, annots, glob_annot, mode, trans, quality: update_frame_display(idx, frames, fps, annots, glob_annot, mode, trans, quality, process_image),
516
+ inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation, ela_quality],
517
+ outputs=[sketch_output, comparison_slider, frame_info, video_time_display]
518
+ )
519
+
520
+ btn_gradient.click(
521
+ fn=lambda current: toggle_accordion("Gradient Magnitude", current),
522
+ inputs=[selected_transformation],
523
+ outputs=[selected_transformation] + content_columns + transformation_buttons
524
+ ).then(
525
+ fn=lambda idx, frames, fps, annots, glob_annot, mode, trans, quality: update_frame_display(idx, frames, fps, annots, glob_annot, mode, trans, quality, process_image),
526
+ inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation, ela_quality],
527
+ outputs=[sketch_output, comparison_slider, frame_info, video_time_display]
528
+ )
529
+
530
+ btn_histogram.click(
531
+ fn=lambda current: toggle_accordion("Histogram Stretching", current),
532
+ inputs=[selected_transformation],
533
+ outputs=[selected_transformation] + content_columns + transformation_buttons
534
+ ).then(
535
+ fn=lambda idx, frames, fps, annots, glob_annot, mode, trans, quality: update_frame_display(idx, frames, fps, annots, glob_annot, mode, trans, quality, process_image),
536
+ inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation, ela_quality],
537
+ outputs=[sketch_output, comparison_slider, frame_info, video_time_display]
538
+ )
539
+
540
+ # Quality slider change (only affects ELA)
541
+ quality_slider.change(
542
+ fn=lambda q: q,
543
+ inputs=[quality_slider],
544
+ outputs=[ela_quality]
545
+ ).then(
546
+ fn=lambda idx, frames, fps, annots, glob_annot, mode, trans, quality: update_frame_display(idx, frames, fps, annots, glob_annot, mode, trans, quality, process_image),
547
+ inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation, ela_quality],
548
+ outputs=[sketch_output, comparison_slider, frame_info, video_time_display]
549
+ )
550
 
 
551
  # Video Upload
552
  video_input.change(
553
  fn=load_video_frames,
554
  inputs=[video_input],
555
  outputs=[video_frames, current_frame_idx, frame_slider, frame_info, video_duration, video_fps, frame_annotations, global_annotation]
556
  ).then(
557
+ fn=lambda idx, frames, fps, annots, glob_annot, mode, trans, quality: update_frame_display(idx, frames, fps, annots, glob_annot, mode, trans, quality, process_image),
558
+ inputs=[current_frame_idx, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation, ela_quality],
559
  outputs=[sketch_output, comparison_slider, frame_info, video_time_display]
560
  )
561
 
562
  # Frame Navigation
563
  frame_slider.release(
564
+ fn=lambda idx, frames, fps, annots, glob_annot, mode, trans, quality: update_frame_display(idx, frames, fps, annots, glob_annot, mode, trans, quality, process_image),
565
+ inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation, ela_quality],
566
  outputs=[sketch_output, comparison_slider, frame_info, video_time_display]
567
  )
568
 
569
  btn_prev_frame.click(
570
+ fn=lambda idx, frames, fps, annots, glob_annot, mode, trans, quality: go_to_prev_frame(idx, 1, frames, fps, annots, glob_annot, mode, trans, quality, process_image),
571
+ inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation, ela_quality],
572
  outputs=[frame_slider, sketch_output, comparison_slider, frame_info, video_time_display]
573
  )
574
 
575
  btn_next_frame.click(
576
+ fn=lambda idx, frames, fps, annots, glob_annot, mode, trans, quality: go_to_next_frame(idx, 1, frames, fps, annots, glob_annot, mode, trans, quality, process_image),
577
+ inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation, ela_quality],
578
  outputs=[frame_slider, sketch_output, comparison_slider, frame_info, video_time_display]
579
  )
580
 
581
  btn_prev10_frame.click(
582
+ fn=lambda idx, frames, fps, annots, glob_annot, mode, trans, quality: go_to_prev_frame(idx, 10, frames, fps, annots, glob_annot, mode, trans, quality, process_image),
583
+ inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation, ela_quality],
584
  outputs=[frame_slider, sketch_output, comparison_slider, frame_info, video_time_display]
585
  )
586
 
587
  btn_next10_frame.click(
588
+ fn=lambda idx, frames, fps, annots, glob_annot, mode, trans, quality: go_to_next_frame(idx, 10, frames, fps, annots, glob_annot, mode, trans, quality, process_image),
589
+ inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation, ela_quality],
590
  outputs=[frame_slider, sketch_output, comparison_slider, frame_info, video_time_display]
591
  )
592
 
 
597
  outputs=[frame_annotations, global_annotation]
598
  )
599
 
 
 
 
 
 
 
 
 
 
 
 
600
  # Mode Change
601
  radio_mode.change(
602
  fn=lambda new_mode: new_mode,
603
  inputs=[radio_mode],
604
  outputs=[annotation_mode]
605
  ).then(
606
+ fn=lambda idx, frames, fps, annots, glob_annot, mode, trans, quality: update_frame_display(idx, frames, fps, annots, glob_annot, mode, trans, quality, process_image),
607
+ inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation, ela_quality],
608
  outputs=[sketch_output, comparison_slider, frame_info, video_time_display]
609
  )
610
 
 
614
  inputs=[annotation_mode, frame_annotations, global_annotation],
615
  outputs=[frame_annotations, global_annotation]
616
  ).then(
617
+ fn=lambda idx, frames, fps, annots, glob_annot, mode, trans, quality: update_frame_display(idx, frames, fps, annots, glob_annot, mode, trans, quality, process_image),
618
+ inputs=[frame_slider, video_frames, video_fps, frame_annotations, global_annotation, annotation_mode, selected_transformation, ela_quality],
619
  outputs=[sketch_output, comparison_slider, frame_info, video_time_display]
620
+ )
621
+
622
+ # Return video_frames state for sharing with other tabs
623
+ return video_frames