sharul20001 committed on
Commit
a1343b8
·
verified ·
1 Parent(s): 89b20a6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +294 -202
app.py CHANGED
@@ -1,50 +1,39 @@
1
  import gradio as gr
2
- import os
3
- import tempfile
4
  import cv2
5
  import numpy as np
6
- from PIL import Image, ImageDraw, ImageFont
7
- import json
8
-
9
- # CSS untuk styling
10
- CSS = """
11
- .title {
12
- text-align: center;
13
- font-size: 2em;
14
- font-weight: bold;
15
- margin-bottom: 20px;
16
- }
17
- """
18
 
19
- class VideoEditor:
 
20
  def __init__(self):
21
- self.api_key = None
22
 
23
- def extract_frames(self, video_path, max_frames=30):
24
- """Extract frames from video"""
25
  frames = []
26
- if not video_path:
27
- return frames
28
-
29
  cap = cv2.VideoCapture(video_path)
 
 
 
 
 
30
  total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
31
- sample_rate = max(1, total_frames // max_frames)
32
 
33
- frame_count = 0
34
- while cap.isOpened() and len(frames) < max_frames:
35
  ret, frame = cap.read()
36
  if not ret:
37
  break
38
-
39
- if frame_count % sample_rate == 0:
40
- frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
41
- frame_count += 1
42
 
43
  cap.release()
44
- return frames
45
 
46
- def create_video_from_frames(self, frames, output_path, fps=24):
47
- """Create video from frames"""
48
  if not frames:
49
  return None
50
 
@@ -58,217 +47,320 @@ class VideoEditor:
58
  out.release()
59
  return output_path
60
 
61
- def add_text_overlay(self, frame, text, position=(50, 50)):
62
- """Add text overlay to frame"""
63
- frame_copy = frame.copy()
64
- cv2.putText(frame_copy, text, position,
65
- cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2,
66
- cv2.LINE_AA)
67
- return frame_copy
68
 
69
- def change_background(self, video_path, bg_prompt):
70
- """Simulate background change"""
71
- if not video_path:
72
- return None, "Please upload a video first"
 
 
 
 
 
 
 
 
 
 
73
 
74
- try:
75
- # Extract frames
76
- frames = self.extract_frames(video_path)
77
- if not frames:
78
- return None, "Failed to extract frames"
79
 
80
- # Process frames (simulation)
81
- processed_frames = []
82
- for i, frame in enumerate(frames):
83
- # Add overlay to simulate processing
84
- text = f"BG: {bg_prompt[:30]}..."
85
- processed = self.add_text_overlay(frame, text, (10, 30))
86
-
87
- # Simple color tint based on prompt
88
- if "beach" in bg_prompt.lower():
89
- processed[:,:,2] = np.clip(processed[:,:,2] * 1.1, 0, 255)
90
- elif "forest" in bg_prompt.lower():
91
- processed[:,:,1] = np.clip(processed[:,:,1] * 1.2, 0, 255)
92
- elif "city" in bg_prompt.lower():
93
- processed = processed * 0.8
94
-
95
- processed_frames.append(processed.astype(np.uint8))
96
 
97
- # Create output video
98
- output_path = tempfile.mktemp(suffix='.mp4')
99
- self.create_video_from_frames(processed_frames, output_path)
 
 
100
 
101
- return output_path, f"Background changed to: {bg_prompt}"
 
 
 
 
 
 
 
 
 
102
 
103
- except Exception as e:
104
- return None, f"Error: {str(e)}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
 
106
- def change_outfit(self, video_path, outfit_prompt):
107
- """Simulate outfit change"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
108
  if not video_path:
109
  return None, "Please upload a video first"
110
-
111
  try:
112
- frames = self.extract_frames(video_path)
 
113
  if not frames:
114
  return None, "Failed to extract frames"
115
 
116
- processed_frames = []
117
- for frame in frames:
118
- text = f"Outfit: {outfit_prompt[:25]}..."
119
- processed = self.add_text_overlay(frame, text, (10, 60))
120
- processed_frames.append(processed)
121
 
122
- output_path = tempfile.mktemp(suffix='.mp4')
123
- self.create_video_from_frames(processed_frames, output_path)
 
 
 
 
 
 
 
 
 
124
 
125
- return output_path, f"Outfit changed to: {outfit_prompt}"
 
 
 
 
 
126
 
127
- except Exception as e:
128
- return None, f"Error: {str(e)}"
129
-
130
- def apply_style(self, video_path, style_prompt, color_prompt):
131
- """Apply style and color effects"""
132
- if not video_path:
133
- return None, "Please upload a video first"
134
 
135
- try:
136
- frames = self.extract_frames(video_path)
137
- if not frames:
138
- return None, "Failed to extract frames"
 
 
139
 
140
- processed_frames = []
141
- for frame in frames:
142
- # Apply color effects
143
- if "warm" in color_prompt.lower():
144
- frame[:,:,0] = np.clip(frame[:,:,0] * 1.2, 0, 255)
145
- frame[:,:,2] = np.clip(frame[:,:,2] * 0.8, 0, 255)
146
- elif "cool" in color_prompt.lower():
147
- frame[:,:,0] = np.clip(frame[:,:,0] * 0.8, 0, 255)
148
- frame[:,:,2] = np.clip(frame[:,:,2] * 1.2, 0, 255)
149
- elif "vintage" in color_prompt.lower():
150
- # Sepia effect
151
- gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
152
- frame = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
153
- frame[:,:,0] = np.clip(frame[:,:,0] * 1.1, 0, 255)
154
-
155
- text = f"Style: {style_prompt[:20]}..."
156
- processed = self.add_text_overlay(frame.astype(np.uint8), text, (10, 90))
157
- processed_frames.append(processed)
158
 
 
 
159
  output_path = tempfile.mktemp(suffix='.mp4')
160
- self.create_video_from_frames(processed_frames, output_path)
 
 
 
 
 
 
 
 
 
 
 
 
161
 
162
- return output_path, f"Style applied: {style_prompt}"
163
 
164
  except Exception as e:
165
- return None, f"Error: {str(e)}"
166
 
167
  # Create instance
168
- editor = VideoEditor()
169
 
170
  # Create interface
171
- with gr.Blocks(css=CSS) as demo:
172
- gr.HTML("""
173
- <h1 class="title">🎬 AI Video Editor</h1>
174
- <p style="text-align: center;">Transform videos with AI-powered editing</p>
175
  """)
176
 
177
- # Video input
178
  with gr.Row():
179
- video_input = gr.Video(label="Upload Video")
180
-
181
- # Tabs for different features
182
- with gr.Tabs():
183
- # Background Tab
184
- with gr.Tab("🏞️ Background"):
185
- gr.Markdown("### Change Video Background")
186
- bg_prompt = gr.Textbox(
187
- placeholder="Describe the new background...",
188
- label="Background Description"
189
- )
190
- bg_button = gr.Button("Change Background", variant="primary")
191
- bg_output = gr.Video(label="Result")
192
- bg_status = gr.Textbox(label="Status")
193
 
194
- gr.Examples(
195
- examples=[
196
- "Tropical beach with palm trees",
197
- "Modern city skyline at night",
198
- "Enchanted forest with fireflies"
199
- ],
200
- inputs=bg_prompt
201
- )
202
 
203
- bg_button.click(
204
- fn=editor.change_background,
205
- inputs=[video_input, bg_prompt],
206
- outputs=[bg_output, bg_status]
 
207
  )
208
-
209
- # Outfit Tab
210
- with gr.Tab("πŸ‘” Outfit"):
211
- gr.Markdown("### Change Character Outfit")
212
- outfit_prompt = gr.Textbox(
213
- placeholder="Describe the new outfit...",
214
- label="Outfit Description"
215
- )
216
- outfit_button = gr.Button("Change Outfit", variant="primary")
217
- outfit_output = gr.Video(label="Result")
218
- outfit_status = gr.Textbox(label="Status")
219
 
220
- gr.Examples(
221
- examples=[
222
- "Business suit with red tie",
223
- "Casual summer dress",
224
- "Superhero costume"
225
- ],
226
- inputs=outfit_prompt
227
- )
228
 
229
- outfit_button.click(
230
- fn=editor.change_outfit,
231
- inputs=[video_input, outfit_prompt],
232
- outputs=[outfit_output, outfit_status]
233
- )
234
-
235
- # Style Tab
236
- with gr.Tab("🎨 Style"):
237
- gr.Markdown("### Apply Visual Style")
238
- style_prompt = gr.Textbox(
239
- placeholder="Visual style...",
240
- label="Style"
241
- )
242
- color_prompt = gr.Textbox(
243
- placeholder="Color grading...",
244
- label="Colors"
245
- )
246
- style_button = gr.Button("Apply Style", variant="primary")
247
- style_output = gr.Video(label="Result")
248
- style_status = gr.Textbox(label="Status")
249
 
250
- gr.Examples(
251
- examples=[
252
- ["Cinematic", "Warm sunset tones"],
253
- ["Anime style", "Vibrant colors"],
254
- ["Film noir", "Cool blue tones"]
255
- ],
256
- inputs=[style_prompt, color_prompt]
257
- )
258
 
259
- style_button.click(
260
- fn=editor.apply_style,
261
- inputs=[video_input, style_prompt, color_prompt],
262
- outputs=[style_output, style_status]
263
- )
 
 
264
 
 
265
  gr.Markdown("""
266
  ---
267
- ### πŸ“ Notes:
268
- - This is a demonstration with simulated effects
269
- - Real implementation requires AI model APIs
270
- - Upload short videos (<30s) for best results
 
 
 
 
 
 
 
 
271
  """)
 
 
 
 
 
 
 
272
 
273
  # Launch
274
- demo.launch()
 
1
  import gradio as gr
 
 
2
  import cv2
3
  import numpy as np
4
+ import tempfile
5
+ import os
6
+ from PIL import Image, ImageEnhance, ImageFilter
7
+ import random
8
+ import math
 
 
 
 
 
 
 
9
 
10
class AdvancedVideoTransformer:
    """Applies randomized geometric, color, temporal and visual
    transformations to a video's frames so the output differs from the
    original input.
    """

    def __init__(self):
        # Record of applied transformations; not read by the visible
        # transformation methods (kept for API compatibility).
        self.transformations = []
14
 
15
def extract_frames(self, video_path):
    """Decode *video_path* into a list of RGB frames plus its frame rate.

    Args:
        video_path: Path to a video file readable by OpenCV.

    Returns:
        tuple: (frames, fps) where frames is a list of uint8 RGB numpy
        arrays. If the file cannot be opened, returns ([], 30). fps also
        falls back to 30 when the container reports a non-positive rate,
        so a video writer downstream never receives an invalid fps.
    """
    frames = []
    cap = cv2.VideoCapture(video_path)

    if not cap.isOpened():
        return [], 30

    fps = int(cap.get(cv2.CAP_PROP_FPS))
    # Some containers report 0 (or a negative value) for FPS; use the
    # same default as the unreadable-file case so saving still works.
    if fps <= 0:
        fps = 30

    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    for _ in range(total_frames):
        ret, frame = cap.read()
        if not ret:
            break
        # OpenCV decodes BGR; the rest of the pipeline works in RGB.
        frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

    cap.release()
    return frames, fps
34
 
35
+ def save_video(self, frames, fps, output_path):
36
+ """Save processed frames as video"""
37
  if not frames:
38
  return None
39
 
 
47
  out.release()
48
  return output_path
49
 
50
+ # TRANSFORMATION METHODS
 
 
 
 
 
 
51
 
52
def apply_geometric_transform(self, frame, transform_type="random"):
    """Return *frame* with one randomized geometric distortion applied.

    transform_type selects the distortion:
      - "random" / "perspective": four-point perspective warp, corners
        jittered inward by up to 10% of the smaller frame dimension
      - "rotation": rotation about the center by a random angle in [-5, 5]
      - "scale": resize by a random factor in [0.95, 1.05], then
        center-crop (when enlarged) or zero-pad (otherwise) back to h x w
      - anything else: the frame is returned unchanged
    """
    h, w = frame.shape[:2]

    if transform_type in ("random", "perspective"):
        src_corners = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
        jitter = min(w, h) * 0.1
        # Each destination corner is nudged inward by up to `jitter` px.
        dst_corners = np.float32([
            [random.uniform(0, jitter), random.uniform(0, jitter)],
            [w - random.uniform(0, jitter), random.uniform(0, jitter)],
            [random.uniform(0, jitter), h - random.uniform(0, jitter)],
            [w - random.uniform(0, jitter), h - random.uniform(0, jitter)],
        ])
        warp = cv2.getPerspectiveTransform(src_corners, dst_corners)
        return cv2.warpPerspective(frame, warp, (w, h))

    if transform_type == "rotation":
        angle = random.uniform(-5, 5)
        rot = cv2.getRotationMatrix2D((w // 2, h // 2), angle, 1.0)
        return cv2.warpAffine(frame, rot, (w, h))

    if transform_type == "scale":
        factor = random.uniform(0.95, 1.05)
        new_w, new_h = int(w * factor), int(h * factor)
        resized = cv2.resize(frame, (new_w, new_h))

        if factor > 1:
            # Enlarged: crop the central h x w window.
            off_x = (new_w - w) // 2
            off_y = (new_h - h) // 2
            return resized[off_y:off_y + h, off_x:off_x + w]

        # Shrunk (or size unchanged by rounding): paste centered onto a
        # black canvas of the original size.
        canvas = np.zeros((h, w, 3), dtype=np.uint8)
        pad_x = (w - new_w) // 2
        pad_y = (h - new_h) // 2
        canvas[pad_y:pad_y + new_h, pad_x:pad_x + new_w] = resized
        return canvas

    # Unknown transform type: no-op.
    return frame
99
+
100
def apply_color_transformation(self, frame):
    """Return *frame* with randomized brightness, contrast, saturation
    and per-channel RGB gains applied.

    All factors are drawn close to 1.0, so the color shift stays subtle.
    Input and output are uint8 RGB arrays.
    """
    img = Image.fromarray(frame)

    # PIL enhancers: factor 1.0 is the identity, so these ranges produce
    # small, near-neutral adjustments.
    for enhancer_cls, lo, hi in (
        (ImageEnhance.Brightness, 0.9, 1.1),
        (ImageEnhance.Contrast, 0.9, 1.1),
        (ImageEnhance.Color, 0.8, 1.2),
    ):
        img = enhancer_cls(img).enhance(random.uniform(lo, hi))

    # Independent small gain per RGB channel (a crude color grade).
    pixels = np.array(img, dtype=np.float32)
    for channel in range(3):
        pixels[:, :, channel] *= random.uniform(0.95, 1.05)

    # Clamp back into the valid byte range.
    return np.clip(pixels, 0, 255).astype(np.uint8)
136
 
137
+ def apply_temporal_modification(self, frames):
138
+ """Modify timing to avoid duplicate detection"""
139
+ if len(frames) < 10:
140
+ return frames
141
+
142
+ modified_frames = []
143
+
144
+ # Add slight variations in frame timing
145
+ for i, frame in enumerate(frames):
146
+ # Occasionally duplicate or skip frames
147
+ action = random.choices(
148
+ ['keep', 'duplicate', 'skip'],
149
+ weights=[0.8, 0.1, 0.1],
150
+ k=1
151
+ )[0]
152
+
153
+ if action == 'keep':
154
+ modified_frames.append(frame)
155
+ elif action == 'duplicate' and i > 0 and i < len(frames) - 1:
156
+ # Add a slightly modified version of this frame
157
+ modified_frame = frame.copy()
158
+
159
+ # Add small noise
160
+ noise = np.random.normal(0, 5, frame.shape)
161
+ modified_frame = np.clip(modified_frame.astype(np.float32) + noise, 0, 255).astype(np.uint8)
162
+
163
+ modified_frames.append(frame)
164
+ modified_frames.append(modified_frame)
165
+ # If 'skip', don't add the frame (effectively speeding up that part)
166
+
167
+ return modified_frames if modified_frames else frames
168
+
169
def apply_visual_effects(self, frame):
    """Return *frame* with subtle appearance changes layered on:
    low-amplitude Gaussian pixel noise, a mild radial vignette (up to
    10% darkening at the corners) and a light 3x3 Gaussian blur.
    """
    h, w = frame.shape[:2]

    # Low-sigma noise: shifts pixel values without visible grain.
    noisy = np.clip(
        frame.astype(np.float32) + np.random.normal(0, 2, frame.shape),
        0, 255,
    ).astype(np.uint8)

    # Radial vignette: darkening grows linearly with distance from the
    # center, reaching at most 10% at the farthest corner.
    cx, cy = w // 2, h // 2
    Y, X = np.ogrid[:h, :w]
    distance = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2)
    falloff = 1 - (distance / np.sqrt(cx ** 2 + cy ** 2)) * 0.1
    shaded = (noisy * np.stack([falloff] * 3, axis=2)).astype(np.uint8)

    # Slight blur evens out sharpness differences introduced above.
    return cv2.GaussianBlur(shaded, (3, 3), 0)
191
+
192
def apply_content_rearrangement(self, frames):
    """Return *frames* with its middle segments randomly reordered.

    The list is cut into chunks of max(10, len(frames) // 5) frames; the
    first and last chunks stay anchored in place for coherence while the
    chunks between them are shuffled. Inputs with fewer than 20 frames,
    or that yield fewer than 2 chunks, are returned untouched.
    """
    if len(frames) < 20:
        return frames

    chunk = max(10, len(frames) // 5)
    segments = [
        frames[start:start + chunk]
        for start in range(0, len(frames), chunk)
        if frames[start:start + chunk]
    ]

    if len(segments) < 2:
        return frames

    if len(segments) > 2:
        # Shuffle only the interior; opening and closing chunks stay put.
        interior = segments[1:-1]
        random.shuffle(interior)
        segments = [segments[0], *interior, segments[-1]]

    # Flatten the segment list back into a single frame sequence.
    reordered = []
    for segment in segments:
        reordered.extend(segment)
    return reordered
221
+
222
def apply_comprehensive_transformation(self, video_path, intensity="medium"):
    """Run the full transformation pipeline on *video_path*.

    Stages, gated by *intensity* ("low" | "medium" | "high"):
      1. Geometric transform per frame (random type at "high"; otherwise
         every third frame rotates and the rest scale).
      2. Color transformation on every frame.
      3. Temporal modification ("medium" and "high" only).
      4. Content rearrangement ("high" only).
      5. Visual effects on every frame.

    Returns:
        tuple: (path to the written .mp4, status message), or
        (None, error message) when input is missing or a stage fails.
    """
    if not video_path:
        return None, "Please upload a video first"

    try:
        frames, fps = self.extract_frames(video_path)
        if not frames:
            return None, "Failed to extract frames"

        original_count = len(frames)
        print(f"Processing {original_count} frames...")

        # Stage 1: geometric transformations.
        print("Applying geometric transformations...")
        geo_frames = []
        for i, frame in enumerate(frames):
            if intensity == "high":
                transform_type = random.choice(["perspective", "rotation", "scale"])
            else:
                # Deterministic mix for lower intensities.
                transform_type = "rotation" if i % 3 == 0 else "scale"

            geo_frames.append(self.apply_geometric_transform(frame, transform_type))

        # Stage 2: color transformations.
        print("Applying color transformations...")
        color_frames = [self.apply_color_transformation(f) for f in geo_frames]

        # Stage 3: temporal modifications (medium/high only).
        if intensity in ["medium", "high"]:
            print("Applying temporal modifications...")
            temporal_frames = self.apply_temporal_modification(color_frames)
        else:
            temporal_frames = color_frames

        # Stage 4: content rearrangement (high only).
        if intensity == "high":
            print("Applying content rearrangement...")
            rearranged_frames = self.apply_content_rearrangement(temporal_frames)
        else:
            rearranged_frames = temporal_frames

        # Stage 5: visual effects.
        print("Applying visual effects...")
        final_frames = [self.apply_visual_effects(f) for f in rearranged_frames]

        # Save the result. tempfile.mktemp is deprecated and race-prone;
        # mkstemp creates the file atomically instead.
        print("Saving final video...")
        fd, output_path = tempfile.mkstemp(suffix='.mp4')
        os.close(fd)  # the video writer reopens the path itself

        self.save_video(final_frames, fps, output_path)

        final_count = len(final_frames)
        status_msg = f"""✅ Video successfully transformed!

📊 Transformation Summary:
- Original frames: {original_count}
- Final frames: {final_count}
- Intensity level: {intensity}
- Applied transformations: Geometric, Color, Temporal, Visual Effects

🎯 Result: Video is now significantly different from original while maintaining quality.
Perfect for avoiding duplicate content detection!"""

        return output_path, status_msg

    except Exception as e:
        return None, f"❌ Error: {str(e)}"
297
 
298
# Create the shared transformer instance used by the UI callback.
transformer = AdvancedVideoTransformer()

# Build the Gradio interface: input/settings column on the left,
# results column on the right, with explanatory markdown throughout.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🎬 Advanced Video Transformer
    ### Transform videos to avoid duplicate content detection while maintaining quality
    """)

    with gr.Row():
        # Left column: upload, settings, and feature description.
        with gr.Column(scale=1):
            gr.Markdown("### 📤 Upload Your Video")
            video_input = gr.Video(label="Original Video")

            gr.Markdown("### ⚙️ Transformation Settings")

            intensity = gr.Radio(
                choices=["low", "medium", "high"],
                value="medium",
                label="Transformation Intensity",
                info="Higher intensity = more changes, better for avoiding detection"
            )

            transform_btn = gr.Button("🚀 Transform Video", variant="primary", size="lg")

            gr.Markdown("""
            ### 🎯 What This Does:

            - **Geometric Changes**: Rotation, scaling, perspective warping
            - **Color Adjustments**: Brightness, contrast, saturation variations
            - **Temporal Modifications**: Frame timing changes, occasional duplicates/skips
            - **Visual Effects**: Subtle noise, vignette, slight blur
            - **Content Rearrangement**: Segment reordering (high intensity only)

            All while preserving overall video quality!
            """)

        # Right column: transformed output and status report.
        with gr.Column(scale=1):
            gr.Markdown("### 📥 Transformed Video")
            video_output = gr.Video(label="Transformed Video")
            status_output = gr.Textbox(label="Transformation Status", lines=10)

    # Usage notes shown under the two columns.
    gr.Markdown("""
    ---
    ### 💡 Recommended Settings:

    - **Low Intensity**: Minor changes, good for slight variations
    - **Medium Intensity**: Balanced approach, recommended for most use cases
    - **High Intensity**: Maximum changes, best for strict duplicate detection avoidance

    ### 📈 Quality Preservation Features:

    - Maintains original resolution
    - Preserves audio quality (if any)
    - Smooth transitions between frames
    - Minimal visible artifacts
    """)

    # Wire the button to the transformation pipeline.
    transform_btn.click(
        fn=transformer.apply_comprehensive_transformation,
        inputs=[video_input, intensity],
        outputs=[video_output, status_output]
    )

# Launch with request queueing enabled (video processing is slow).
demo.queue().launch()