GLAkavya committed on
Commit
cbe4e63
Β·
verified Β·
1 Parent(s): e8898e6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +313 -111
app.py CHANGED
@@ -3,10 +3,11 @@ import tempfile
3
  import io
4
  import math
5
  import time
 
6
  import numpy as np
7
  import cv2
8
  import gradio as gr
9
- from PIL import Image
10
 
11
  # ── HF SETUP ─────────────────────────────────────────────────────────────────
12
  hf_token = (
@@ -24,7 +25,7 @@ if hf_token:
24
  except Exception as e:
25
  print(f"⚠️ HF login skipped: {e}")
26
  else:
27
- print("⚠️ No HF token β€” will use Ken Burns fallback only")
28
 
29
  print("βœ… App ready!")
30
 
@@ -45,7 +46,7 @@ def try_hf_model(model_id, pil_image, prompt):
45
  return None
46
  try:
47
  buf = io.BytesIO()
48
- pil_image.save(buf, format="JPEG")
49
  image_bytes = buf.getvalue()
50
  print(f" πŸ€– Trying {model_id} ...")
51
  result = hf_client.image_to_video(
@@ -72,7 +73,7 @@ def generate_video_with_fallback(pil_image, prompt, style, progress_callback=Non
72
  progress_callback(f"⏳ Trying: {model_name}")
73
 
74
  if model_id == "__ken_burns__":
75
- print(" 🎬 Using Ken Burns (local fallback)")
76
  path = generate_video_ken_burns(pil_image, style=style.lower())
77
  return path, f"🎨 {model_name}"
78
 
@@ -90,135 +91,310 @@ def generate_video_with_fallback(pil_image, prompt, style, progress_callback=Non
90
  return path, "🎨 Ken Burns (local)"
91
 
92
 
93
- # ── KEN BURNS VIDEO ───────────────────────────────────────────────────────────
 
 
 
 
94
  def ease_in_out(t):
 
95
  return t * t * (3 - 2 * t)
96
 
97
- def ease_out_bounce(t):
98
- if t < 1/2.75:
99
- return 7.5625 * t * t
100
- elif t < 2/2.75:
101
- t -= 1.5/2.75
102
- return 7.5625 * t * t + 0.75
103
- elif t < 2.5/2.75:
104
- t -= 2.25/2.75
105
- return 7.5625 * t * t + 0.9375
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
106
  else:
107
- t -= 2.625/2.75
108
- return 7.5625 * t * t + 0.984375
 
 
109
 
110
- def apply_vignette(frame, strength=0.6):
111
- h, w = frame.shape[:2]
112
- Y, X = np.ogrid[:h, :w]
113
- cx, cy = w / 2, h / 2
114
- dist = np.sqrt(((X - cx) / cx) ** 2 + ((Y - cy) / cy) ** 2)
115
- mask = np.clip(1.0 - strength * (dist ** 1.5), 0, 1)
116
- return (frame * mask[:, :, np.newaxis]).astype(np.uint8)
117
 
 
 
 
 
 
 
 
 
 
118
  def apply_color_grade(frame, style="premium"):
119
- f = frame.astype(np.float32)
 
 
 
 
 
 
 
 
 
 
 
 
 
120
  if style == "premium":
121
- f[:,:,0] = np.clip(f[:,:,0] * 1.05, 0, 255)
122
- f[:,:,2] = np.clip(f[:,:,2] * 1.08, 0, 255)
123
- f = np.clip(f * 1.05, 0, 255)
 
 
 
 
 
 
 
 
 
 
 
 
 
124
  elif style == "energetic":
125
- gray = np.mean(f, axis=2, keepdims=True)
126
- f = np.clip(gray + 1.4 * (f - gray), 0, 255)
127
- f = np.clip(f * 1.1, 0, 255)
 
 
 
128
  elif style == "fun":
129
- f[:,:,0] = np.clip(f[:,:,0] * 1.1, 0, 255)
130
- f[:,:,1] = np.clip(f[:,:,1] * 1.05, 0, 255)
131
- return f.astype(np.uint8)
 
 
132
 
133
- def generate_video_ken_burns(pil_image, duration_sec=5, fps=24, style="premium"):
134
- total_frames = duration_sec * fps
135
- img = pil_image.convert("RGB")
136
- target_w, target_h = 720, 1280
137
- img = img.resize((target_w, target_h), Image.LANCZOS)
138
 
139
- tmp = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
140
- fourcc = cv2.VideoWriter_fourcc(*"mp4v")
141
- out = cv2.VideoWriter(tmp.name, fourcc, fps, (target_w, target_h))
142
 
143
- pad = 160
144
- big_h, big_w = target_h + pad * 2, target_w + pad * 2
145
- big_img = np.array(img.resize((big_w, big_h), Image.LANCZOS))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
146
 
147
- s1_end = int(fps * 1.5)
148
- s2_end = int(fps * 3.0)
149
- s3_end = int(fps * 4.2)
150
- s4_end = total_frames
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
151
 
152
  for i in range(total_frames):
153
- if i < s1_end:
154
- t = i / s1_end
155
- te = ease_out_bounce(min(t * 1.1, 1.0))
156
- zoom = 1.35 - 0.25 * te
157
- pan_x = int(pad * 0.1 * t)
158
- pan_y = int(-pad * 0.15 * t)
159
- elif i < s2_end:
160
- t = (i - s1_end) / (s2_end - s1_end)
161
- te = ease_in_out(t)
162
- zoom = 1.10 - 0.05 * te
163
- shake_x = int(3 * math.sin(i * 0.8))
164
- shake_y = int(2 * math.cos(i * 1.1))
165
- pan_x = int(pad * 0.1 + shake_x)
166
- pan_y = int(-pad * 0.15 - pad * 0.20 * te + shake_y)
167
- elif i < s3_end:
168
- t = (i - s2_end) / (s3_end - s2_end)
169
- te = ease_in_out(t)
170
- zoom = 1.05 - 0.04 * te
171
- pan_x = int(pad * 0.1 * (1 - te))
172
- pan_y = int(-pad * 0.35 * (1 - te))
173
- else:
174
- t = (i - s3_end) / (s4_end - s3_end)
175
- te = ease_in_out(t)
176
- zoom = 1.01 + 0.03 * te
177
- pan_x = 0
178
- pan_y = 0
179
-
180
- crop_w = int(target_w / zoom)
181
- crop_h = int(target_h / zoom)
182
- cx = big_w // 2 + pan_x
183
- cy = big_h // 2 + pan_y
184
  x1 = max(0, cx - crop_w // 2)
185
  y1 = max(0, cy - crop_h // 2)
186
  x2 = min(big_w, x1 + crop_w)
187
  y2 = min(big_h, y1 + crop_h)
188
 
189
- if x2 - x1 < 10 or y2 - y1 < 10:
190
- x1, y1, x2, y2 = 0, 0, target_w, target_h
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
191
 
192
- cropped = big_img[y1:y2, x1:x2]
193
- frame = cv2.resize(cropped, (target_w, target_h), interpolation=cv2.INTER_LINEAR)
194
- frame = apply_color_grade(frame, style)
195
- frame = apply_vignette(frame, strength=0.55)
196
 
197
- fade_in_end = int(fps * 0.4)
198
- fade_out_sta = int(fps * 4.4)
199
- if i < fade_in_end:
200
- alpha = ease_in_out(i / fade_in_end)
201
- elif i >= fade_out_sta:
202
- alpha = ease_in_out(1.0 - (i - fade_out_sta) / (total_frames - fade_out_sta))
 
203
  else:
204
  alpha = 1.0
205
 
206
- flash_frames = {s1_end, s1_end+1, s2_end, s2_end+1}
207
- if i in flash_frames:
208
- fs = 0.35 if i in {s1_end, s2_end} else 0.15
209
- white = np.ones_like(frame) * 255
210
- frame = cv2.addWeighted(frame, 1 - fs, white.astype(np.uint8), fs, 0)
211
 
212
- frame = np.clip(frame.astype(np.float32) * alpha, 0, 255).astype(np.uint8)
213
  frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
214
- out.write(frame_bgr)
215
 
216
- out.release()
217
  return tmp.name
218
 
219
 
220
  # ── MAIN ──────────────────────────────────────────────────────────────────────
221
- def generate_ad(image, prompt_text, style, progress=gr.Progress()):
222
  if image is None:
223
  return None, "⚠️ Please upload an image first!"
224
 
@@ -229,7 +405,7 @@ def generate_ad(image, prompt_text, style, progress=gr.Progress()):
229
 
230
  def log(msg):
231
  status_lines.append(msg)
232
- progress(0.2 + len(status_lines) * 0.12, desc=msg)
233
 
234
  progress(0.1, desc="🎬 Starting video generation...")
235
 
@@ -240,6 +416,17 @@ def generate_ad(image, prompt_text, style, progress=gr.Progress()):
240
  progress_callback=log,
241
  )
242
 
 
 
 
 
 
 
 
 
 
 
 
243
  progress(1.0, desc="βœ… Done!")
244
  log_text = "\n".join(status_lines) + f"\n\nβœ… Used: {model_used}"
245
  return video_path, log_text
@@ -247,27 +434,34 @@ def generate_ad(image, prompt_text, style, progress=gr.Progress()):
247
 
248
  # ── UI ────────────────────────────────────────────────────────────────────────
249
  css = """
250
- #title { text-align:center; font-size:2.2rem; font-weight:800; margin-bottom:.2rem; }
251
- #sub { text-align:center; color:#888; margin-bottom:1.5rem; }
252
  """
253
 
254
  with gr.Blocks(css=css, theme=gr.themes.Soft(primary_hue="violet")) as demo:
255
 
256
  gr.Markdown("# 🎬 AI Reel Generator", elem_id="title")
257
- gr.Markdown("Image upload karo β†’ cinematic video ready!", elem_id="sub")
258
 
259
  with gr.Row():
 
260
  with gr.Column(scale=1):
261
  image_input = gr.Image(label="πŸ“Έ Upload Image", type="pil", height=320)
262
  prompt_input = gr.Textbox(
263
- label="✏️ Prompt (optional)",
264
  placeholder="e.g. cinematic slow zoom, product floating in air ...",
265
- lines=3,
266
  )
267
  style_dd = gr.Dropdown(
268
- choices=["Fun", "Premium", "Energetic"],
269
- value="Premium", label="🎨 Style",
270
  )
 
 
 
 
 
 
271
  gen_btn = gr.Button("πŸš€ Generate Video", variant="primary", size="lg")
272
 
273
  gr.Markdown(
@@ -277,18 +471,26 @@ with gr.Blocks(css=css, theme=gr.themes.Soft(primary_hue="violet")) as demo:
277
  "3. Stable Video Diffusion XT\n"
278
  "4. KlingTeam/LivePortrait\n"
279
  "5. Lightricks/LTX-Video\n"
280
- "6. Ken Burns (always works βœ…)"
281
  )
282
 
 
283
  with gr.Column(scale=1):
284
- video_out = gr.Video(label="πŸŽ₯ Generated Video", height=450)
285
  status_out = gr.Textbox(label="πŸ“Š Model Log", lines=8, interactive=False)
286
 
287
  gen_btn.click(
288
  fn=generate_ad,
289
- inputs=[image_input, prompt_input, style_dd],
290
  outputs=[video_out, status_out],
291
  )
292
 
 
 
 
 
 
 
 
293
  if __name__ == "__main__":
294
  demo.launch()
 
3
  import io
4
  import math
5
  import time
6
+ import random
7
  import numpy as np
8
  import cv2
9
  import gradio as gr
10
+ from PIL import Image, ImageFilter, ImageEnhance
11
 
12
  # ── HF SETUP ─────────────────────────────────────────────────────────────────
13
  hf_token = (
 
25
  except Exception as e:
26
  print(f"⚠️ HF login skipped: {e}")
27
  else:
28
+ print("⚠️ No HF token β€” Ken Burns fallback will be used")
29
 
30
  print("βœ… App ready!")
31
 
 
46
  return None
47
  try:
48
  buf = io.BytesIO()
49
+ pil_image.save(buf, format="JPEG", quality=95)
50
  image_bytes = buf.getvalue()
51
  print(f" πŸ€– Trying {model_id} ...")
52
  result = hf_client.image_to_video(
 
73
  progress_callback(f"⏳ Trying: {model_name}")
74
 
75
  if model_id == "__ken_burns__":
76
+ print(" 🎬 Using Ken Burns (cinematic local)")
77
  path = generate_video_ken_burns(pil_image, style=style.lower())
78
  return path, f"🎨 {model_name}"
79
 
 
91
  return path, "🎨 Ken Burns (local)"
92
 
93
 
94
+ # ═══════════════════════════════════════════════════════════════════
95
+ # CINEMATIC KEN BURNS β€” UPGRADED
96
+ # ═══════════════════════════════════════════════════════════════════
97
+
98
+ # ── Easing ───────────────────────────────────────────────────────────────────
99
def ease_in_out(t):
    """Smoothstep easing: clamp t into [0, 1], then return 3t^2 - 2t^3."""
    clamped = min(1.0, max(0.0, t))
    return clamped * clamped * (3 - 2 * clamped)
102
 
103
def ease_out_expo(t):
    """Exponential ease-out: fast start, asymptotic approach to 1; exactly 1.0 for t >= 1."""
    if t >= 1:
        return 1.0
    return 1 - 2.0 ** (-10 * t)
105
+
106
def ease_in_out_cubic(t):
    """Cubic ease-in-out on clamped t: accelerate below 0.5, decelerate above."""
    x = min(1.0, max(0.0, t))
    if x >= 0.5:
        return 1 - ((-2 * x + 2) ** 3) / 2
    return 4 * x * x * x
111
+
112
def cubic_bezier(t, p0, p1, p2, p3):
    """Evaluate a 1-D cubic Bezier curve at parameter t (Bernstein polynomial form)."""
    u = 1 - t
    term0 = u**3 * p0
    term1 = 3 * u**2 * t * p1
    term2 = 3 * u * t**2 * p2
    term3 = t**3 * p3
    return term0 + term1 + term2 + term3
116
+
117
+
118
+ # ── Image Pre-processing ──────────────────────────────────────────────────────
119
def preprocess_image(pil_image, target_w, target_h):
    """
    Prepare a PIL image for rendering: center-crop to the target aspect
    ratio (so no stretching occurs), LANCZOS-resize to target_w x target_h,
    then apply an unsharp mask and gentle contrast/saturation/sharpness
    lifts. Returns the result as an RGB numpy array.
    """
    img = pil_image.convert("RGB")

    src_w, src_h = img.size
    have_ratio = src_w / src_h
    want_ratio = target_w / target_h

    if have_ratio > want_ratio:
        # Source is too wide: trim equal strips from left and right.
        crop_w = int(src_h * want_ratio)
        x0 = (src_w - crop_w) // 2
        img = img.crop((x0, 0, x0 + crop_w, src_h))
    else:
        # Source is too tall (or exact): trim equal strips from top and bottom.
        crop_h = int(src_w / want_ratio)
        y0 = (src_h - crop_h) // 2
        img = img.crop((0, y0, src_w, y0 + crop_h))

    # High-quality downscale/upscale to the exact target size.
    img = img.resize((target_w, target_h), Image.LANCZOS)

    # Unsharp mask restores detail softened by the resize.
    img = img.filter(ImageFilter.UnsharpMask(radius=1.2, percent=130, threshold=2))

    # Mild global grade: a touch more contrast, color and sharpness.
    img = ImageEnhance.Contrast(img).enhance(1.08)
    img = ImageEnhance.Color(img).enhance(1.12)
    img = ImageEnhance.Sharpness(img).enhance(1.15)

    return np.array(img)
150
+
151
+
152
+ # ── Color Grading ─────────────────────────────────────���───────────────────────
153
def apply_color_grade(frame, style="premium"):
    """
    Apply a style-dependent color grade to an RGB uint8 frame.

    All math runs in normalized float32 [0, 1]:
      1. a softening tone curve,
      2. per-style channel shifts / saturation moves,
      3. scale back to uint8 with clipping.
    """
    f = frame.astype(np.float32) / 255.0

    # Tone curve: subtracts strength*x*(1-x)*(2x-1), which raises values
    # below 0.5 and lowers values above it — a soft, matte flattening.
    # NOTE(review): this is the *inverse* of a contrast S-curve; confirm
    # the flattening (rather than contrast-boosting) effect is intended.
    strength = 0.20
    f = f - strength * f * (1 - f) * (2 * f - 1)

    if style == "premium":
        # Teal/orange split toning driven by per-pixel luminance.
        lum = 0.299 * f[:, :, 0] + 0.587 * f[:, :, 1] + 0.114 * f[:, :, 2]
        dark = np.clip(1.0 - lum * 2.5, 0, 1)       # shadow weight
        bright = np.clip((lum - 0.6) * 2.5, 0, 1)   # highlight weight
        # Shadows drift toward teal: less red, more green/blue.
        f[:, :, 0] -= 0.04 * dark
        f[:, :, 1] += 0.03 * dark
        f[:, :, 2] += 0.05 * dark
        # Highlights drift toward warm orange: more red/green, less blue.
        f[:, :, 0] += 0.05 * bright
        f[:, :, 1] += 0.02 * bright
        f[:, :, 2] -= 0.04 * bright
        f *= 1.04  # slight overall brightness lift
    elif style == "energetic":
        # Punchy: +50% saturation, brighter with slightly crushed blacks,
        # plus a small red-channel push.
        luma = 0.299 * f[:, :, 0:1] + 0.587 * f[:, :, 1:2] + 0.114 * f[:, :, 2:3]
        f = np.clip(luma + 1.5 * (f - luma), 0, 1)
        f = np.clip(f * 1.12 - 0.02, 0, 1)
        f[:, :, 0] = np.clip(f[:, :, 0] * 1.06, 0, 1)
    elif style == "fun":
        # Warm pastel: boost reds/greens, pull blue, lift blacks for matte.
        f[:, :, 0] = np.clip(f[:, :, 0] * 1.10, 0, 1)
        f[:, :, 1] = np.clip(f[:, :, 1] * 1.06, 0, 1)
        f[:, :, 2] = np.clip(f[:, :, 2] * 0.95, 0, 1)
        f = np.clip(f * 1.05 + 0.02, 0, 1)

    return np.clip(f * 255, 0, 255).astype(np.uint8)
 
 
 
 
200
 
 
 
 
201
 
202
+ # ── Vignette ──────────────────────────────────────────────────────────────────
203
def apply_vignette(frame, strength=0.65, softness=2.0):
    """
    Darken the frame toward its edges with an oval falloff (slightly
    narrower horizontally, which suits portrait/reel frames).

    strength scales the maximum darkening; softness is the falloff
    exponent. Returns a new uint8 array.
    """
    rows, cols = frame.shape[:2]
    yy, xx = np.ogrid[:rows, :cols]
    half_w, half_h = cols / 2, rows / 2
    # Normalized elliptical distance from the frame center.
    radial = np.sqrt(((xx - half_w) / (half_w * 0.85)) ** 2 + ((yy - half_h) / half_h) ** 2)
    falloff = np.clip(1.0 - strength * radial ** softness, 0, 1)
    return (frame * falloff[:, :, np.newaxis]).astype(np.uint8)
212
+
213
+
214
+ # ── Film Grain ────────────────────────────────────────────────────────────────
215
def apply_film_grain(frame, intensity=6.0):
    """
    Add zero-mean Gaussian noise (std = intensity) to every channel so the
    frame feels organic rather than synthetically flat.

    Draws from NumPy's global RNG, so output varies call to call unless
    the seed is fixed. Returns a new uint8 array.
    """
    noise = np.random.normal(0, intensity, frame.shape).astype(np.float32)
    return np.clip(frame.astype(np.float32) + noise, 0, 255).astype(np.uint8)
220
+
221
+
222
+ # ── Light Leak ────────────────────────────────────────────────────────────────
223
def apply_light_leak(frame, progress, style="premium"):
    """
    Overlay a soft diagonal light streak while progress is in (0.28, 0.65);
    outside that window the frame is returned unchanged (same object).

    The streak sweeps along the lower-left-to-upper-right diagonal, tinted
    per style, with brightness rising and falling on a half sine.
    """
    if progress <= 0.28 or progress >= 0.65:
        return frame

    h, w = frame.shape[:2]
    local = (progress - 0.28) / 0.37       # 0 -> 1 across the leak window
    envelope = math.sin(local * math.pi)   # fade the streak in, then out

    rows, cols = np.ogrid[:h, :w]
    # Diagonal coordinate in [0, 1], increasing toward the top-right.
    diag = (cols / w + (h - rows) / h) / 2.0
    center = 0.3 + local * 0.6             # streak position sweeps across
    width = 0.25
    band = np.exp(-((diag - center) ** 2) / (2 * width ** 2))

    tints = {
        "premium": np.array([255, 220, 160], dtype=np.float32),    # warm gold
        "energetic": np.array([160, 200, 255], dtype=np.float32),  # electric blue
    }
    tint = tints.get(style, np.array([255, 180, 200], dtype=np.float32))  # pink fallback

    glow = (band[:, :, np.newaxis] * tint * (envelope * 0.22)).astype(np.float32)
    lit = np.clip(frame.astype(np.float32) + glow, 0, 255)
    return lit.astype(np.uint8)
253
+
254
+
255
+ # ── Cinematic Bars ─────────────────────────────────────────────────────────────
256
def apply_letterbox(frame, bar_h=40):
    """
    Draw solid black cinematic bars, ``bar_h`` pixels tall, across the top
    and bottom of ``frame``. Mutates the array in place and returns it.

    Fix: ``bar_h <= 0`` is now a no-op — previously ``frame[-0:]`` selected
    the entire array and blacked out the whole frame.
    """
    if bar_h <= 0:
        return frame
    frame[:bar_h, :] = 0
    frame[-bar_h:, :] = 0
    return frame
261
+
262
+
263
+ # ── Main Video Generator ──────────────────────────────────────────────────────
264
def generate_video_ken_burns(
    pil_image,
    duration_sec=6,
    fps=30,
    style="premium",
    add_grain=True,
    add_leak=True,
    add_bars=True,
):
    """
    Render a cinematic Ken Burns clip (1080x1920 portrait mp4) from a
    single still image.

    Per frame: eased zoom/pan over a padded canvas, decaying micro camera
    shake early on, color grade, optional light leak, oval vignette,
    optional film grain, optional letterbox bars, fade in/out.

    Parameters
    ----------
    pil_image : PIL.Image — the source still.
    duration_sec, fps : clip length and frame rate (frames = duration*fps).
    style : "premium" | "energetic" | "fun" color grade.
    add_grain, add_leak, add_bars : toggles for the matching effects.

    Returns
    -------
    str — path of the written .mp4 file.

    Fixes vs. previous version: the NamedTemporaryFile handle is closed
    before cv2 opens the path (it leaked an fd and blocks reopening on
    Windows); a 1-frame render no longer divides by zero; the writer is
    released even if a frame raises; the dead `or (zoom is None and ...)`
    branch in the segment lookup is removed (it never assigned anything).
    """
    TARGET_W, TARGET_H = 1080, 1920

    total_frames = max(1, int(duration_sec * fps))

    # Padded canvas so pans and zooms never sample off the edge.
    pad = 220
    big_w = TARGET_W + pad * 2
    big_h = TARGET_H + pad * 2
    base = preprocess_image(pil_image, big_w, big_h)

    # Reserve an output path, then close the handle immediately: keeping it
    # open leaks the descriptor and prevents cv2 from reopening the file on
    # Windows.
    tmp = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
    out_path = tmp.name
    tmp.close()

    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    writer = cv2.VideoWriter(out_path, fourcc, fps, (TARGET_W, TARGET_H))

    # Motion keyframes: (t0, t1, zoom0, zoom1, pan_x0, pan_x1, pan_y0, pan_y1)
    SEG = [
        # 0.00-0.25 : burst zoom in
        (0.00, 0.25, 1.40, 1.15, 0, int(-pad * 0.10), 0, int(-pad * 0.12)),
        # 0.25-0.55 : slow upward drift
        (0.25, 0.55, 1.15, 1.08, int(-pad * 0.05), int(pad * 0.08), int(-pad * 0.12), int(-pad * 0.30)),
        # 0.55-0.78 : subtle right pan + tiny zoom out
        (0.55, 0.78, 1.08, 1.05, int(pad * 0.08), int(pad * 0.18), int(-pad * 0.30), int(-pad * 0.18)),
        # 0.78-1.00 : pull back + settle
        (0.78, 1.00, 1.05, 1.00, int(pad * 0.18), 0, int(-pad * 0.18), 0),
    ]

    try:
        for i in range(total_frames):
            # max(..., 1) guards the 1-frame case (previously ZeroDivisionError).
            t_global = i / max(total_frames - 1, 1)

            # Interpolate zoom/pan within the active keyframe segment;
            # defaults cover any t_global outside all segments.
            zoom, pan_x, pan_y = 1.00, 0, 0
            for t0, t1, z0, z1, px0, px1, py0, py1 in SEG:
                if t0 <= t_global <= t1:
                    te = ease_in_out_cubic((t_global - t0) / (t1 - t0))
                    zoom = z0 + (z1 - z0) * te
                    pan_x = int(px0 + (px1 - px0) * te)
                    pan_y = int(py0 + (py1 - py0) * te)
                    break

            # Hand-held micro shake, decaying over the first 30% of the clip.
            if t_global < 0.30:
                wobble = (0.30 - t_global) / 0.30 * 2.5
                pan_x += int(wobble * math.sin(i * 1.3))
                pan_y += int(wobble * math.cos(i * 0.9))

            # Crop window on the padded canvas.
            crop_w = int(TARGET_W / zoom)
            crop_h = int(TARGET_H / zoom)
            cx = big_w // 2 + pan_x
            cy = big_h // 2 + pan_y
            x1 = max(0, cx - crop_w // 2)
            y1 = max(0, cy - crop_h // 2)
            x2 = min(big_w, x1 + crop_w)
            y2 = min(big_h, y1 + crop_h)
            if (x2 - x1) < 10 or (y2 - y1) < 10:
                # Degenerate crop guard: fall back to the canvas origin.
                x1, y1, x2, y2 = 0, 0, TARGET_W, TARGET_H

            frame = cv2.resize(
                base[y1:y2, x1:x2],
                (TARGET_W, TARGET_H),
                interpolation=cv2.INTER_LANCZOS4,  # best quality cv2 offers
            )

            # Post-processing chain.
            frame = apply_color_grade(frame, style)
            if add_leak:
                frame = apply_light_leak(frame, t_global, style)
            frame = apply_vignette(frame, strength=0.60, softness=2.2)
            if add_grain:
                frame = apply_film_grain(frame, intensity=4.0 if style == "premium" else 5.5)
            if add_bars:
                frame = apply_letterbox(frame, bar_h=48)

            # Fade in over the first 6%, fade out over the last 10%.
            FADE_IN, FADE_OUT = 0.06, 0.90
            if t_global < FADE_IN:
                alpha = ease_out_expo(t_global / FADE_IN)
            elif t_global > FADE_OUT:
                alpha = ease_in_out(1.0 - (t_global - FADE_OUT) / (1.0 - FADE_OUT))
            else:
                alpha = 1.0
            if alpha < 1.0:
                frame = np.clip(frame.astype(np.float32) * alpha, 0, 255).astype(np.uint8)

            writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
    finally:
        # Always release so the mp4 container is finalized even on error.
        writer.release()

    return out_path
394
 
395
 
396
  # ── MAIN ──────────────────────────────────────────────────────────────────────
397
+ def generate_ad(image, prompt_text, style, add_grain, add_leak, add_bars, progress=gr.Progress()):
398
  if image is None:
399
  return None, "⚠️ Please upload an image first!"
400
 
 
405
 
406
  def log(msg):
407
  status_lines.append(msg)
408
+ progress(0.2 + len(status_lines) * 0.10, desc=msg)
409
 
410
  progress(0.1, desc="🎬 Starting video generation...")
411
 
 
416
  progress_callback=log,
417
  )
418
 
419
+ # If ken burns was used, regenerate with user options
420
+ if "Ken Burns" in model_used:
421
+ progress(0.7, desc="🎨 Rendering cinematic video...")
422
+ video_path = generate_video_ken_burns(
423
+ pil_image,
424
+ style = style.lower(),
425
+ add_grain= add_grain,
426
+ add_leak = add_leak,
427
+ add_bars = add_bars,
428
+ )
429
+
430
  progress(1.0, desc="βœ… Done!")
431
  log_text = "\n".join(status_lines) + f"\n\nβœ… Used: {model_used}"
432
  return video_path, log_text
 
434
 
435
  # ── UI ────────────────────────────────────────────────────────────────────────
436
  css = """
437
+ #title { text-align:center; font-size:2.4rem; font-weight:900; margin-bottom:.2rem; }
438
+ #sub { text-align:center; color:#888; margin-bottom:1.5rem; font-size:1.05rem; }
439
  """
440
 
441
  with gr.Blocks(css=css, theme=gr.themes.Soft(primary_hue="violet")) as demo:
442
 
443
  gr.Markdown("# 🎬 AI Reel Generator", elem_id="title")
444
+ gr.Markdown("Image upload karo β†’ **cinematic 1080p video** ready in seconds!", elem_id="sub")
445
 
446
  with gr.Row():
447
+ # ── LEFT ─────────────────────────────────────────────────────────────
448
  with gr.Column(scale=1):
449
  image_input = gr.Image(label="πŸ“Έ Upload Image", type="pil", height=320)
450
  prompt_input = gr.Textbox(
451
+ label="✏️ Prompt (optional β€” for AI models)",
452
  placeholder="e.g. cinematic slow zoom, product floating in air ...",
453
+ lines=2,
454
  )
455
  style_dd = gr.Dropdown(
456
+ choices=["Premium", "Energetic", "Fun"],
457
+ value="Premium", label="🎨 Color Grade Style",
458
  )
459
+
460
+ with gr.Row():
461
+ grain_cb = gr.Checkbox(label="🎞 Film Grain", value=True)
462
+ leak_cb = gr.Checkbox(label="✨ Light Leak", value=True)
463
+ bars_cb = gr.Checkbox(label="🎬 Cinematic Bars", value=True)
464
+
465
  gen_btn = gr.Button("πŸš€ Generate Video", variant="primary", size="lg")
466
 
467
  gr.Markdown(
 
471
  "3. Stable Video Diffusion XT\n"
472
  "4. KlingTeam/LivePortrait\n"
473
  "5. Lightricks/LTX-Video\n"
474
+ "6. 🎨 Ken Burns **1080p** (always works βœ…)"
475
  )
476
 
477
+ # ── RIGHT ────────────────────────────────────────────────────────────
478
  with gr.Column(scale=1):
479
+ video_out = gr.Video(label="πŸŽ₯ Generated Video (1080Γ—1920)", height=500)
480
  status_out = gr.Textbox(label="πŸ“Š Model Log", lines=8, interactive=False)
481
 
482
  gen_btn.click(
483
  fn=generate_ad,
484
+ inputs=[image_input, prompt_input, style_dd, grain_cb, leak_cb, bars_cb],
485
  outputs=[video_out, status_out],
486
  )
487
 
488
+ gr.Markdown(
489
+ "---\n"
490
+ "**Ken Burns pipeline:** Smart crop β†’ LANCZOS resize β†’ Unsharp Mask β†’ "
491
+ "S-curve + Split-toning Grade β†’ Light Leak β†’ Oval Vignette β†’ "
492
+ "Film Grain β†’ Cinematic Bars β†’ Bezier motion β†’ 30fps @ 1080Γ—1920"
493
+ )
494
+
495
  if __name__ == "__main__":
496
  demo.launch()