jebin2 committed on
Commit
de68d35
·
1 Parent(s): b388d86

veo change

Browse files
Files changed (3) hide show
  1. src/api_clients.py +23 -10
  2. src/automation.py +19 -12
  3. src/video_renderer.py +29 -15
src/api_clients.py CHANGED
@@ -466,16 +466,29 @@ class APIClients:
466
  """
467
  try:
468
  if os.getenv("TEST_AUTOMATION", "").lower() == "true":
469
- return {
470
- "task_id": "644319db-5226-42cf-b45f-5388e40d38a6",
471
- "video_url": f"{os.getenv('TEST_DATA_DIRECTORY')}/veo_text_to_video.mp4",
472
- "local_path": f"{os.getenv('TEST_DATA_DIRECTORY')}/veo_text_to_video.mp4",
473
- "duration": 3,
474
- "prompt": prompt,
475
- "status": "SUCCEEDED",
476
- "created_at": "2025-10-15T12:39:24.279Z",
477
- "model": "gen4_turbo",
478
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
479
  logger.info(f"🎬 Generating video with: {prompt[:1000]}...")
480
 
481
  prompt_image_value = ""
 
466
  """
467
  try:
468
  if os.getenv("TEST_AUTOMATION", "").lower() == "true":
469
+ if image_input:
470
+ return {
471
+ "task_id": "644319db-5226-42cf-b45f-5388e40d38a6",
472
+ "video_url": f"{os.getenv('TEST_DATA_DIRECTORY')}/image-to-video.mp4",
473
+ "local_path": f"{os.getenv('TEST_DATA_DIRECTORY')}/image-to-video.mp4",
474
+ "duration": 3,
475
+ "prompt": prompt,
476
+ "status": "SUCCEEDED",
477
+ "created_at": "2025-10-15T12:39:24.279Z",
478
+ "model": "gen4_turbo",
479
+ }
480
+ else:
481
+ return {
482
+ "task_id": "644319db-5226-42cf-b45f-5388e40d38a6",
483
+ "video_url": f"{os.getenv('TEST_DATA_DIRECTORY')}/veo_text_to_video.mp4",
484
+ "local_path": f"{os.getenv('TEST_DATA_DIRECTORY')}/veo_text_to_video.mp4",
485
+ "duration": 3,
486
+ "prompt": prompt,
487
+ "status": "SUCCEEDED",
488
+ "created_at": "2025-10-15T12:39:24.279Z",
489
+ "model": "gen4_turbo",
490
+ }
491
+
492
  logger.info(f"🎬 Generating video with: {prompt[:1000]}...")
493
 
494
  prompt_image_value = ""
src/automation.py CHANGED
@@ -258,26 +258,27 @@ class ContentAutomation:
258
  logger.info(f"Auto-generated captions: {captions[:50]}...")
259
 
260
  if os.getenv("USE_VEO", "false").lower() == "true":
261
- video_data = await self.api_clients.generate_video(
262
  prompt=strategy["runway_veo_prompt"], duration=strategy.get("duration", 4)
263
  )
264
- else:
265
- # Step 1: Generate image using Imagen 4 Ultra
266
- image_path = await self.api_clients.generate_image(strategy["gemini_prompt"])
267
- if not image_path:
268
- raise Exception("Image generation failed")
269
 
270
- # Step 2: Upload image to GCS
271
- await self.api_clients.store_in_gcs(image_path, "image")
 
 
272
 
 
 
273
 
274
- # Step 3: Generate video using gen4_turbo
275
- video_data = await self.api_clients.generate_video(
276
- prompt=strategy["runway_prompt"], image_input=image_path, duration=strategy.get("duration", 3)
277
- )
 
278
 
279
  video_data["captions"] = captions
280
  video_data["script"] = self.data_holder.tts_script
 
281
 
282
  logger.info(f"✓ Hook video generated: {video_data['task_id']}")
283
  return video_data
@@ -300,6 +301,12 @@ class ContentAutomation:
300
  download_tasks.append(
301
  self._download_with_fallback(hook_url, "hook_video.mp4", assets["hook_video"], "local_path")
302
  )
 
 
 
 
 
 
303
 
304
  # Download library videos
305
  for i, video in enumerate(assets.get("selected_videos", [])):
 
258
  logger.info(f"Auto-generated captions: {captions[:50]}...")
259
 
260
  if os.getenv("USE_VEO", "false").lower() == "true":
261
+ veo_video_data = await self.api_clients.generate_video(
262
  prompt=strategy["runway_veo_prompt"], duration=strategy.get("duration", 4)
263
  )
 
 
 
 
 
264
 
265
+ # Step 1: Generate image using Imagen 4 Ultra
266
+ image_path = await self.api_clients.generate_image(strategy["gemini_prompt"])
267
+ if not image_path:
268
+ raise Exception("Image generation failed")
269
 
270
+ # Step 2: Upload image to GCS
271
+ await self.api_clients.store_in_gcs(image_path, "image")
272
 
273
+
274
+ # Step 3: Generate video using gen4_turbo
275
+ video_data = await self.api_clients.generate_video(
276
+ prompt=strategy["runway_prompt"], image_input=image_path, duration=strategy.get("duration", 3)
277
+ )
278
 
279
  video_data["captions"] = captions
280
  video_data["script"] = self.data_holder.tts_script
281
+ video_data["veo_video_data"] = veo_video_data
282
 
283
  logger.info(f"✓ Hook video generated: {video_data['task_id']}")
284
  return video_data
 
301
  download_tasks.append(
302
  self._download_with_fallback(hook_url, "hook_video.mp4", assets["hook_video"], "local_path")
303
  )
304
+ # Download VEO hook video if available
305
+ if assets["hook_video"].get("veo_video_data") and assets["hook_video"].get("veo_video_data").get("video_url"):
306
+ veo_hook_url = assets["hook_video"]["veo_video_data"]["video_url"]
307
+ download_tasks.append(
308
+ self._download_with_fallback(veo_hook_url, "veo_hook_url.mp4", assets["hook_video"]["veo_video_data"], "local_path")
309
+ )
310
 
311
  # Download library videos
312
  for i, video in enumerate(assets.get("selected_videos", [])):
src/video_renderer.py CHANGED
@@ -36,6 +36,7 @@ import numpy as np
36
  ALLOWED_BG_MUSIC_VOLUME = 0.08
37
  REVERSE_THRESHOLD = 0.5
38
  HOOK_VIDEO_DURATION = 1.5
 
39
 
40
  class VideoRenderer:
41
  def __init__(self, config: Dict, data_holder: DataHolder = None):
@@ -83,35 +84,43 @@ class VideoRenderer:
83
  async def _prepare_video_clips_natural_speed(self) -> List[VideoFileClip]:
84
  """Load and prepare all video clips - NO speed adjustments"""
85
  clips = []
 
 
86
 
87
  try:
88
  assets = self.data_holder.visual_assets
89
  # Load hook video for seamless looping
 
90
  if assets.get("hook_video") and assets["hook_video"].get("local_path"):
91
- hook_clip = VideoFileClip(assets["hook_video"]["local_path"])
 
 
 
 
 
 
 
 
92
  hook_duration = hook_clip.duration
93
  hook_clip = hook_clip.without_audio()
94
 
95
  logger.info(f"🔄 Creating seamless loop from {hook_duration:.2f}s hook video (NATURAL SPEED)")
96
 
97
- # FIXED DURATIONS: 1.5 seconds each
98
- HOOK_SEGMENT_DURATION = 1.5
99
-
100
  # Last 1.5 seconds for start
101
- start_segment_begin = max(0, hook_duration - HOOK_SEGMENT_DURATION)
102
  hook_start = hook_clip.subclip(start_segment_begin, hook_duration)
103
  # clips.append(("hook_start", hook_start))
104
  logger.info(f"✓ Hook start: {hook_start.duration:.2f}s")
105
 
106
  # First 1.5 seconds for end
107
- hook_end_duration = min(HOOK_SEGMENT_DURATION, hook_duration)
108
  hook_end = hook_clip.subclip(0, hook_end_duration)
109
  # clips.append(("hook_end", hook_end))
110
  logger.info(f"✓ Hook end: {hook_end.duration:.2f}s")
111
 
112
  hook_clip.close()
113
  else:
114
- logger.warning("⚠️ No hook video available for processing")
115
 
116
  # Combine all tts_script_segment texts into one string
117
  selected_videos = assets["selected_videos"]
@@ -127,6 +136,7 @@ class VideoRenderer:
127
 
128
  target_size = (1080, 1920)
129
  extra_secs = 0.0
 
130
  # Load library videos - NO speed adjustments
131
  for i, lib_video in enumerate(selected_videos):
132
  if lib_video.get("local_path"):
@@ -139,6 +149,10 @@ class VideoRenderer:
139
  prev_clip_file = None
140
  if i == 0:
141
  lib_hook_start = self._resize_for_vertical(hook_start, target_size)
 
 
 
 
142
  if i+1 == len(assets.get("selected_videos", [])):
143
  lib_hook_end = self._resize_for_vertical(hook_end, target_size)
144
  prev_clip = clips[-1][1]
@@ -218,6 +232,7 @@ class VideoRenderer:
218
 
219
  def _prepare_with_start_hook(self, lib_video, original_clip_path, alternate_url_local_path, original_clip, lib_hook_start, target_duration, prev_clip):
220
  """Handle clip preparation when a start hook is present."""
 
221
  logger.info(f"Start hook detected with duration {lib_hook_start.duration:.2f}s")
222
  total_duration = lib_hook_start.duration + original_clip.duration
223
 
@@ -226,12 +241,15 @@ class VideoRenderer:
226
  logger.info("Target duration fits start hook + original clip, concatenating and trimming")
227
  result = concatenate_videoclips([lib_hook_start, original_clip], method="compose").subclip(0, target_duration)
228
  logger.info(f"Prepared clip duration: {result.duration:.2f}s")
 
229
  return prev_clip, result, 0.0
230
 
231
  # Case 2: Need to extend beyond original clip
232
  modified_clip, extra_secs = self._extend_or_trim_clip(lib_video, original_clip_path, alternate_url_local_path, original_clip, target_duration-HOOK_VIDEO_DURATION)
233
 
234
  result = concatenate_videoclips([lib_hook_start, modified_clip], method="compose").subclip(0, target_duration)
 
 
235
  logger.info(f"Prepared clip duration: {result.duration:.2f}s")
236
  return prev_clip, result, extra_secs
237
 
@@ -331,10 +349,10 @@ class VideoRenderer:
331
  elif target_duration > total_duration:
332
  if alternate_url_local_path is None or (target_duration - total_duration <= REVERSE_THRESHOLD): # Small tolerance for floating point
333
  logger.info("⚠️ Reversing clip.")
334
- if original_clip_path:
335
- reversed_clip = self.reverse_clip(original_clip_path)
336
- else:
337
- reversed_clip = self.reverse_clip(original_clip)
338
  loop_clip = concatenate_videoclips([original_clip, reversed_clip, original_clip, reversed_clip], method="compose")
339
  return loop_clip.subclip(0, target_duration), target_duration - original_clip.duration
340
  else:
@@ -460,10 +478,6 @@ class VideoRenderer:
460
  hook_end = None
461
  library_clips = []
462
 
463
- HOOK_DURATION = 1.5
464
- if os.getenv("USE_VEO", "false").lower() == "true":
465
- HOOK_DURATION = 2
466
-
467
  for clip in video_clips:
468
  library_clips.append(clip)
469
 
 
36
  ALLOWED_BG_MUSIC_VOLUME = 0.08
37
  REVERSE_THRESHOLD = 0.5
38
  HOOK_VIDEO_DURATION = 1.5
39
+ HOOK_START_ORIGINAL_CLIP_USED = 0
40
 
41
  class VideoRenderer:
42
  def __init__(self, config: Dict, data_holder: DataHolder = None):
 
84
  async def _prepare_video_clips_natural_speed(self) -> List[VideoFileClip]:
85
  """Load and prepare all video clips - NO speed adjustments"""
86
  clips = []
87
+ global HOOK_START_ORIGINAL_CLIP_USED
88
+ global HOOK_VIDEO_DURATION
89
 
90
  try:
91
  assets = self.data_holder.visual_assets
92
  # Load hook video for seamless looping
93
+ runway_as_second_ai_video = None
94
  if assets.get("hook_video") and assets["hook_video"].get("local_path"):
95
+ HOOK_VIDEO_DURATION = 1.5
96
+ if os.getenv("USE_VEO", "false").lower() == "true":
97
+ HOOK_VIDEO_DURATION = 2
98
+ hook_clip = VideoFileClip(assets["hook_video"]["veo_video_data"]["local_path"])
99
+ runway_as_second_ai_video = VideoFileClip(assets["hook_video"]["local_path"]).subclip(0, 2)
100
+ runway_as_second_ai_video = self._resize_for_vertical(runway_as_second_ai_video)
101
+ else:
102
+ hook_clip = VideoFileClip(assets["hook_video"]["local_path"])
103
+
104
  hook_duration = hook_clip.duration
105
  hook_clip = hook_clip.without_audio()
106
 
107
  logger.info(f"🔄 Creating seamless loop from {hook_duration:.2f}s hook video (NATURAL SPEED)")
108
 
 
 
 
109
  # Last 1.5 seconds for start
110
+ start_segment_begin = max(0, hook_duration - HOOK_VIDEO_DURATION)
111
  hook_start = hook_clip.subclip(start_segment_begin, hook_duration)
112
  # clips.append(("hook_start", hook_start))
113
  logger.info(f"✓ Hook start: {hook_start.duration:.2f}s")
114
 
115
  # First 1.5 seconds for end
116
+ hook_end_duration = min(HOOK_VIDEO_DURATION, hook_duration)
117
  hook_end = hook_clip.subclip(0, hook_end_duration)
118
  # clips.append(("hook_end", hook_end))
119
  logger.info(f"✓ Hook end: {hook_end.duration:.2f}s")
120
 
121
  hook_clip.close()
122
  else:
123
+ raise ValueError("⚠️ No hook video available for processing")
124
 
125
  # Combine all tts_script_segment texts into one string
126
  selected_videos = assets["selected_videos"]
 
136
 
137
  target_size = (1080, 1920)
138
  extra_secs = 0.0
139
+ HOOK_START_ORIGINAL_CLIP_USED = 0
140
  # Load library videos - NO speed adjustments
141
  for i, lib_video in enumerate(selected_videos):
142
  if lib_video.get("local_path"):
 
149
  prev_clip_file = None
150
  if i == 0:
151
  lib_hook_start = self._resize_for_vertical(hook_start, target_size)
152
+ original_clip = runway_as_second_ai_video if runway_as_second_ai_video else original_clip
153
+ if i == 1 and os.getenv("USE_VEO", "false").lower() == "true":
154
+ if HOOK_START_ORIGINAL_CLIP_USED < runway_as_second_ai_video.duration-0.5:
155
+ original_clip = runway_as_second_ai_video.subclip(HOOK_START_ORIGINAL_CLIP_USED, runway_as_second_ai_video.duration) if runway_as_second_ai_video else original_clip
156
  if i+1 == len(assets.get("selected_videos", [])):
157
  lib_hook_end = self._resize_for_vertical(hook_end, target_size)
158
  prev_clip = clips[-1][1]
 
232
 
233
  def _prepare_with_start_hook(self, lib_video, original_clip_path, alternate_url_local_path, original_clip, lib_hook_start, target_duration, prev_clip):
234
  """Handle clip preparation when a start hook is present."""
235
+ global HOOK_START_ORIGINAL_CLIP_USED
236
  logger.info(f"Start hook detected with duration {lib_hook_start.duration:.2f}s")
237
  total_duration = lib_hook_start.duration + original_clip.duration
238
 
 
241
  logger.info("Target duration fits start hook + original clip, concatenating and trimming")
242
  result = concatenate_videoclips([lib_hook_start, original_clip], method="compose").subclip(0, target_duration)
243
  logger.info(f"Prepared clip duration: {result.duration:.2f}s")
244
+ HOOK_START_ORIGINAL_CLIP_USED = max(0, target_duration - lib_hook_start.duration)
245
  return prev_clip, result, 0.0
246
 
247
  # Case 2: Need to extend beyond original clip
248
  modified_clip, extra_secs = self._extend_or_trim_clip(lib_video, original_clip_path, alternate_url_local_path, original_clip, target_duration-HOOK_VIDEO_DURATION)
249
 
250
  result = concatenate_videoclips([lib_hook_start, modified_clip], method="compose").subclip(0, target_duration)
251
+ HOOK_START_ORIGINAL_CLIP_USED = max(0, target_duration - lib_hook_start.duration)
252
+ print(HOOK_START_ORIGINAL_CLIP_USED)
253
  logger.info(f"Prepared clip duration: {result.duration:.2f}s")
254
  return prev_clip, result, extra_secs
255
 
 
349
  elif target_duration > total_duration:
350
  if alternate_url_local_path is None or (target_duration - total_duration <= REVERSE_THRESHOLD): # Small tolerance for floating point
351
  logger.info("⚠️ Reversing clip.")
352
+ # if original_clip_path:
353
+ # reversed_clip = self.reverse_clip(original_clip_path)
354
+ # else:
355
+ reversed_clip = self.reverse_clip(original_clip)
356
  loop_clip = concatenate_videoclips([original_clip, reversed_clip, original_clip, reversed_clip], method="compose")
357
  return loop_clip.subclip(0, target_duration), target_duration - original_clip.duration
358
  else:
 
478
  hook_end = None
479
  library_clips = []
480
 
 
 
 
 
481
  for clip in video_clips:
482
  library_clips.append(clip)
483