tthhanh commited on
Commit
5a459dd
·
1 Parent(s): ff7ae34

chore: black reformatting

Browse files
src/app/tools/langchain_tools.py CHANGED
@@ -36,7 +36,9 @@ _VIDEO_PATH_REGISTRY: List[str] = []
36
  def register_video_paths(paths: List[str]) -> None:
37
  """Register valid video paths for path resolution."""
38
  global _VIDEO_PATH_REGISTRY
39
- _VIDEO_PATH_REGISTRY = [os.path.abspath(p) for p in paths if p and os.path.exists(p)]
 
 
40
 
41
 
42
  def _resolve_video_path(video_path: str) -> Optional[str]:
@@ -48,16 +50,16 @@ def _resolve_video_path(video_path: str) -> Optional[str]:
48
  """
49
  # Clean the path
50
  video_path = video_path.strip()
51
-
52
  # Try direct path first
53
  if os.path.exists(video_path):
54
  return os.path.abspath(video_path)
55
-
56
  # Try absolute path conversion
57
  abs_path = os.path.abspath(video_path)
58
  if os.path.exists(abs_path):
59
  return abs_path
60
-
61
  # Try to find matching path in registry by filename
62
  if _VIDEO_PATH_REGISTRY:
63
  filename = os.path.basename(video_path)
@@ -65,7 +67,7 @@ def _resolve_video_path(video_path: str) -> Optional[str]:
65
  if os.path.basename(registered_path) == filename:
66
  if os.path.exists(registered_path):
67
  return registered_path
68
-
69
  # Try fuzzy matching - check if the path is similar to any registered path
70
  # This handles cases where the path got corrupted (e.g., missing characters)
71
  for registered_path in _VIDEO_PATH_REGISTRY:
@@ -74,7 +76,7 @@ def _resolve_video_path(video_path: str) -> Optional[str]:
74
  if filename in registered_path or registered_path.endswith(filename):
75
  if os.path.exists(registered_path):
76
  return registered_path
77
-
78
  return None
79
 
80
 
@@ -107,7 +109,7 @@ def video_summarizer_tool(video_path: str, fps: float = 2.0) -> str:
107
  else:
108
  # If resolution failed, try the original path anyway
109
  result_json = video_summarizer(video_path, fps=fps)
110
-
111
  # Validate and ensure the result matches VideoSummary schema
112
  try:
113
  parsed = json.loads(result_json)
@@ -147,8 +149,10 @@ def video_script_generator_tool(
147
  Returns:
148
  JSON string containing detailed script with scene information and composition details matching VideoScript schema
149
  """
150
- result_json = video_script_generator(video_summaries, user_description, target_duration)
151
-
 
 
152
  # Validate and ensure the result matches VideoScript schema
153
  try:
154
  parsed = json.loads(result_json)
@@ -197,7 +201,7 @@ def music_selector_tool(
197
  looping=looping,
198
  prompt_influence=prompt_influence,
199
  )
200
-
201
  # Return as JSON string matching MusicSelectorResult schema
202
  result = MusicSelectorResult(audio_path=audio_path)
203
  return result.model_dump_json()
@@ -226,11 +230,15 @@ def frame_extractor_tool(
226
  # Try to resolve the path in case it got corrupted
227
  resolved_path = _resolve_video_path(video_path)
228
  if resolved_path:
229
- frame_path = frame_extractor(resolved_path, thumbnail_timeframe=thumbnail_timeframe)
 
 
230
  else:
231
  # If resolution failed, try the original path anyway
232
- frame_path = frame_extractor(video_path, thumbnail_timeframe=thumbnail_timeframe)
233
-
 
 
234
  # Return as JSON string matching FrameExtractorResult schema
235
  result = FrameExtractorResult(frame_path=frame_path)
236
  return result.model_dump_json()
@@ -255,7 +263,7 @@ def thumbnail_generator_tool(image_path: str, summary: str) -> str:
255
  JSON string with thumbnail_path field matching ThumbnailGeneratorResult schema
256
  """
257
  thumbnail_path = thumbnail_generator(image_path, summary)
258
-
259
  # Return as JSON string matching ThumbnailGeneratorResult schema
260
  result = ThumbnailGeneratorResult(thumbnail_path=thumbnail_path)
261
  return result.model_dump_json()
@@ -293,7 +301,7 @@ def video_composer_tool(
293
  else:
294
  # Comma-separated paths
295
  clips_list = [path.strip() for path in video_clips.split(",") if path.strip()]
296
-
297
  # Resolve all video clip paths in case they got corrupted
298
  resolved_clips = []
299
  for clip_path in clips_list:
@@ -310,7 +318,7 @@ def video_composer_tool(
310
  music_path=music_path,
311
  thumbnail_image=thumbnail_image,
312
  )
313
-
314
  # Return as JSON string matching VideoComposerResult schema
315
  result = VideoComposerResult(video_path=video_path)
316
  return result.model_dump_json()
 
36
  def register_video_paths(paths: List[str]) -> None:
37
  """Register valid video paths for path resolution."""
38
  global _VIDEO_PATH_REGISTRY
39
+ _VIDEO_PATH_REGISTRY = [
40
+ os.path.abspath(p) for p in paths if p and os.path.exists(p)
41
+ ]
42
 
43
 
44
  def _resolve_video_path(video_path: str) -> Optional[str]:
 
50
  """
51
  # Clean the path
52
  video_path = video_path.strip()
53
+
54
  # Try direct path first
55
  if os.path.exists(video_path):
56
  return os.path.abspath(video_path)
57
+
58
  # Try absolute path conversion
59
  abs_path = os.path.abspath(video_path)
60
  if os.path.exists(abs_path):
61
  return abs_path
62
+
63
  # Try to find matching path in registry by filename
64
  if _VIDEO_PATH_REGISTRY:
65
  filename = os.path.basename(video_path)
 
67
  if os.path.basename(registered_path) == filename:
68
  if os.path.exists(registered_path):
69
  return registered_path
70
+
71
  # Try fuzzy matching - check if the path is similar to any registered path
72
  # This handles cases where the path got corrupted (e.g., missing characters)
73
  for registered_path in _VIDEO_PATH_REGISTRY:
 
76
  if filename in registered_path or registered_path.endswith(filename):
77
  if os.path.exists(registered_path):
78
  return registered_path
79
+
80
  return None
81
 
82
 
 
109
  else:
110
  # If resolution failed, try the original path anyway
111
  result_json = video_summarizer(video_path, fps=fps)
112
+
113
  # Validate and ensure the result matches VideoSummary schema
114
  try:
115
  parsed = json.loads(result_json)
 
149
  Returns:
150
  JSON string containing detailed script with scene information and composition details matching VideoScript schema
151
  """
152
+ result_json = video_script_generator(
153
+ video_summaries, user_description, target_duration
154
+ )
155
+
156
  # Validate and ensure the result matches VideoScript schema
157
  try:
158
  parsed = json.loads(result_json)
 
201
  looping=looping,
202
  prompt_influence=prompt_influence,
203
  )
204
+
205
  # Return as JSON string matching MusicSelectorResult schema
206
  result = MusicSelectorResult(audio_path=audio_path)
207
  return result.model_dump_json()
 
230
  # Try to resolve the path in case it got corrupted
231
  resolved_path = _resolve_video_path(video_path)
232
  if resolved_path:
233
+ frame_path = frame_extractor(
234
+ resolved_path, thumbnail_timeframe=thumbnail_timeframe
235
+ )
236
  else:
237
  # If resolution failed, try the original path anyway
238
+ frame_path = frame_extractor(
239
+ video_path, thumbnail_timeframe=thumbnail_timeframe
240
+ )
241
+
242
  # Return as JSON string matching FrameExtractorResult schema
243
  result = FrameExtractorResult(frame_path=frame_path)
244
  return result.model_dump_json()
 
263
  JSON string with thumbnail_path field matching ThumbnailGeneratorResult schema
264
  """
265
  thumbnail_path = thumbnail_generator(image_path, summary)
266
+
267
  # Return as JSON string matching ThumbnailGeneratorResult schema
268
  result = ThumbnailGeneratorResult(thumbnail_path=thumbnail_path)
269
  return result.model_dump_json()
 
301
  else:
302
  # Comma-separated paths
303
  clips_list = [path.strip() for path in video_clips.split(",") if path.strip()]
304
+
305
  # Resolve all video clip paths in case they got corrupted
306
  resolved_clips = []
307
  for clip_path in clips_list:
 
318
  music_path=music_path,
319
  thumbnail_image=thumbnail_image,
320
  )
321
+
322
  # Return as JSON string matching VideoComposerResult schema
323
  result = VideoComposerResult(video_path=video_path)
324
  return result.model_dump_json()
src/app/tools/tool_schemas.py CHANGED
@@ -41,7 +41,9 @@ class VideoSummary(BaseModel):
41
  class VideoScript(BaseModel):
42
  """Schema for video script generator tool output."""
43
 
44
- total_duration: float = Field(..., description="Total duration of the script in seconds")
 
 
45
  scenes: List[dict] = Field(..., description="List of scene objects")
46
  music: Optional[dict] = Field(None, description="Music configuration")
47
  pacing: Optional[str] = Field(None, description="Pacing description")
@@ -79,9 +81,7 @@ class MusicSelectorResult(BaseModel):
79
 
80
  class Config:
81
  json_schema_extra = {
82
- "example": {
83
- "audio_path": "/tmp/sound_effect_energetic_30s_1234567890.mp3"
84
- }
85
  }
86
 
87
 
@@ -92,22 +92,20 @@ class FrameExtractorResult(BaseModel):
92
 
93
  class Config:
94
  json_schema_extra = {
95
- "example": {
96
- "frame_path": "/path/to/video_frame_ai_13s.png"
97
- }
98
  }
99
 
100
 
101
  class ThumbnailGeneratorResult(BaseModel):
102
  """Schema for thumbnail generator tool output."""
103
 
104
- thumbnail_path: str = Field(..., description="Path to the generated thumbnail image")
 
 
105
 
106
  class Config:
107
  json_schema_extra = {
108
- "example": {
109
- "thumbnail_path": "/tmp/thumbnail_1234567890.png"
110
- }
111
  }
112
 
113
 
@@ -117,9 +115,4 @@ class VideoComposerResult(BaseModel):
117
  video_path: str = Field(..., description="Path to the final composed video file")
118
 
119
  class Config:
120
- json_schema_extra = {
121
- "example": {
122
- "video_path": "/tmp/composed_video_12345.mp4"
123
- }
124
- }
125
-
 
41
  class VideoScript(BaseModel):
42
  """Schema for video script generator tool output."""
43
 
44
+ total_duration: float = Field(
45
+ ..., description="Total duration of the script in seconds"
46
+ )
47
  scenes: List[dict] = Field(..., description="List of scene objects")
48
  music: Optional[dict] = Field(None, description="Music configuration")
49
  pacing: Optional[str] = Field(None, description="Pacing description")
 
81
 
82
  class Config:
83
  json_schema_extra = {
84
+ "example": {"audio_path": "/tmp/sound_effect_energetic_30s_1234567890.mp3"}
 
 
85
  }
86
 
87
 
 
92
 
93
  class Config:
94
  json_schema_extra = {
95
+ "example": {"frame_path": "/path/to/video_frame_ai_13s.png"}
 
 
96
  }
97
 
98
 
99
  class ThumbnailGeneratorResult(BaseModel):
100
  """Schema for thumbnail generator tool output."""
101
 
102
+ thumbnail_path: str = Field(
103
+ ..., description="Path to the generated thumbnail image"
104
+ )
105
 
106
  class Config:
107
  json_schema_extra = {
108
+ "example": {"thumbnail_path": "/tmp/thumbnail_1234567890.png"}
 
 
109
  }
110
 
111
 
 
115
  video_path: str = Field(..., description="Path to the final composed video file")
116
 
117
  class Config:
118
+ json_schema_extra = {"example": {"video_path": "/tmp/composed_video_12345.mp4"}}
 
 
 
 
 
src/app/tools/video_clipper.py CHANGED
@@ -88,16 +88,18 @@ def video_clipper(
88
  # Clean up
89
  clipped_video.close()
90
  video.close()
91
-
92
  # Verify the clipped video duration by reloading it
93
  # This helps catch any frame reading issues early
94
  verify_clip = VideoFileClip(output_path)
95
  actual_duration = verify_clip.duration
96
  verify_clip.close()
97
-
98
  # Log if there's a significant duration mismatch
99
  if abs(actual_duration - expected_duration) > 0.5:
100
- print(f"Warning: Clipped video expected {expected_duration:.2f}s but actual duration is {actual_duration:.2f}s")
 
 
101
 
102
  # Return absolute path
103
  return os.path.abspath(output_path)
 
88
  # Clean up
89
  clipped_video.close()
90
  video.close()
91
+
92
  # Verify the clipped video duration by reloading it
93
  # This helps catch any frame reading issues early
94
  verify_clip = VideoFileClip(output_path)
95
  actual_duration = verify_clip.duration
96
  verify_clip.close()
97
+
98
  # Log if there's a significant duration mismatch
99
  if abs(actual_duration - expected_duration) > 0.5:
100
+ print(
101
+ f"Warning: Clipped video expected {expected_duration:.2f}s but actual duration is {actual_duration:.2f}s"
102
+ )
103
 
104
  # Return absolute path
105
  return os.path.abspath(output_path)
src/app/tools/video_composer.py CHANGED
@@ -304,25 +304,27 @@ def video_composer(
304
  video_clips_loaded = []
305
  expected_total_duration = 0.0
306
  actual_total_duration = 0.0
307
-
308
  for i, (clip_path, scene) in enumerate(zip(clip_paths, scenes)):
309
  if not os.path.exists(clip_path):
310
  raise FileNotFoundError(f"Video clip not found: {clip_path}")
311
-
312
  clip = VideoFileClip(clip_path)
313
  actual_duration = clip.duration
314
  expected_duration = scene.get("duration", actual_duration)
315
-
316
  # Use actual duration for calculations, not expected
317
  actual_total_duration += actual_duration
318
  expected_total_duration += expected_duration
319
-
320
  # Log duration mismatch if significant
321
  if abs(actual_duration - expected_duration) > 0.5:
322
- print(f"Warning: Scene {i+1} expected duration {expected_duration:.2f}s but actual clip duration is {actual_duration:.2f}s")
323
-
 
 
324
  video_clips_loaded.append(clip)
325
-
326
  print(f"Total expected duration from script: {expected_total_duration:.2f}s")
327
  print(f"Total actual duration from clips: {actual_total_duration:.2f}s")
328
 
@@ -387,26 +389,30 @@ def video_composer(
387
  else:
388
  # Use concatenate_videoclips for simple sequential composition
389
  final_video = concatenate_videoclips(processed_clips, method="compose")
390
-
391
  # Validate final video duration
392
  actual_final_duration = final_video.duration
393
  target_duration = script_data.get("total_duration", expected_total_duration)
394
-
395
  # Log duration information
396
  print(f"Final composed video duration: {actual_final_duration:.2f}s")
397
  print(f"Target duration from script: {target_duration:.2f}s")
398
-
399
  if abs(actual_final_duration - target_duration) > 1.0:
400
- print(f"Warning: Final video duration ({actual_final_duration:.2f}s) is shorter than target duration ({target_duration:.2f}s)")
 
 
401
  print(f"Expected total from scenes: {expected_total_duration:.2f}s")
402
  print(f"Actual total from clips: {actual_total_duration:.2f}s")
403
-
404
  # If the actual duration is significantly shorter, it might be due to:
405
  # 1. Frame reading issues in clipped videos
406
  # 2. Crossfade overlaps reducing duration
407
  # 3. Clips being truncated during extraction
408
  if actual_final_duration < actual_total_duration * 0.8:
409
- print(f"Warning: Final video is significantly shorter than sum of clip durations. This may indicate frame reading issues.")
 
 
410
 
411
  # Add thumbnail image to first frame if provided
412
  if thumbnail_path and os.path.exists(thumbnail_path):
 
304
  video_clips_loaded = []
305
  expected_total_duration = 0.0
306
  actual_total_duration = 0.0
307
+
308
  for i, (clip_path, scene) in enumerate(zip(clip_paths, scenes)):
309
  if not os.path.exists(clip_path):
310
  raise FileNotFoundError(f"Video clip not found: {clip_path}")
311
+
312
  clip = VideoFileClip(clip_path)
313
  actual_duration = clip.duration
314
  expected_duration = scene.get("duration", actual_duration)
315
+
316
  # Use actual duration for calculations, not expected
317
  actual_total_duration += actual_duration
318
  expected_total_duration += expected_duration
319
+
320
  # Log duration mismatch if significant
321
  if abs(actual_duration - expected_duration) > 0.5:
322
+ print(
323
+ f"Warning: Scene {i+1} expected duration {expected_duration:.2f}s but actual clip duration is {actual_duration:.2f}s"
324
+ )
325
+
326
  video_clips_loaded.append(clip)
327
+
328
  print(f"Total expected duration from script: {expected_total_duration:.2f}s")
329
  print(f"Total actual duration from clips: {actual_total_duration:.2f}s")
330
 
 
389
  else:
390
  # Use concatenate_videoclips for simple sequential composition
391
  final_video = concatenate_videoclips(processed_clips, method="compose")
392
+
393
  # Validate final video duration
394
  actual_final_duration = final_video.duration
395
  target_duration = script_data.get("total_duration", expected_total_duration)
396
+
397
  # Log duration information
398
  print(f"Final composed video duration: {actual_final_duration:.2f}s")
399
  print(f"Target duration from script: {target_duration:.2f}s")
400
+
401
  if abs(actual_final_duration - target_duration) > 1.0:
402
+ print(
403
+ f"Warning: Final video duration ({actual_final_duration:.2f}s) is shorter than target duration ({target_duration:.2f}s)"
404
+ )
405
  print(f"Expected total from scenes: {expected_total_duration:.2f}s")
406
  print(f"Actual total from clips: {actual_total_duration:.2f}s")
407
+
408
  # If the actual duration is significantly shorter, it might be due to:
409
  # 1. Frame reading issues in clipped videos
410
  # 2. Crossfade overlaps reducing duration
411
  # 3. Clips being truncated during extraction
412
  if actual_final_duration < actual_total_duration * 0.8:
413
+ print(
414
+ f"Warning: Final video is significantly shorter than sum of clip durations. This may indicate frame reading issues."
415
+ )
416
 
417
  # Add thumbnail image to first frame if provided
418
  if thumbnail_path and os.path.exists(thumbnail_path):
src/app/tools/video_script_generator.py CHANGED
@@ -206,7 +206,10 @@ def video_script_generator(
206
  # Check if it's wrapped in a tool response format
207
  if len(summary) == 1:
208
  key = list(summary.keys())[0]
209
- if "_tool_response" in key.lower() or "_response" in key.lower():
 
 
 
210
  # Extract the actual data from the wrapper
211
  summaries_list.append(summary[key])
212
  else:
@@ -382,13 +385,13 @@ Rules:
382
  video_durations = {}
383
  for i, summary in enumerate(summaries_list):
384
  video_durations[i] = summary.get("duration", 0.0)
385
-
386
  num_videos = len(summaries_list)
387
 
388
  # Validate and fix each scene
389
  for scene in script["scenes"]:
390
  source_video_idx = scene.get("source_video")
391
-
392
  # Validate and fix source_video index if it's an integer
393
  if isinstance(source_video_idx, int):
394
  # Clamp index to valid range (0 to num_videos - 1)
@@ -401,7 +404,7 @@ Rules:
401
  elif source_video_idx is None:
402
  # If source_video is missing, default to first video
403
  scene["source_video"] = 0
404
-
405
  # Now validate timestamps if we have a valid video index
406
  # Use the clamped value from scene (in case it was updated)
407
  validated_idx = scene.get("source_video")
@@ -427,8 +430,10 @@ Rules:
427
  scene["duration"] = video_duration - scene["start_time"]
428
  else:
429
  # Clamp start_time to be within bounds
430
- scene["start_time"] = max(0.0, min(start_time, video_duration - 0.1))
431
-
 
 
432
  # Calculate or validate end_time
433
  if end_time is None:
434
  if scene_duration:
@@ -437,10 +442,13 @@ Rules:
437
  calculated_end_time = video_duration
438
  else:
439
  calculated_end_time = end_time
440
-
441
  # Clamp end_time to be within bounds
442
- scene["end_time"] = max(scene["start_time"] + 0.1, min(calculated_end_time, video_duration))
443
-
 
 
 
444
  # Update duration to match
445
  scene["duration"] = scene["end_time"] - scene["start_time"]
446
 
 
206
  # Check if it's wrapped in a tool response format
207
  if len(summary) == 1:
208
  key = list(summary.keys())[0]
209
+ if (
210
+ "_tool_response" in key.lower()
211
+ or "_response" in key.lower()
212
+ ):
213
  # Extract the actual data from the wrapper
214
  summaries_list.append(summary[key])
215
  else:
 
385
  video_durations = {}
386
  for i, summary in enumerate(summaries_list):
387
  video_durations[i] = summary.get("duration", 0.0)
388
+
389
  num_videos = len(summaries_list)
390
 
391
  # Validate and fix each scene
392
  for scene in script["scenes"]:
393
  source_video_idx = scene.get("source_video")
394
+
395
  # Validate and fix source_video index if it's an integer
396
  if isinstance(source_video_idx, int):
397
  # Clamp index to valid range (0 to num_videos - 1)
 
404
  elif source_video_idx is None:
405
  # If source_video is missing, default to first video
406
  scene["source_video"] = 0
407
+
408
  # Now validate timestamps if we have a valid video index
409
  # Use the clamped value from scene (in case it was updated)
410
  validated_idx = scene.get("source_video")
 
430
  scene["duration"] = video_duration - scene["start_time"]
431
  else:
432
  # Clamp start_time to be within bounds
433
+ scene["start_time"] = max(
434
+ 0.0, min(start_time, video_duration - 0.1)
435
+ )
436
+
437
  # Calculate or validate end_time
438
  if end_time is None:
439
  if scene_duration:
 
442
  calculated_end_time = video_duration
443
  else:
444
  calculated_end_time = end_time
445
+
446
  # Clamp end_time to be within bounds
447
+ scene["end_time"] = max(
448
+ scene["start_time"] + 0.1,
449
+ min(calculated_end_time, video_duration),
450
+ )
451
+
452
  # Update duration to match
453
  scene["duration"] = scene["end_time"] - scene["start_time"]
454
 
src/app/workflow.py CHANGED
@@ -74,7 +74,7 @@ def agent_workflow(
74
  This workflow parallelizes operations where possible:
75
  - Video analysis: All videos are analyzed concurrently
76
  - Music generation and frame extraction: Run in parallel
77
-
78
  This is a generator function that yields progress updates as the workflow progresses.
79
  Each yield contains: (final_path, summary_json, script_json, thumbnail_path, status)
80
 
@@ -118,7 +118,7 @@ def agent_workflow(
118
  yield final_path, summary_json, script_json, thumbnail_path, status
119
 
120
  summaries = []
121
-
122
  def analyze_video(video_path, index):
123
  """Helper function to analyze a single video."""
124
  try:
@@ -137,20 +137,22 @@ def agent_workflow(
137
  executor.submit(analyze_video, video_path, i): (i, video_path)
138
  for i, video_path in enumerate(video_paths)
139
  }
140
-
141
  # Process results as they complete
142
  results = [None] * len(video_paths)
143
  for future in as_completed(future_to_video):
144
  index, summary_dict, error = future.result()
145
-
146
  if error:
147
- status += f" ⚠️ Warning: Video {index+1}/{len(video_paths)} - {error}\n"
 
 
148
  elif summary_dict:
149
  results[index] = summary_dict
150
  status += f" ✅ Completed video {index+1}/{len(video_paths)}\n"
151
  else:
152
  status += f" ⚠️ Warning: Video {index+1}/{len(video_paths)} - No summary generated\n"
153
-
154
  yield final_path, summary_json, script_json, thumbnail_path, status
155
 
156
  # Collect successful summaries in order
@@ -186,14 +188,20 @@ def agent_workflow(
186
  else:
187
  # Fallback: extract mood from first video summary
188
  if summaries and summaries[0].get("mood_tags"):
189
- music_mood = summaries[0]["mood_tags"][0] if summaries[0]["mood_tags"] else "energetic"
 
 
 
 
190
  else:
191
  music_mood = "energetic"
192
  except:
193
  music_mood = "energetic"
194
 
195
  # Step 3 & 4: Generate music and extract frame in parallel
196
- status += "\n🎵 Step 3 & 4: Generating music and extracting frame (in parallel)...\n"
 
 
197
  yield final_path, summary_json, script_json, thumbnail_path, status
198
 
199
  music_path = None
@@ -250,7 +258,7 @@ def agent_workflow(
250
  elif result:
251
  frame_path = result
252
  status += "✅ Frame extracted.\n"
253
-
254
  yield final_path, summary_json, script_json, thumbnail_path, status
255
 
256
  # Step 5: Generate thumbnail
 
74
  This workflow parallelizes operations where possible:
75
  - Video analysis: All videos are analyzed concurrently
76
  - Music generation and frame extraction: Run in parallel
77
+
78
  This is a generator function that yields progress updates as the workflow progresses.
79
  Each yield contains: (final_path, summary_json, script_json, thumbnail_path, status)
80
 
 
118
  yield final_path, summary_json, script_json, thumbnail_path, status
119
 
120
  summaries = []
121
+
122
  def analyze_video(video_path, index):
123
  """Helper function to analyze a single video."""
124
  try:
 
137
  executor.submit(analyze_video, video_path, i): (i, video_path)
138
  for i, video_path in enumerate(video_paths)
139
  }
140
+
141
  # Process results as they complete
142
  results = [None] * len(video_paths)
143
  for future in as_completed(future_to_video):
144
  index, summary_dict, error = future.result()
145
+
146
  if error:
147
+ status += (
148
+ f" ⚠️ Warning: Video {index+1}/{len(video_paths)} - {error}\n"
149
+ )
150
  elif summary_dict:
151
  results[index] = summary_dict
152
  status += f" ✅ Completed video {index+1}/{len(video_paths)}\n"
153
  else:
154
  status += f" ⚠️ Warning: Video {index+1}/{len(video_paths)} - No summary generated\n"
155
+
156
  yield final_path, summary_json, script_json, thumbnail_path, status
157
 
158
  # Collect successful summaries in order
 
188
  else:
189
  # Fallback: extract mood from first video summary
190
  if summaries and summaries[0].get("mood_tags"):
191
+ music_mood = (
192
+ summaries[0]["mood_tags"][0]
193
+ if summaries[0]["mood_tags"]
194
+ else "energetic"
195
+ )
196
  else:
197
  music_mood = "energetic"
198
  except:
199
  music_mood = "energetic"
200
 
201
  # Step 3 & 4: Generate music and extract frame in parallel
202
+ status += (
203
+ "\n🎵 Step 3 & 4: Generating music and extracting frame (in parallel)...\n"
204
+ )
205
  yield final_path, summary_json, script_json, thumbnail_path, status
206
 
207
  music_path = None
 
258
  elif result:
259
  frame_path = result
260
  status += "✅ Frame extracted.\n"
261
+
262
  yield final_path, summary_json, script_json, thumbnail_path, status
263
 
264
  # Step 5: Generate thumbnail