rairo committed on
Commit
33de861
Β·
verified Β·
1 Parent(s): 2d61850

Create styled_video_gen.py

Browse files
Files changed (1) hide show
  1. styled_video_gen.py +626 -0
styled_video_gen.py ADDED
@@ -0,0 +1,626 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import time
3
+ import tempfile
4
+ import io
5
+ import numpy as np
6
+ import cv2 # Still used for reading logo/watermark if needed
7
+ import logging
8
+ import uuid
9
+ import subprocess
10
+ from PIL import Image, ImageFont, ImageDraw
11
+
12
+ # --- MoviePy Imports ---
13
+ from moviepy.editor import *
14
+ # import moviepy.video.fx.all as vfx # Keep if you add more complex FX
15
+ from moviepy.video.tools.transitions import crossfadein, slide_in # Import specific transitions
16
+
17
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
18
+ logger = logging.getLogger(__name__)
19
+
20
+ # --- Constants ---
21
+ DEFAULT_WIDTH = 1280
22
+ DEFAULT_HEIGHT = 720
23
+ DEFAULT_FPS = 24
24
+ DEFAULT_TRANSITION_DURATION = 0.75 # seconds
25
+ DEFAULT_FONT = "assets/fonts/lazy_dog.ttf" # *** ADJUST THIS PATH *** Needs to be accessible on your server
26
+ DEFAULT_SUBTITLE_FONT_SIZE = 36
27
+ DEFAULT_SUBTITLE_COLOR = 'white'
28
+ DEFAULT_SUBTITLE_BG_COLOR = 'rgba(0,0,0,0.5)' # Semi-transparent black background
29
+ DEFAULT_SUBTITLE_POSITION = ('center', 'bottom')
30
+ DEFAULT_LOGO_PATH = "assets/images/sozo_logo2.png" # *** ADJUST THIS PATH ***
31
+
32
+ # --- Helper Functions ---
33
+
34
+ def get_styled_audio_duration(audio_path):
35
+ """ Get duration of an audio file using MoviePy (preferred) or FFprobe fallback. """
36
+ if not audio_path or not os.path.exists(audio_path):
37
+ logger.warning(f"Audio path invalid or file not found: {audio_path}. Defaulting duration to 5.0s.")
38
+ return 5.0 # Default duration
39
+ try:
40
+ # Use MoviePy - more integrated & handles more formats potentially
41
+ with AudioFileClip(audio_path) as clip:
42
+ duration = clip.duration
43
+ # Sometimes duration might be None or 0 for very short/corrupt files
44
+ return duration if duration and duration > 0 else 5.0
45
+ except Exception as e:
46
+ logger.warning(f"MoviePy failed to get duration for {audio_path}: {e}. Trying ffprobe.")
47
+ try:
48
+ result = subprocess.run(
49
+ ['ffprobe', '-v', 'error', '-show_entries', 'format=duration', '-of', 'default=noprint_wrappers=1:nokey=1', audio_path],
50
+ stdout=subprocess.PIPE,
51
+ stderr=subprocess.PIPE,
52
+ check=True,
53
+ text=True # Get stdout as string
54
+ )
55
+ return float(result.stdout.strip())
56
+ except Exception as e_ffprobe:
57
+ logger.error(f"FFprobe also failed for {audio_path}: {e_ffprobe}. Returning default 5.0s.")
58
+ return 5.0
59
+
60
+ def resize_image_aspect_fill(img_pil, target_width, target_height):
61
+ """ Resizes PIL image to target dimensions, cropping if necessary to fill."""
62
+ try:
63
+ target_ratio = target_width / target_height
64
+ # Ensure image is in RGB for consistent processing
65
+ img_pil = img_pil.convert("RGB")
66
+ img_ratio = img_pil.width / img_pil.height
67
+
68
+ if abs(target_ratio - img_ratio) < 0.01: # If aspect ratios are very close, just resize
69
+ img_resized = img_pil.resize((target_width, target_height), Image.Resampling.LANCZOS)
70
+ return img_resized
71
+
72
+ if target_ratio > img_ratio: # Target is wider than image -> crop top/bottom
73
+ new_width = target_width
74
+ new_height = int(new_width / img_ratio)
75
+ img_resized = img_pil.resize((new_width, new_height), Image.Resampling.LANCZOS)
76
+ top = (new_height - target_height) // 2
77
+ bottom = top + target_height
78
+ img_cropped = img_resized.crop((0, top, target_width, bottom))
79
+ else: # Target is taller than image (or same ratio) -> crop sides
80
+ new_height = target_height
81
+ new_width = int(new_height * img_ratio)
82
+ img_resized = img_pil.resize((new_width, new_height), Image.Resampling.LANCZOS)
83
+ left = (new_width - target_width) // 2
84
+ right = left + target_width
85
+ img_cropped = img_resized.crop((left, 0, right, target_height))
86
+
87
+ return img_cropped
88
+ except Exception as e:
89
+ logger.error(f"Error resizing/cropping image: {e}. Returning original image resized without aspect correction.")
90
+ # Fallback: simple resize, may introduce distortion or black bars later
91
+ return img_pil.resize((target_width, target_height), Image.Resampling.LANCZOS)
92
+
93
+
94
+ def create_subtitle_clip(text, duration, width, height, options):
95
+ """Creates a MoviePy TextClip for subtitles."""
96
+ if not text or not isinstance(text, str) or not text.strip():
97
+ # logger.debug("Subtitle text is empty or invalid, skipping clip creation.")
98
+ return None
99
+
100
+ # Ensure defaults are handled robustly
101
+ subtitle_opts = options or {}
102
+ font_path = subtitle_opts.get("font", DEFAULT_FONT)
103
+ fontsize = int(subtitle_opts.get("fontsize", DEFAULT_SUBTITLE_FONT_SIZE))
104
+ color = subtitle_opts.get("color", DEFAULT_SUBTITLE_COLOR)
105
+ position = subtitle_opts.get("position", DEFAULT_SUBTITLE_POSITION)
106
+ bg_color = subtitle_opts.get("bg_color", DEFAULT_SUBTITLE_BG_COLOR) # Background for readability
107
+ stroke_color = subtitle_opts.get("stroke_color") # Optional outline
108
+ stroke_width = float(subtitle_opts.get("stroke_width", 0)) # Default 0 if not specified
109
+ method = subtitle_opts.get("method", "caption") # 'caption' automatically wraps text
110
+ align = subtitle_opts.get("align", "center")
111
+ margin = int(subtitle_opts.get("margin", 10)) # Margin from edge for position like ('center', 'bottom')
112
+
113
+ # Adjust position based on margin for common cases
114
+ if isinstance(position, (list, tuple)) and len(position) == 2:
115
+ x_pos, y_pos = position
116
+ if y_pos == 'bottom':
117
+ final_pos = (x_pos, height - margin) # Anchor point is bottom of text
118
+ elif y_pos == 'top':
119
+ final_pos = (x_pos, margin) # Anchor point is top of text
120
+ else:
121
+ final_pos = position
122
+ else:
123
+ final_pos = position # Use as is if not tuple or custom values
124
+
125
+ try:
126
+ # Check font existence early
127
+ if not os.path.exists(font_path):
128
+ logger.error(f"❌ Subtitle font not found at '{font_path}'. Using MoviePy default.")
129
+ font_path = None # Let MoviePy choose a default
130
+
131
+ # Use method='caption' for automatic wrapping based on size
132
+ # Limit width to 90% of video width for wrapping
133
+ text_width_limit = width * 0.9
134
+
135
+ subtitle = TextClip(
136
+ txt=text.strip(), # Ensure no leading/trailing whitespace
137
+ fontsize=fontsize,
138
+ color=color,
139
+ font=font_path,
140
+ size=(text_width_limit, None), # Width limit, height auto
141
+ method=method,
142
+ align=align,
143
+ bg_color=bg_color,
144
+ stroke_color=stroke_color if stroke_width > 0 else None, # Only apply if stroke_width > 0
145
+ stroke_width=stroke_width,
146
+ print_cmd=False # Suppress verbose ffmpeg command print
147
+ ).set_position(final_pos, relative=False).set_duration(duration) # relative=False if using pixel coords
148
+
149
+ # Optional fade in/out for subtitles
150
+ fade_duration = float(subtitle_opts.get("fade_duration", 0.3))
151
+ # Ensure fade doesn't exceed half the clip duration
152
+ fade_duration = min(fade_duration, duration / 2.1) # Ensure non-overlapping fades
153
+ if fade_duration > 0:
154
+ subtitle = subtitle.crossfadein(fade_duration).crossfadeout(fade_duration)
155
+
156
+ # logger.debug(f"βœ… Created subtitle clip for text: '{text[:30]}...'")
157
+ return subtitle
158
+
159
+ except Exception as e:
160
+ logger.error(f"❌ ERROR creating subtitle clip for text '{text[:30]}...': {e}")
161
+ # Optionally try PIL fallback here if needed, but TextClip is generally more robust
162
+ return None
163
+
164
+ def create_particle_overlay_clip(particle_type, duration, width, height, options):
165
+ """
166
+ Creates a particle overlay clip.
167
+ Placeholder: Loads a pre-existing video file based on type.
168
+ *** Requires actual particle video files on server ***
169
+ """
170
+ if not particle_type or particle_type == 'none':
171
+ return None
172
+
173
+ particle_opts = options or {}
174
+ # *** Define paths to your actual particle overlay videos here ***
175
+ base_particle_path = "assets/particles" # Example base directory
176
+ particle_files = {
177
+ "snow": os.path.join(base_particle_path, "snow_overlay.mp4"),
178
+ "sparkles": os.path.join(base_particle_path, "sparkles_overlay.mp4"), # Often .mov for alpha
179
+ "rain": os.path.join(base_particle_path, "rain_overlay.mp4"),
180
+ "confetti": os.path.join(base_particle_path, "confetti_overlay.mp4"),
181
+ # Add more mappings for types you support
182
+ }
183
+
184
+ particle_path = particle_files.get(str(particle_type).lower())
185
+
186
+ if not particle_path:
187
+ logger.warning(f"⚠️ Particle type '{particle_type}' not recognized or mapped.")
188
+ return None
189
+ if not os.path.exists(particle_path):
190
+ logger.warning(f"⚠️ Particle overlay video not found for type '{particle_type}' at path: {particle_path}")
191
+ return None
192
+
193
+ try:
194
+ logger.info(f"Creating particle overlay: {particle_type} from {particle_path}")
195
+ # Load the overlay video. Check for alpha channel (.mov often has it)
196
+ has_mask = particle_path.lower().endswith('.mov')
197
+ overlay = VideoFileClip(particle_path, has_mask=has_mask, target_resolution=(height, width))
198
+
199
+ # Loop or trim the overlay to match the required duration
200
+ if overlay.duration < duration:
201
+ # Loop requires careful handling if audio present in overlay, disable audio
202
+ overlay = overlay.loop(duration=duration).without_audio()
203
+ else:
204
+ overlay = overlay.subclip(0, duration).without_audio()
205
+
206
+ # Resize MUST happen after loop/subclip if original is smaller
207
+ overlay = overlay.resize(newsize=(width, height))
208
+
209
+ # Set opacity
210
+ opacity = float(particle_opts.get("opacity", 0.6)) # Default 60% opacity
211
+ if 0.0 <= opacity < 1.0:
212
+ overlay = overlay.set_opacity(opacity)
213
+ elif opacity < 0:
214
+ overlay = overlay.set_opacity(0) # Clamp at 0
215
+
216
+ # Set position (usually centered)
217
+ position = particle_opts.get("position", "center")
218
+ return overlay.set_position(position)
219
+
220
+ except Exception as e:
221
+ # Log the full traceback for debugging particle issues
222
+ import traceback
223
+ logger.error(f"❌ ERROR creating particle overlay for {particle_type}: {e}\n{traceback.format_exc()}")
224
+ return None
225
+
226
+
227
+ # --- Main Video Creation Function using MoviePy ---
228
+
229
+ def create_styled_video(images, audio_files, section_texts, output_path, config=None):
230
+ """
231
+ Creates a video from images, audio, and text using MoviePy.
232
+
233
+ Args:
234
+ images (list): List of PIL Image objects (MUST NOT contain None here).
235
+ audio_files (list): List of paths to audio files (can have None).
236
+ section_texts (list): List of text strings for subtitles (can have None).
237
+ output_path (str): Path to save the final video.
238
+ config (dict): Dictionary containing configuration options.
239
+
240
+ Returns:
241
+ str: Path to the generated video file, or None on failure.
242
+ """
243
+ # Ensure inputs are lists
244
+ images = images or []
245
+ audio_files = audio_files or []
246
+ section_texts = section_texts or []
247
+
248
+ if not images:
249
+ logger.error("❌ No images provided for video creation.")
250
+ return None
251
+
252
+ num_sections = len(images)
253
+ # Pad other lists to match image count if necessary (should be handled by caller ideally)
254
+ if len(audio_files) < num_sections:
255
+ audio_files.extend([None] * (num_sections - len(audio_files)))
256
+ if len(section_texts) < num_sections:
257
+ section_texts.extend([None] * (num_sections - len(section_texts)))
258
+
259
+ if config is None:
260
+ config = {}
261
+ logger.info(f"Starting video creation with config: {config}")
262
+
263
+ # --- Get Configuration ---
264
+ width = int(config.get("width", DEFAULT_WIDTH))
265
+ height = int(config.get("height", DEFAULT_HEIGHT))
266
+ fps = int(config.get("fps", DEFAULT_FPS))
267
+ transition_type = config.get("transition", "fade").lower()
268
+ transition_duration = float(config.get("transition_duration", DEFAULT_TRANSITION_DURATION))
269
+ # Ensure transition duration isn't negative
270
+ transition_duration = max(0, transition_duration)
271
+
272
+ font_path = config.get("font_path", DEFAULT_FONT) # Primary font path
273
+
274
+ subtitle_opts = config.get("subtitle_options", {})
275
+ subtitles_enabled = subtitle_opts.get("enabled", True)
276
+ if "font" not in subtitle_opts: subtitle_opts["font"] = font_path # Inherit default font
277
+
278
+ particle_opts = config.get("particle_options", {})
279
+ particles_enabled = particle_opts.get("enabled", False)
280
+ # Ensure particle types list exists and matches section count
281
+ particle_types = particle_opts.get("types_per_section", [])
282
+ if len(particle_types) != num_sections:
283
+ logger.warning(f"Particle types list length mismatch ({len(particle_types)} vs {num_sections} sections). Disabling particles for safety.")
284
+ particle_types = [None] * num_sections # Reset to None
285
+ particle_opts['types_per_section'] = particle_types # Store potentially corrected list
286
+
287
+
288
+ watermark_opts = config.get("watermark_options", {})
289
+ watermark_enabled = watermark_opts.get("enabled", False) and watermark_opts.get("path")
290
+ watermark_path = watermark_opts.get("path") if watermark_enabled else None
291
+
292
+ end_logo_opts = config.get("end_logo_options", {})
293
+ end_logo_enabled = end_logo_opts.get("enabled", True)
294
+ # Use default Sozo logo path if enabled but no path given
295
+ end_logo_path = end_logo_opts.get("path", DEFAULT_LOGO_PATH) if end_logo_enabled else None
296
+ end_logo_duration = float(end_logo_opts.get("duration", 3.0))
297
+
298
+
299
+ # --- Resource Management ---
300
+ clips_to_close = [] # Keep track of clips needing .close()
301
+
302
+ # --- Prepare Clips ---
303
+ section_video_clips = [] # Holds the final composited clip for each section
304
+ section_audio_clips = [] # Holds the audio clip for each section
305
+ final_durations = [] # Holds the calculated duration for each section
306
+
307
+ logger.info(f"Processing {num_sections} sections for video...")
308
+ total_duration_est = 0
309
+
310
+ for i in range(num_sections):
311
+ img_pil = images[i]
312
+ audio_path = audio_files[i]
313
+ text = section_texts[i]
314
+ particle_type = particle_types[i] if particles_enabled else None
315
+
316
+ logger.info(f"--- Section {i+1}/{num_sections} ---")
317
+
318
+ # Determine duration (based on audio or default)
319
+ duration = get_styled_audio_duration(audio_path)
320
+ final_durations.append(duration)
321
+ logger.info(f" Duration: {duration:.2f}s (Audio: {os.path.basename(str(audio_path)) if audio_path else 'No'})")
322
+ logger.info(f" Text: '{str(text)[:40]}...'")
323
+ logger.info(f" Particle: {particle_type if particle_type else 'None'}")
324
+
325
+ # --- Create Image Clip ---
326
+ img_clip = None
327
+ try:
328
+ img_resized_pil = resize_image_aspect_fill(img_pil, width, height)
329
+ img_np = np.array(img_resized_pil)
330
+ img_clip = ImageClip(img_np).set_duration(duration).set_fps(fps)
331
+ clips_to_close.append(img_clip) # Add base image clip for potential closing
332
+ except Exception as e:
333
+ logger.error(f"❌ Failed to process image {i+1}: {e}. Creating black frame.")
334
+ # Use a black frame as placeholder to avoid crashing
335
+ img_clip = ColorClip(size=(width, height), color=(0,0,0), duration=duration).set_fps(fps)
336
+ clips_to_close.append(img_clip)
337
+
338
+ # --- Create Audio Clip ---
339
+ section_audio_clip = None
340
+ if audio_path and os.path.exists(audio_path):
341
+ try:
342
+ # Load audio and ensure duration matches video segment exactly
343
+ # Use try-except for AudioFileClip as it can fail on corrupted files
344
+ temp_audio = AudioFileClip(audio_path)
345
+ clips_to_close.append(temp_audio) # Add for closing
346
+ # Trim or pad audio if necessary (MoviePy often handles slight discrepancies)
347
+ section_audio_clip = temp_audio.subclip(0, min(temp_audio.duration, duration))
348
+ # If audio is shorter than video, MoviePy usually pads with silence automatically when concatenating.
349
+ # If you need explicit looping/padding logic, add it here.
350
+ if section_audio_clip.duration < duration - 0.01: # Allow small tolerance
351
+ logger.warning(f"Audio duration ({section_audio_clip.duration:.2f}s) shorter than video ({duration:.2f}s). Silence will be added.")
352
+
353
+ section_audio_clips.append(section_audio_clip)
354
+
355
+ except Exception as e:
356
+ logger.error(f"❌ Failed to load audio '{os.path.basename(audio_path)}': {e}. Adding silence.")
357
+ # Add silence to keep timing consistent
358
+ silence = AudioClip(lambda t: 0, duration=duration, fps=44100) # Standard audio fps
359
+ section_audio_clips.append(silence)
360
+ clips_to_close.append(silence)
361
+ else:
362
+ # Add silence if no audio for this section
363
+ logger.info(" No audio path or file not found, adding silence.")
364
+ silence = AudioClip(lambda t: 0, duration=duration, fps=44100)
365
+ section_audio_clips.append(silence)
366
+ clips_to_close.append(silence)
367
+
368
+
369
+ # --- Create Subtitle Clip ---
370
+ subtitle_clip = None
371
+ if subtitles_enabled and text:
372
+ logger.info(" Creating subtitle clip...")
373
+ subtitle_clip = create_subtitle_clip(text, duration, width, height, subtitle_opts)
374
+ if subtitle_clip:
375
+ clips_to_close.append(subtitle_clip)
376
+
377
+
378
+ # --- Create Particle Overlay Clip ---
379
+ particle_clip = None
380
+ if particles_enabled and particle_type:
381
+ logger.info(f" Creating particle overlay: {particle_type}...")
382
+ particle_clip = create_particle_overlay_clip(particle_type, duration, width, height, particle_opts)
383
+ if particle_clip:
384
+ clips_to_close.append(particle_clip)
385
+
386
+
387
+ # --- Composite Section ---
388
+ # Layer order: Image -> Particles -> Subtitles
389
+ composited_layers = [img_clip] # Base image
390
+ if particle_clip:
391
+ composited_layers.append(particle_clip)
392
+ if subtitle_clip:
393
+ composited_layers.append(subtitle_clip)
394
+
395
+ # Only composite if more than one layer exists
396
+ if len(composited_layers) > 1:
397
+ final_section_clip = CompositeVideoClip(composited_layers, size=(width, height)).set_duration(duration).set_fps(fps)
398
+ clips_to_close.append(final_section_clip) # Add composite for closing
399
+ else:
400
+ final_section_clip = img_clip # Just the image clip if no overlays
401
+
402
+ section_video_clips.append(final_section_clip)
403
+ total_duration_est += duration
404
+
405
+ if not section_video_clips:
406
+ logger.error("❌ No valid video clips were created for any section.")
407
+ # Cleanup clips created so far
408
+ for clip in clips_to_close:
409
+ try: clip.close()
410
+ except: pass
411
+ return None
412
+
413
+ logger.info(f"Total estimated video duration (before transitions/end logo): {total_duration_est:.2f}s")
414
+
415
+ # --- Concatenate Video Clips with Transitions ---
416
+ final_video = None
417
+ if len(section_video_clips) > 1 and transition_type != 'none' and transition_duration > 0:
418
+ # Ensure transition isn't longer than the shortest clip involved
419
+ min_clip_dur = min(c.duration for c in section_video_clips)
420
+ safe_transition_duration = min(transition_duration, min_clip_dur / 2.01) # Ensure overlap is possible
421
+ if safe_transition_duration < transition_duration:
422
+ logger.warning(f"Requested transition duration ({transition_duration}s) too long for shortest clip ({min_clip_dur:.2f}s). Clamping to {safe_transition_duration:.2f}s.")
423
+ transition_duration = safe_transition_duration
424
+
425
+ logger.info(f"Applying '{transition_type}' transitions with duration {transition_duration:.2f}s...")
426
+
427
+ if transition_type == 'fade':
428
+ # Crossfade is best handled by concatenate_videoclips's transition argument
429
+ final_video = concatenate_videoclips(
430
+ section_video_clips,
431
+ method="compose",
432
+ transition=crossfadein(transition_duration)
433
+ )
434
+ elif transition_type.startswith('slide_'):
435
+ # Manual slide transitions require more complex composition
436
+ direction = transition_type.split('_')[1] # 'left', 'right', 'up', 'down'
437
+ processed_clips = []
438
+ current_time = 0
439
+ for i, clip in enumerate(section_video_clips):
440
+ clip = clip.set_start(current_time)
441
+ if i > 0: # Apply slide-in from second clip onwards
442
+ clip = slide_in(clip, duration=transition_duration, side=direction)
443
+ processed_clips.append(clip)
444
+ # Move next clip's start time back by transition duration for overlap
445
+ current_time += clip.duration - (transition_duration if i < len(section_video_clips) - 1 else 0)
446
+
447
+ final_video = CompositeVideoClip(processed_clips, size=(width, height))
448
+
449
+ else:
450
+ logger.warning(f"Unsupported transition type '{transition_type}', falling back to 'fade'.")
451
+ final_video = concatenate_videoclips(
452
+ section_video_clips,
453
+ method="compose",
454
+ transition=crossfadein(transition_duration)
455
+ )
456
+ else:
457
+ logger.info("Concatenating clips without transitions...")
458
+ final_video = concatenate_videoclips(section_video_clips, method="compose")
459
+
460
+ if not final_video:
461
+ logger.error("❌ Failed to concatenate video clips.")
462
+ # Cleanup clips
463
+ for clip in clips_to_close:
464
+ try: clip.close()
465
+ except: pass
466
+ return None
467
+
468
+ clips_to_close.append(final_video) # Add the main concatenated video for closing
469
+
470
+ # --- Concatenate Audio ---
471
+ final_audio = None
472
+ if section_audio_clips:
473
+ logger.info("Concatenating audio clips...")
474
+ try:
475
+ final_audio = concatenate_audioclips(section_audio_clips)
476
+ # Set the final video's audio
477
+ final_video = final_video.set_audio(final_audio)
478
+ logger.info(f"Combined audio duration: {final_audio.duration:.2f}s. Video duration: {final_video.duration:.2f}s")
479
+ # Moviepy usually handles slight mismatches, but log if significant
480
+ if abs(final_audio.duration - final_video.duration) > 0.1:
481
+ logger.warning("Significant mismatch between final video and audio durations detected.")
482
+ clips_to_close.append(final_audio)
483
+ except Exception as e:
484
+ logger.error(f"❌ Failed to concatenate or set audio: {e}. Video will be silent.")
485
+ final_video = final_video.set_audio(None) # Ensure no audio track if failed
486
+ else:
487
+ logger.warning("No audio clips found or generated. Video will be silent.")
488
+ final_video = final_video.set_audio(None)
489
+
490
+
491
+ # --- Add Watermark ---
492
+ watermark_clip_instance = None # Keep track for closing
493
+ if watermark_enabled and watermark_path and os.path.exists(watermark_path):
494
+ try:
495
+ logger.info(f"Adding watermark from: {watermark_path}")
496
+ # Use ismask=True if your watermark PNG has transparency
497
+ is_mask = watermark_path.lower().endswith(".png")
498
+ wm_img = ImageClip(watermark_path, ismask=is_mask, transparent=True)
499
+ clips_to_close.append(wm_img)
500
+
501
+ # Size
502
+ wm_size_param = watermark_opts.get("size", 0.15) # Default: 15% of video width
503
+ target_wm_width = None
504
+ target_wm_height = None
505
+ if isinstance(wm_size_param, float): # Relative size based on video width
506
+ target_wm_width = int(width * wm_size_param)
507
+ elif isinstance(wm_size_param, (tuple, list)) and len(wm_size_param) == 2: # Absolute size (w, h)
508
+ target_wm_width = int(wm_size_param[0])
509
+ target_wm_height = int(wm_size_param[1])
510
+
511
+ # Resize watermark maintaining aspect ratio
512
+ if target_wm_width and target_wm_height:
513
+ wm_img = wm_img.resize(newsize=(target_wm_width, target_wm_height))
514
+ elif target_wm_width:
515
+ wm_img = wm_img.resize(width=target_wm_width)
516
+ elif target_wm_height:
517
+ wm_img = wm_img.resize(height=target_wm_height)
518
+ # Else use original size if no specific size given
519
+
520
+
521
+ # Position with margin
522
+ wm_pos = watermark_opts.get("position", ("right", "bottom")) # Default bottom right
523
+ margin = int(watermark_opts.get("margin", 15)) # Pixels margin
524
+
525
+ # Convert position keywords to coordinates respecting margin
526
+ def get_coord(dim, size, pos_keyword, margin):
527
+ if pos_keyword == 'left': return margin
528
+ if pos_keyword == 'center': return (dim / 2) - (size / 2)
529
+ if pos_keyword == 'right': return dim - size - margin
530
+ # Allow numerical values (absolute or fractional)
531
+ if isinstance(pos_keyword, (int, float)): return pos_keyword
532
+ return margin # Default fallback
533
+
534
+ final_wm_pos = (
535
+ get_coord(width, wm_img.w, wm_pos[0], margin),
536
+ get_coord(height, wm_img.h, wm_pos[1], margin)
537
+ )
538
+ wm_img = wm_img.set_position(final_wm_pos)
539
+
540
+ # Opacity
541
+ wm_opacity = float(watermark_opts.get("opacity", 0.7))
542
+ if 0 <= wm_opacity < 1.0:
543
+ wm_img = wm_img.set_opacity(wm_opacity)
544
+
545
+ # Set duration to match video and composite
546
+ watermark_clip_instance = wm_img.set_duration(final_video.duration).set_start(0)
547
+
548
+ # Composite watermark on top
549
+ final_video = CompositeVideoClip([final_video, watermark_clip_instance], size=(width, height), use_bgclip=True)
550
+ clips_to_close.append(final_video) # The new composite needs closing too
551
+ logger.info("βœ… Watermark added.")
552
+
553
+ except Exception as e:
554
+ logger.error(f"❌ Failed to add watermark: {e}")
555
+ # Don't add watermark_clip_instance to clips_to_close if it failed
556
+
557
+
558
+ # --- Add End Logo Screen ---
559
+ end_logo_clip_instance = None # Keep track for closing
560
+ if end_logo_enabled and end_logo_path and os.path.exists(end_logo_path) and end_logo_duration > 0:
561
+ try:
562
+ logger.info(f"Adding end logo screen: {end_logo_path}")
563
+ end_logo_clip_instance = ImageClip(end_logo_path).set_duration(end_logo_duration).resize(newsize=(width, height))
564
+ clips_to_close.append(end_logo_clip_instance)
565
+
566
+ # Simple fade transition to end logo - Use concatenate again
567
+ final_video = concatenate_videoclips([final_video, end_logo_clip_instance], method="compose", transition=crossfadein(0.5))
568
+ clips_to_close.append(final_video) # The *new* final video needs closing
569
+ logger.info("βœ… End logo screen added.")
570
+ except Exception as e:
571
+ logger.error(f"❌ Failed to add end logo screen: {e}")
572
+
573
+
574
+ # --- Write Final Video ---
575
+ final_output_path = None
576
+ temp_audio_file_path = None # Keep track of temp audio file
577
+ try:
578
+ logger.info(f"Writing final video to: {output_path}...")
579
+ # Generate a unique temp audio filename
580
+ temp_audio_file_path = os.path.join(tempfile.gettempdir(), f"temp-audio-{uuid.uuid4().hex}.m4a")
581
+
582
+ final_video.write_videofile(
583
+ output_path,
584
+ codec='libx264', # Good compatibility
585
+ audio_codec='aac', # Standard audio codec
586
+ temp_audiofile=temp_audio_file_path, # Use explicit temp file path
587
+ remove_temp=True, # Let MoviePy handle removal on success
588
+ fps=fps,
589
+ preset='medium', # Balance quality and speed ('ultrafast'...'veryslow')
590
+ threads=4, # Use multiple threads if available
591
+ ffmpeg_params=[ # Ensure web compatibility
592
+ '-pix_fmt', 'yuv420p',
593
+ '-movflags', '+faststart' # Important for web streaming
594
+ ],
595
+ logger='bar' # Progress bar
596
+ )
597
+ logger.info(f"βœ… Final video saved successfully: {output_path}")
598
+ final_output_path = output_path # Set success path
599
+ except Exception as e:
600
+ # Log full traceback for write errors
601
+ import traceback
602
+ logger.error(f"❌ ERROR writing final video file: {e}\n{traceback.format_exc()}")
603
+ # Attempt to remove the potentially partially written output file
604
+ if os.path.exists(output_path):
605
+ try: os.remove(output_path)
606
+ except OSError: logger.error(f"Could not remove partially written file: {output_path}")
607
+ # Explicitly try removing temp audio if write failed and remove_temp=True might not have run
608
+ if temp_audio_file_path and os.path.exists(temp_audio_file_path):
609
+ try:
610
+ os.remove(temp_audio_file_path)
611
+ logger.info(f"Cleaned up temp audio file: {temp_audio_file_path}")
612
+ except OSError:
613
+ logger.error(f"Could not remove temp audio file: {temp_audio_file_path}")
614
+
615
+ finally:
616
+ # --- Close all opened clips ---
617
+ logger.debug(f"Closing {len(clips_to_close)} MoviePy clips...")
618
+ for clip in reversed(clips_to_close): # Close in reverse order (composites first)
619
+ try:
620
+ clip.close()
621
+ except Exception as e:
622
+ # Log closing errors but continue
623
+ logger.warning(f"Error closing a clip: {e}")
624
+ logger.debug("Finished closing clips.")
625
+
626
+ return final_output_path # Return path only on success