jebin2 commited on
Commit
ce4e0f0
·
1 Parent(s): 361c3a2

refactor: Remove explicit vertical video resizing, add the `yuv420p` pixel format, and enhance social media publisher error reporting with API messages.

Browse files
social_media_publishers/instagram/publisher.py CHANGED
@@ -223,7 +223,7 @@ class InstagramPublisher(SocialPublisher):
223
  logger.info("⏳ Waiting for processing...")
224
 
225
  for attempt in range(1, max_attempts + 1):
226
- params = {'access_token': self.access_token, 'fields': 'status_code,status'}
227
  response = self._get_session().get(endpoint, params=params)
228
  result = response.json()
229
 
@@ -233,8 +233,8 @@ class InstagramPublisher(SocialPublisher):
233
  if status == 'FINISHED':
234
  return status
235
  elif status == 'ERROR':
236
- error_detail = result.get('status', '')
237
- logger.error(f"❌ Instagram processing error: {error_detail}")
238
  return status
239
 
240
  time.sleep(10)
 
223
  logger.info("⏳ Waiting for processing...")
224
 
225
  for attempt in range(1, max_attempts + 1):
226
+ params = {'access_token': self.access_token, 'fields': 'status_code,status,error_message'}
227
  response = self._get_session().get(endpoint, params=params)
228
  result = response.json()
229
 
 
233
  if status == 'FINISHED':
234
  return status
235
  elif status == 'ERROR':
236
+ error_detail = result.get('error_message') or result.get('status', '')
237
+ logger.error(f"❌ Instagram processing error: {error_detail} | Full response: {result}")
238
  return status
239
 
240
  time.sleep(10)
social_media_publishers/threads/publisher.py CHANGED
@@ -241,16 +241,18 @@ class ThreadsPublisher(SocialPublisher):
241
  print("⏳ Waiting for processing...")
242
 
243
  for attempt in range(1, max_attempts + 1):
244
- params = {'access_token': self.access_token, 'fields': 'status'}
245
  response = self._get_session().get(endpoint, params=params)
246
  result = response.json()
247
-
248
  status = result.get('status')
249
  print(f" Attempt {attempt}/{max_attempts}: {status}")
250
-
251
  if status == 'FINISHED':
252
  return status
253
  elif status == 'ERROR':
 
 
254
  return status
255
 
256
  time.sleep(5) # Threads might be faster/slower
 
241
  print("⏳ Waiting for processing...")
242
 
243
  for attempt in range(1, max_attempts + 1):
244
+ params = {'access_token': self.access_token, 'fields': 'status,error_message'}
245
  response = self._get_session().get(endpoint, params=params)
246
  result = response.json()
247
+
248
  status = result.get('status')
249
  print(f" Attempt {attempt}/{max_attempts}: {status}")
250
+
251
  if status == 'FINISHED':
252
  return status
253
  elif status == 'ERROR':
254
+ error_detail = result.get('error_message', '')
255
+ print(f"❌ Threads processing error: {error_detail} | Full response: {result}")
256
  return status
257
 
258
  time.sleep(5) # Threads might be faster/slower
src/video_renderer.py CHANGED
@@ -97,7 +97,6 @@ class VideoRenderer:
97
  HOOK_VIDEO_DURATION = 2
98
  hook_clip = VideoFileClip(assets["hook_video"]["veo_video_data"]["local_path"])
99
  runway_as_second_ai_video = VideoFileClip(assets["hook_video"]["local_path"]).subclip(0, 2)
100
- runway_as_second_ai_video = self._resize_for_vertical(runway_as_second_ai_video)
101
  else:
102
  hook_clip = VideoFileClip(assets["hook_video"]["local_path"])
103
 
@@ -141,7 +140,7 @@ class VideoRenderer:
141
  if lib_video.get("local_path"):
142
  try:
143
  lib_clip = VideoFileClip(lib_video["local_path"])
144
- original_clip = self._resize_for_vertical(lib_clip, target_size)
145
  lib_hook_start = None
146
  lib_hook_end = None
147
  prev_clip = None
@@ -149,13 +148,13 @@ class VideoRenderer:
149
 
150
  # Only apply hook logic if hook is available
151
  if hook_start and i == 0:
152
- lib_hook_start = self._resize_for_vertical(hook_start, target_size)
153
  original_clip = runway_as_second_ai_video if runway_as_second_ai_video else original_clip
154
  if i == 1 and get_config_value("use_veo", False) and runway_as_second_ai_video:
155
  if HOOK_START_ORIGINAL_CLIP_USED < runway_as_second_ai_video.duration-0.5:
156
  original_clip = runway_as_second_ai_video.subclip(HOOK_START_ORIGINAL_CLIP_USED, runway_as_second_ai_video.duration)
157
  if hook_end and i+1 == len(assets.get("selected_videos", [])):
158
- lib_hook_end = self._resize_for_vertical(hook_end, target_size)
159
  if len(clips) > 0:
160
  prev_clip = clips[-1][1]
161
  prev_clip_file = selected_videos[-2]["local_path"] if len(selected_videos) > 1 else None
@@ -363,7 +362,6 @@ class VideoRenderer:
363
  video_usage[lib_video['alternate_url']] = video_usage.get(lib_video['alternate_url'], 0) + 1
364
  set_config_value("video_usage_count", video_usage)
365
  alternate_clip = VideoFileClip(alternate_url_local_path)
366
- alternate_clip = self._resize_for_vertical(alternate_clip)
367
  reverse_alternate_clip = self.reverse_clip(alternate_url_local_path)
368
 
369
  combined = concatenate_videoclips([original_clip, alternate_clip, reverse_alternate_clip, original_clip], method="compose")
@@ -384,8 +382,7 @@ class VideoRenderer:
384
  interpolated_file = utils.interpolate_video(original_clip_path)
385
  if interpolated_file:
386
  interpolated = VideoFileClip(interpolated_file)
387
- interpolated = self._resize_for_vertical(interpolated)
388
-
389
  if interpolated.duration >= target_duration:
390
  logger.debug("Using interpolated clip for extension")
391
  result = interpolated.subclip(0, target_duration)
@@ -421,7 +418,7 @@ class VideoRenderer:
421
 
422
  def reverse_clip(self, clip_path):
423
  reversed_clip = VideoFileClip(utils_reverse_clip(clip_path))
424
- return self._resize_for_vertical(reversed_clip)
425
 
426
  def loop_clip(self, clip, target_duration):
427
  loop_count = int(target_duration // clip.duration) + 1 # how many loops needed
@@ -556,12 +553,8 @@ class VideoRenderer:
556
  f"📊 Total video sequence duration: {total_duration:.2f}s (target: {TARGET_MIN_DURATION}-{TARGET_MAX_DURATION}s)"
557
  )
558
 
559
- # Resize all clips to 9:16 vertical
560
- target_size = (1080, 1920)
561
- resized_clips = [self._resize_for_vertical(clip, target_size) for clip in sequence_clips]
562
-
563
  # FIXED: Remove transition_duration parameter
564
- final_sequence = concatenate_videoclips(resized_clips, method="compose")
565
  logger.debug(f"✅ Created video sequence with NATURAL SPEED: {final_sequence.duration:.2f}s")
566
 
567
  return final_sequence
@@ -795,28 +788,6 @@ class VideoRenderer:
795
 
796
  return phrases
797
 
798
- def _resize_for_vertical(self, clip: VideoFileClip, target_size: tuple = (1080, 1920)) -> VideoFileClip:
799
- return clip # resize on the download itself
800
- """Resize clip to fit vertical 9:16 aspect ratio"""
801
- target_w, target_h = target_size
802
- clip_aspect = clip.w / clip.h
803
- target_aspect = target_w / target_h
804
-
805
- if clip_aspect > target_aspect:
806
- new_clip = clip.resize(height=target_h)
807
- else:
808
- new_clip = clip.resize(width=target_w)
809
-
810
- # Center crop
811
- try:
812
- new_clip = new_clip.crop(x_center=new_clip.w / 2, y_center=new_clip.h / 2, width=target_w, height=target_h)
813
- except Exception:
814
- x1 = (new_clip.w - target_w) // 2
815
- y1 = (new_clip.h - target_h) // 2
816
- new_clip = new_clip.crop(x1=x1, y1=y1, x2=x1 + target_w, y2=y1 + target_h)
817
-
818
- return new_clip
819
-
820
  async def add_audio_to_video(self, video_path: str) -> str:
821
  """
822
  Add audio track to pre-rendered video (NO speedup - video is already correct duration)
@@ -990,7 +961,15 @@ class VideoRenderer:
990
  try:
991
  output_path = self.temp_dir / f"{uuid.uuid4().hex}.mp4"
992
 
993
- video_clip.write_videofile(str(output_path), codec="libx264", audio_codec="aac", fps=25, verbose=False, logger=None)
 
 
 
 
 
 
 
 
994
 
995
  video_clip.close()
996
  return str(output_path)
@@ -1011,7 +990,12 @@ class VideoRenderer:
1011
  logger.debug(f"📹 Rendering video (no audio): (unknown)")
1012
 
1013
  video_clip.write_videofile(
1014
- str(output_path), codec="libx264", fps=25, verbose=False, logger=None
 
 
 
 
 
1015
  )
1016
 
1017
  return str(output_path)
 
97
  HOOK_VIDEO_DURATION = 2
98
  hook_clip = VideoFileClip(assets["hook_video"]["veo_video_data"]["local_path"])
99
  runway_as_second_ai_video = VideoFileClip(assets["hook_video"]["local_path"]).subclip(0, 2)
 
100
  else:
101
  hook_clip = VideoFileClip(assets["hook_video"]["local_path"])
102
 
 
140
  if lib_video.get("local_path"):
141
  try:
142
  lib_clip = VideoFileClip(lib_video["local_path"])
143
+ original_clip = lib_clip
144
  lib_hook_start = None
145
  lib_hook_end = None
146
  prev_clip = None
 
148
 
149
  # Only apply hook logic if hook is available
150
  if hook_start and i == 0:
151
+ lib_hook_start = hook_start
152
  original_clip = runway_as_second_ai_video if runway_as_second_ai_video else original_clip
153
  if i == 1 and get_config_value("use_veo", False) and runway_as_second_ai_video:
154
  if HOOK_START_ORIGINAL_CLIP_USED < runway_as_second_ai_video.duration-0.5:
155
  original_clip = runway_as_second_ai_video.subclip(HOOK_START_ORIGINAL_CLIP_USED, runway_as_second_ai_video.duration)
156
  if hook_end and i+1 == len(assets.get("selected_videos", [])):
157
+ lib_hook_end = hook_end
158
  if len(clips) > 0:
159
  prev_clip = clips[-1][1]
160
  prev_clip_file = selected_videos[-2]["local_path"] if len(selected_videos) > 1 else None
 
362
  video_usage[lib_video['alternate_url']] = video_usage.get(lib_video['alternate_url'], 0) + 1
363
  set_config_value("video_usage_count", video_usage)
364
  alternate_clip = VideoFileClip(alternate_url_local_path)
 
365
  reverse_alternate_clip = self.reverse_clip(alternate_url_local_path)
366
 
367
  combined = concatenate_videoclips([original_clip, alternate_clip, reverse_alternate_clip, original_clip], method="compose")
 
382
  interpolated_file = utils.interpolate_video(original_clip_path)
383
  if interpolated_file:
384
  interpolated = VideoFileClip(interpolated_file)
385
+
 
386
  if interpolated.duration >= target_duration:
387
  logger.debug("Using interpolated clip for extension")
388
  result = interpolated.subclip(0, target_duration)
 
418
 
419
  def reverse_clip(self, clip_path):
420
  reversed_clip = VideoFileClip(utils_reverse_clip(clip_path))
421
+ return reversed_clip
422
 
423
  def loop_clip(self, clip, target_duration):
424
  loop_count = int(target_duration // clip.duration) + 1 # how many loops needed
 
553
  f"📊 Total video sequence duration: {total_duration:.2f}s (target: {TARGET_MIN_DURATION}-{TARGET_MAX_DURATION}s)"
554
  )
555
 
 
 
 
 
556
  # FIXED: Remove transition_duration parameter
557
+ final_sequence = concatenate_videoclips(sequence_clips, method="compose")
558
  logger.debug(f"✅ Created video sequence with NATURAL SPEED: {final_sequence.duration:.2f}s")
559
 
560
  return final_sequence
 
788
 
789
  return phrases
790
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
791
  async def add_audio_to_video(self, video_path: str) -> str:
792
  """
793
  Add audio track to pre-rendered video (NO speedup - video is already correct duration)
 
961
  try:
962
  output_path = self.temp_dir / f"{uuid.uuid4().hex}.mp4"
963
 
964
+ video_clip.write_videofile(
965
+ str(output_path),
966
+ codec="libx264",
967
+ audio_codec="aac",
968
+ fps=25,
969
+ verbose=False,
970
+ logger=None,
971
+ ffmpeg_params=["-pix_fmt", "yuv420p"]
972
+ )
973
 
974
  video_clip.close()
975
  return str(output_path)
 
990
  logger.debug(f"📹 Rendering video (no audio): (unknown)")
991
 
992
  video_clip.write_videofile(
993
+ str(output_path),
994
+ codec="libx264",
995
+ fps=25,
996
+ verbose=False,
997
+ logger=None,
998
+ ffmpeg_params=["-pix_fmt", "yuv420p"]
999
  )
1000
 
1001
  return str(output_path)