# Uploaded by abdullahyang via huggingface_hub (commit 667baea, verified).
# --- Legacy MoviePy 2.x implementation, kept commented out for reference ---
# import tempfile
# from moviepy import *
# def combine_video(sample_obj_path):
# gif1 = VideoFileClip(sample_obj_path+ "/shadow_gif/gt_obj_tranp.gif")
# gif2 = VideoFileClip(sample_obj_path+ "/shadow_gif/pred_closest_GT_obj_tranp.gif")
# gif3 = VideoFileClip(sample_obj_path+ "/shadow_gif/pred_far_0_obj_tranp.gif")
# common_duration = min(gif1.duration, gif2.duration, gif3.duration)
# gif1 = gif1.subclipped(0, common_duration)
# gif2 = gif2.subclipped(0, common_duration)
# gif3 = gif3.subclipped(0, common_duration)
# print(gif1.duration, gif2.duration, gif3.duration)
# text1 = TextClip(font = 'Roboto-VariableFont_wdth,wght.ttf', text="Ground Truth", font_size=30, color='black').with_position('center').with_duration(common_duration)
# text2 = TextClip(font = 'Roboto-VariableFont_wdth,wght.ttf', text="Generated_1", font_size=30, color='black').with_position('center').with_duration(common_duration)
# text3 = TextClip(font = 'Roboto-VariableFont_wdth,wght.ttf', text="Generated_2", font_size=30, color='black').with_position('center').with_duration(common_duration)
# gif1_with_text = CompositeVideoClip([gif1, text1.with_position(("center", "top"))])
# gif2_with_text = CompositeVideoClip([gif2, text2.with_position(("center", "top"))])
# gif3_with_text = CompositeVideoClip([gif3, text3.with_position(("center", "top"))])
# combined = clips_array([[gif1_with_text, gif2_with_text, gif3_with_text]])
# # combined = clips_array([[gif1, gif2, gif3]])
# target_duration = 2.5
# original_duration = combined.duration
# speed_factor = original_duration / target_duration
# accelerated_video = combined.with_speed_scaled(factor=speed_factor*3)
# # accelerated_video.write_videofile("combined_video.mp4", fps=60)
# output_path = tempfile.NamedTemporaryFile(suffix=".gif", delete=False).name
# accelerated_video.write_gif(output_path, fps=60)
# return output_path
# if __name__ == "__main__":
# combine_video("./9622_GRAB/")
import tempfile, os
from moviepy.editor import VideoFileClip, TextClip, CompositeVideoClip, clips_array, vfx
def combine_video(sample_obj_path):
    """Build a side-by-side comparison GIF (ground truth vs. two predictions).

    Loads the three shadow GIFs from ``<sample_obj_path>/shadow_gif/``, trims
    them to the shortest common duration, captions each tile, stacks them
    horizontally, speeds the strip up to ~2.5 s of playback, and writes the
    result to a temporary GIF file.

    Args:
        sample_obj_path: Directory of one sample; must contain a
            ``shadow_gif/`` subfolder with ``gt_obj_tranp.gif``,
            ``pred_closest_GT_obj_tranp.gif`` and ``pred_far_0_obj_tranp.gif``.

    Returns:
        str: Path of the written temporary GIF. The file is created with
        ``delete=False``, so the caller is responsible for removing it.
    """
    gif_dir = os.path.join(sample_obj_path, "shadow_gif")
    clip_names = (
        "gt_obj_tranp.gif",
        "pred_closest_GT_obj_tranp.gif",
        "pred_far_0_obj_tranp.gif",
    )
    labels = ("Ground Truth", "Generated_1", "Generated_2")

    clips = [VideoFileClip(os.path.join(gif_dir, name)) for name in clip_names]
    try:
        # Trim everything to the shortest clip so the tiles stay in sync.
        common_duration = min(clip.duration for clip in clips)
        clips = [clip.subclip(0, common_duration) for clip in clips]

        captioned = [
            CompositeVideoClip([
                clip,
                TextClip(label, fontsize=30, color='black')
                .set_duration(common_duration)
                .set_position(("center", "top")),
            ])
            for clip, label in zip(clips, labels)
        ]
        combined = clips_array([captioned])

        # Compress the whole strip into ~2.5 seconds of playback.
        target_duration = 2.5
        speed_factor = combined.duration / target_duration
        accelerated_video = combined.fx(vfx.speedx, factor=speed_factor)

        # delete=False keeps the file on disk after the handle is closed; the
        # `with` closes the handle so the descriptor is not leaked (the
        # original never closed it).
        with tempfile.NamedTemporaryFile(suffix=".gif", delete=False) as tmp:
            output_path = tmp.name
        accelerated_video.write_gif(output_path, fps=60)
        return output_path
    finally:
        # Release the GIF readers (the original leaked them).
        for clip in clips:
            clip.close()
if __name__ == "__main__":
    # Manual smoke test: render the comparison GIF for one GRAB sample.
    sample_dir = "./9622_GRAB/"
    combine_video(sample_dir)