Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -4,6 +4,7 @@ import numpy as np
|
|
| 4 |
from PIL import Image
|
| 5 |
import os
|
| 6 |
|
|
|
|
| 7 |
def resize_and_fit_image(img, target_size=(1280, 720), padding_color=(0, 0, 0)):
|
| 8 |
width, height = img.size
|
| 9 |
target_width, target_height = target_size
|
|
@@ -29,47 +30,59 @@ def resize_and_fit_image(img, target_size=(1280, 720), padding_color=(0, 0, 0)):
|
|
| 29 |
|
| 30 |
return final_img
|
| 31 |
|
|
|
|
| 32 |
def apply_zoom_effect(image_clip):
    """Apply a slow, continuous zoom-in to *image_clip*.

    The scale factor grows linearly from 1.0 at t=0 by 5% per second,
    so longer clips end noticeably more magnified.
    """
    return image_clip.resize(lambda t: 1 + 0.05 * t)
|
| 35 |
|
| 36 |
def process_and_generate_video(audio_file, images):
|
| 37 |
try:
|
| 38 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
audio_duration = audio.duration
|
| 40 |
image_clips = []
|
| 41 |
image_count = len(images)
|
| 42 |
image_duration = audio_duration / image_count
|
| 43 |
-
|
| 44 |
print(f"Audio duration: {audio_duration} seconds, Image count: {image_count}")
|
| 45 |
-
|
|
|
|
| 46 |
for img_path in images:
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
|
|
|
|
|
|
| 59 |
print(f"Image clips: {len(image_clips)} clips created.")
|
| 60 |
-
|
|
|
|
| 61 |
video = mp.concatenate_videoclips(image_clips, method="compose")
|
| 62 |
video = video.set_audio(audio)
|
| 63 |
-
|
|
|
|
| 64 |
output_path = '/content/generated_video.mp4'
|
| 65 |
video.write_videofile(output_path, codec='libx264', audio_codec='aac', threads=4, fps=30, preset='ultrafast')
|
| 66 |
-
|
| 67 |
return output_path # Return the file path for Gradio output
|
| 68 |
-
|
| 69 |
except Exception as e:
|
| 70 |
print(f"Error during video generation: {str(e)}")
|
| 71 |
return f"Error generating video: {str(e)}"
|
| 72 |
|
|
|
|
| 73 |
def gradio_interface():
|
| 74 |
with gr.Blocks() as demo:
|
| 75 |
with gr.Row():
|
|
@@ -80,6 +93,7 @@ def gradio_interface():
|
|
| 80 |
|
| 81 |
output_video = gr.Video(label="Generated Video")
|
| 82 |
|
|
|
|
| 83 |
generate_button.click(fn=process_and_generate_video, inputs=[mp3_input, image_input], outputs=output_video)
|
| 84 |
|
| 85 |
demo.launch()
|
|
|
|
| 4 |
from PIL import Image
|
| 5 |
import os
|
| 6 |
|
| 7 |
+
# Resize image while maintaining aspect ratio
|
| 8 |
def resize_and_fit_image(img, target_size=(1280, 720), padding_color=(0, 0, 0)):
|
| 9 |
width, height = img.size
|
| 10 |
target_width, target_height = target_size
|
|
|
|
| 30 |
|
| 31 |
return final_img
|
| 32 |
|
| 33 |
+
# Apply zoom effect to image clip
def apply_zoom_effect(image_clip):
    """Return *image_clip* resized with a gradual zoom-in (5% per second)."""
    def zoom_factor(t):
        # Linear ramp: scale is 1.0 at t=0 and grows by 0.05 each second.
        return 1 + 0.05 * t

    zoomed_clip = image_clip.resize(zoom_factor)
    return zoomed_clip
|
| 37 |
|
| 38 |
def process_and_generate_video(audio_file, images):
    """Build a slideshow video from *images* timed to fill *audio_file*.

    Each image is letterboxed to 1280x720, shown for an equal share of the
    audio's duration with a gradual zoom, and (after the first clip) a 1 s
    crossfade-in.  The assembled video is rendered with the audio track.

    Parameters
    ----------
    audio_file : str
        Path to the soundtrack file. Anything other than a string raises.
    images : sequence of str
        Image file paths, in display order. Non-string entries are skipped
        (they still count toward the per-image duration split).

    Returns
    -------
    str
        Path of the rendered MP4 on success, or a human-readable
        "Error generating video: ..." message on any failure — Gradio then
        shows the string instead of crashing the app.
    """
    audio = None
    try:
        # Fail fast with a clear message instead of the cryptic
        # ZeroDivisionError that audio_duration / 0 would raise below.
        if not images:
            raise ValueError("At least one image is required.")

        # Check if the file paths are strings, and convert to actual file-like objects
        if isinstance(audio_file, str):
            audio = mp.AudioFileClip(audio_file)
        else:
            raise ValueError("Expected a valid file path for audio.")

        audio_duration = audio.duration
        image_clips = []
        image_count = len(images)
        # Split the soundtrack evenly across all supplied images.
        image_duration = audio_duration / image_count

        print(f"Audio duration: {audio_duration} seconds, Image count: {image_count}")

        # Iterate over images, resize them, and create video clips
        for img_path in images:
            if isinstance(img_path, str):  # Ensure img_path is a string (file path)
                img = Image.open(img_path)
                img = resize_and_fit_image(img, target_size=(1280, 720))

                # Create image clip and apply zoom effect
                img_clip = mp.ImageClip(np.array(img)).set_duration(image_duration).set_fps(30)
                img_clip = apply_zoom_effect(img_clip)

                # Add transition effect - Crossfade In
                if len(image_clips) > 0:
                    img_clip = img_clip.crossfadein(1)

                image_clips.append(img_clip)

        # NOTE(review): non-string entries are silently dropped; if every
        # entry is dropped, concatenate below fails and is reported as an error.
        print(f"Image clips: {len(image_clips)} clips created.")

        # Concatenate image clips with audio
        video = mp.concatenate_videoclips(image_clips, method="compose")
        video = video.set_audio(audio)

        # Set output path
        output_path = '/content/generated_video.mp4'
        video.write_videofile(output_path, codec='libx264', audio_codec='aac', threads=4, fps=30, preset='ultrafast')

        return output_path  # Return the file path for Gradio output

    except Exception as e:
        # Release the audio reader if it was opened; otherwise the ffmpeg
        # subprocess / file handle leaks on every failed attempt.
        if audio is not None:
            audio.close()
        print(f"Error during video generation: {str(e)}")
        return f"Error generating video: {str(e)}"
|
| 84 |
|
| 85 |
+
# Gradio interface setup
|
| 86 |
def gradio_interface():
|
| 87 |
with gr.Blocks() as demo:
|
| 88 |
with gr.Row():
|
|
|
|
| 93 |
|
| 94 |
output_video = gr.Video(label="Generated Video")
|
| 95 |
|
| 96 |
+
# Generate button triggers video creation process
|
| 97 |
generate_button.click(fn=process_and_generate_video, inputs=[mp3_input, image_input], outputs=output_video)
|
| 98 |
|
| 99 |
demo.launch()
|