Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -2,9 +2,7 @@ import gradio as gr
|
|
| 2 |
import moviepy.editor as mp
|
| 3 |
import numpy as np
|
| 4 |
from PIL import Image
|
| 5 |
-
import tempfile
|
| 6 |
import os
|
| 7 |
-
import traceback
|
| 8 |
|
| 9 |
# Resize image while maintaining aspect ratio
|
| 10 |
def resize_image_with_aspect_ratio(img, target_size=(1280, 720), padding_color=(0, 0, 0)):
|
|
@@ -29,27 +27,20 @@ def resize_image_with_aspect_ratio(img, target_size=(1280, 720), padding_color=(
|
|
| 29 |
|
| 30 |
return final_img
|
| 31 |
|
| 32 |
-
# Video generation function
|
| 33 |
def process_and_generate_video(audio_file, images):
|
| 34 |
-
debug_log = []
|
| 35 |
|
| 36 |
try:
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
raise ValueError("Both audio and images are required for video generation.")
|
| 40 |
|
| 41 |
-
#
|
| 42 |
-
debug_log.append(f"Received audio file: {audio_file}")
|
| 43 |
-
debug_log.append(f"Received images: {[img for img in images]}")
|
| 44 |
-
|
| 45 |
-
# Load audio file
|
| 46 |
audio = mp.AudioFileClip(audio_file)
|
| 47 |
audio_duration = audio.duration
|
| 48 |
-
debug_log.append(f"Audio duration: {audio_duration:.2f} seconds")
|
| 49 |
-
|
| 50 |
-
# Process images
|
| 51 |
image_clips = []
|
| 52 |
image_duration = audio_duration / len(images)
|
|
|
|
| 53 |
for img_path in images:
|
| 54 |
debug_log.append(f"Processing image: {img_path}")
|
| 55 |
img = Image.open(img_path)
|
|
@@ -57,44 +48,39 @@ def process_and_generate_video(audio_file, images):
|
|
| 57 |
img_clip = mp.ImageClip(np.array(resized_img)).set_duration(image_duration).set_fps(24)
|
| 58 |
image_clips.append(img_clip)
|
| 59 |
|
| 60 |
-
debug_log.append(f"Created {len(image_clips)} image clips")
|
| 61 |
|
| 62 |
-
# Concatenate clips
|
| 63 |
video = mp.concatenate_videoclips(image_clips, method="compose")
|
| 64 |
video = video.set_audio(audio)
|
| 65 |
|
| 66 |
-
#
|
| 67 |
-
output_path =
|
| 68 |
-
video.write_videofile(output_path, codec=
|
| 69 |
-
debug_log.append(f"Video successfully saved to {output_path}")
|
| 70 |
|
| 71 |
-
|
|
|
|
|
|
|
| 72 |
|
| 73 |
except Exception as e:
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
return None, "\n".join(debug_log)
|
| 78 |
|
| 79 |
# Gradio interface setup
|
| 80 |
def gradio_interface():
|
| 81 |
with gr.Blocks() as demo:
|
| 82 |
with gr.Row():
|
| 83 |
with gr.Column():
|
| 84 |
-
mp3_input = gr.Audio(type="filepath", label="Upload MP3")
|
| 85 |
-
image_input = gr.File(type="filepath", file_types=[".jpg", ".png"], label="Upload Images", file_count="multiple")
|
| 86 |
-
generate_button = gr.Button("Generate Video")
|
| 87 |
|
| 88 |
-
output_video = gr.Video(label="Generated Video")
|
| 89 |
-
debug_logs = gr.Textbox(label="Debug Logs", interactive=False) # Display debug logs
|
| 90 |
|
| 91 |
-
generate_button.click(
|
| 92 |
-
fn=process_and_generate_video,
|
| 93 |
-
inputs=[mp3_input, image_input],
|
| 94 |
-
outputs=[output_video, debug_logs]
|
| 95 |
-
)
|
| 96 |
|
| 97 |
demo.launch()
|
| 98 |
|
| 99 |
-
# Run the
|
| 100 |
gradio_interface()
|
|
|
|
| 2 |
import os
import tempfile

import moviepy.editor as mp
import numpy as np
from PIL import Image
|
|
|
|
| 6 |
|
| 7 |
# Resize image while maintaining aspect ratio
|
| 8 |
def resize_image_with_aspect_ratio(img, target_size=(1280, 720), padding_color=(0, 0, 0)):
|
|
|
|
| 27 |
|
| 28 |
return final_img
|
| 29 |
|
| 30 |
+
# Video generation function with debug logging
|
| 31 |
def process_and_generate_video(audio_file, images):
    """Build a slideshow video from uploaded images timed to an audio track.

    Each image is shown for an equal share of the audio's duration, letterboxed
    to a common frame via resize_image_with_aspect_ratio, concatenated, and
    muxed with the audio into an H.264/AAC MP4.

    Args:
        audio_file: Filesystem path to the uploaded audio (from gr.Audio).
        images: List of image file paths (from gr.File, file_count="multiple").

    Returns:
        Path to the generated MP4 on success, or None on failure. Returning
        None (instead of an "Error: ..." string) matters because the gr.Video
        output would otherwise treat the string as a file path and fail.
    """
    debug_log = []

    try:
        # Guard against missing inputs: Gradio passes None for an empty
        # component, and an empty image list would otherwise raise
        # ZeroDivisionError at the per-image duration computation below.
        if not audio_file or not images:
            raise ValueError("Both an audio file and at least one image are required.")

        debug_log.append(f"Audio file received: {audio_file}")
        debug_log.append(f"Images received: {images}")

        # Process audio file: its total duration drives the per-image display time.
        audio = mp.AudioFileClip(audio_file)
        audio_duration = audio.duration

        image_clips = []
        image_duration = audio_duration / len(images)

        for img_path in images:
            debug_log.append(f"Processing image: {img_path}")
            img = Image.open(img_path)
            # NOTE(review): this resize line was elided in the diff view;
            # restored from the surrounding usage of `resized_img` — confirm
            # the target size against the original line 47.
            resized_img = resize_image_with_aspect_ratio(img)
            img_clip = mp.ImageClip(np.array(resized_img)).set_duration(image_duration).set_fps(24)
            image_clips.append(img_clip)

        debug_log.append(f"Created {len(image_clips)} image clips.")

        # Concatenate image clips and attach the audio track.
        video = mp.concatenate_videoclips(image_clips, method="compose")
        video = video.set_audio(audio)

        # Unique output path per request: a fixed '/tmp/generated_video.mp4'
        # is not portable and collides when two requests run concurrently.
        fd, output_path = tempfile.mkstemp(suffix=".mp4")
        os.close(fd)  # write_videofile reopens the path itself
        video.write_videofile(output_path, codec='libx264', audio_codec='aac')

        debug_log.append(f"Video successfully generated: {output_path}")
        print("\n".join(debug_log))  # Print logs to the console for debugging
        return output_path

    except Exception as e:
        debug_log.append(f"Error: {str(e)}")
        print("\n".join(debug_log))  # Print logs to the console for debugging
        # None leaves the gr.Video component empty rather than pointing it
        # at a non-existent "Error: ..." pseudo-path.
        return None
|
|
|
|
| 69 |
|
| 70 |
# Gradio interface setup
|
| 71 |
def gradio_interface():
    """Assemble the Gradio UI for the slideshow generator and launch it."""
    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                # Inputs: one audio track plus any number of still images.
                audio_in = gr.Audio(type="filepath", label="Upload MP3")
                images_in = gr.File(type="filepath", file_types=[".jpg", ".png"], label="Upload Images", file_count="multiple")
                run_btn = gr.Button("Generate Video")

                # Output: the rendered slideshow video.
                video_out = gr.Video(label="Generated Video")

                # Wire the button to the video generator.
                run_btn.click(
                    fn=process_and_generate_video,
                    inputs=[audio_in, images_in],
                    outputs=video_out,
                )

    demo.launch()
|
| 84 |
|
| 85 |
+
# Run the interface only when this file is executed directly, so importing
# the module (e.g. from tests or another app) does not launch the server.
if __name__ == "__main__":
    gradio_interface()
|