# Source: video-generator-testing / generator.py (Hugging Face Space)
# Author: freeai-app — "Update generator.py", commit ac1f2ce (verified)
# generator.py
import os, random, json
from datetime import datetime
from gtts import gTTS
from moviepy.editor import AudioFileClip, ImageClip, CompositeVideoClip, TextClip, CompositeAudioClip
from PIL import Image, ImageDraw, ImageFilter, ImageFont
# Asset and output folders — created at import time so a clean checkout
# works on the first run without any manual setup.
OUTPUT_IMAGES = "outputs/images"
OUTPUT_AUDIO = "outputs/audio"
OUTPUT_VIDEOS = "outputs/videos"
BG_DIR = "backgrounds"
MUSIC_DIR = "music"

for _folder in (OUTPUT_IMAGES, OUTPUT_AUDIO, OUTPUT_VIDEOS, BG_DIR, MUSIC_DIR):
    os.makedirs(_folder, exist_ok=True)
# helpers
def next_index(folder, prefix, ext):
    """Return the next free 1-based index for files named ``{prefix}_{N}{ext}``.

    Scans *folder* for files matching the pattern, extracts each numeric N,
    and returns ``max(N) + 1`` — or 1 when no numbered file exists yet.

    Args:
        folder: Directory to scan (must exist).
        prefix: Filename prefix before the underscore, e.g. ``"voice"``.
        ext: Filename extension including the dot, e.g. ``".mp3"``.
    """
    nums = []
    for name in os.listdir(folder):
        if not (name.startswith(prefix) and name.endswith(ext)):
            continue
        # Filename layout is "{prefix}_{N}{ext}"; slice out the N part
        # (the +1 skips the underscore separator).
        part = name[len(prefix) + 1: -len(ext)]
        try:
            nums.append(int(part))
        except ValueError:
            # Non-numeric suffix (e.g. "voice_final.mp3") — skip it.
            # Narrowed from a bare `except:` which hid real errors.
            pass
    return max(nums, default=0) + 1
def generate_placeholder_image(prompt, out_path, size=(720, 1280)):
    """Paint a blurred, prompt-seeded abstract background with a caption.

    The base color is derived from ``hash(prompt)``; note Python string
    hashing is salted per process, so colors vary between runs.
    Saves the image to *out_path* and returns that path.
    """
    seed = abs(hash(prompt))
    r = 90 + (seed % 120)
    g = 70 + ((seed >> 8) % 140)
    b = 100 + ((seed >> 16) % 120)
    img = Image.new("RGB", size, (r, g, b))
    draw = ImageDraw.Draw(img)
    # Scatter a few random triangles for soft texture before blurring.
    for i in range(5):
        shape_color = ((r + i * 10) % 256, (g + i * 15) % 256, (b + i * 20) % 256)
        xy = [(random.randint(0, size[0]), random.randint(0, size[1])) for _ in range(3)]
        draw.polygon(xy, fill=shape_color)
    img = img.filter(ImageFilter.GaussianBlur(radius=6))
    # BUG FIX: GaussianBlur returns a NEW image, so the old Draw object no
    # longer targets the image that gets saved — the caption was silently
    # dropped. Re-create the Draw against the blurred image.
    draw = ImageDraw.Draw(img)
    try:
        font = ImageFont.truetype(
            "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", 28
        )
    except OSError:
        # Font file missing on this system — fall back to Pillow's builtin.
        font = ImageFont.load_default()
    caption = prompt if len(prompt) <= 60 else prompt[:57] + "..."
    # BUG FIX: Draw.textsize() was removed in Pillow 10; textbbox() is the
    # supported API for measuring rendered text.
    left, _top, right, _bottom = draw.textbbox((0, 0), caption, font=font)
    text_w = right - left
    draw.text(((size[0] - text_w) / 2, size[1] * 0.85), caption,
              font=font, fill=(255, 255, 255))
    img.save(out_path)
    return out_path
def create_tts(text, lang="en"):
    """Synthesize *text* with gTTS into the next free ``voice_N.mp3`` slot.

    Returns the path of the saved MP3 file.
    """
    slot = next_index(OUTPUT_AUDIO, "voice", ".mp3")
    out_path = os.path.join(OUTPUT_AUDIO, f"voice_{slot}.mp3")
    gTTS(text=text, lang=lang).save(out_path)
    return out_path
def pick_random_background():
    """Return a random image path from BG_DIR, or None when none exist."""
    candidates = [
        name for name in os.listdir(BG_DIR)
        if name.lower().endswith((".jpg", ".jpeg", ".png"))
    ]
    if candidates:
        return os.path.join(BG_DIR, random.choice(candidates))
    return None
def pick_random_music():
    """Return a random MP3 path from MUSIC_DIR, or None when none exist."""
    tracks = [name for name in os.listdir(MUSIC_DIR) if name.lower().endswith(".mp3")]
    if not tracks:
        return None
    return os.path.join(MUSIC_DIR, random.choice(tracks))
def add_looping_music(narration_path, music_path):
    """Mix the narration with background music looped to the narration length.

    Returns a MoviePy audio clip: the narration alone when *music_path* is
    missing/unusable, otherwise a CompositeAudioClip of narration over a
    quiet, looped, faded-out music bed.
    """
    narration = AudioFileClip(narration_path)
    if not music_path or not os.path.exists(music_path):
        return narration
    music = AudioFileClip(music_path).volumex(0.25)  # duck music under the voice
    # ROBUSTNESS FIX: a zero-length music clip would raise ZeroDivisionError
    # in the repetition calculation below — fall back to narration only.
    if not music.duration:
        return narration
    # Enough repetitions to cover the narration, then trim to exact length.
    reps = int(narration.duration // music.duration) + 2
    # MoviePy: CompositeAudioClip overlays rather than concatenates, so use
    # concatenate_audioclips to build the long bed.
    from moviepy.audio.AudioClip import concatenate_audioclips
    looped = concatenate_audioclips([music] * reps).subclip(0, narration.duration)
    looped = looped.audio_fadeout(2)
    return CompositeAudioClip([looped, narration.set_start(0)])
def build_vertical_video(text, image_path, narration_path, music_path, size=(720, 1280)):
    """Compose a vertical video: still background + centered caption + audio.

    Writes the MP4 to the next ``affirmation_N.mp4`` slot and returns its path.
    """
    narration = AudioFileClip(narration_path)
    duration = narration.duration
    mixed_audio = add_looping_music(narration_path, music_path)

    background = ImageClip(image_path).set_duration(duration).resize(size)
    caption = (
        TextClip(text, fontsize=36, color='white', method='caption',
                 size=(int(size[0] * 0.85), None))
        .set_duration(duration)
        .set_position(('center', 'center'))
    )

    composed = CompositeVideoClip([background, caption]).set_audio(mixed_audio)
    slot = next_index(OUTPUT_VIDEOS, "affirmation", ".mp4")
    out_path = os.path.join(OUTPUT_VIDEOS, f"affirmation_{slot}.mp4")
    composed.write_videofile(out_path, fps=24, codec='libx264', audio_codec='aac')
    return out_path
def generate_one(text, prompt=None, chosen_bg=None, chosen_music=None, lang='en'):
    """Produce one affirmation video end-to-end.

    Background priority: explicit *chosen_bg* path > random library image >
    freshly generated placeholder. Music priority: explicit *chosen_music*
    path > random library track > none. Returns a dict of produced asset
    paths plus an ISO timestamp.
    """
    if chosen_bg and os.path.exists(chosen_bg):
        img_path = chosen_bg
    else:
        img_path = pick_random_background()
        if img_path is None:
            # No library image available — synthesize a placeholder.
            slot = next_index(OUTPUT_IMAGES, "bg", ".png")
            img_path = os.path.join(OUTPUT_IMAGES, f"bg_{slot}.png")
            generate_placeholder_image(prompt or text, img_path, size=(720, 1280))

    voice_path = create_tts(text, lang)

    if chosen_music and os.path.exists(chosen_music):
        music_path = chosen_music
    else:
        music_path = pick_random_music()

    print(f"🎨 Background used: {img_path}")
    print(f"🎵 Music used: {music_path}")

    video_path = build_vertical_video(text, img_path, voice_path, music_path)
    return {
        "video": video_path,
        "audio": voice_path,
        "image": img_path,
        "music": music_path,
        "timestamp": datetime.now().isoformat(),
    }
# quick test (uncomment to run quick test)
# if __name__ == '__main__':
# print(generate_one("You are capable of amazing things", prompt="sunrise over mountains"))