import gradio as gr
from gtts import gTTS
from moviepy.editor import AudioFileClip, ImageClip  # explicit imports instead of wildcard
from PIL import Image
import tempfile
import os


# -----------------------------------------------------
# Function: Generate video (text → speech + static background)
# -----------------------------------------------------
def generate_video(prompt, affirmation, with_visuals):
    """Render an MP4 of gTTS narration over a static white background.

    Args:
        prompt: Main journal prompt / affirmation text to speak.
        affirmation: Optional extra affirmation appended after the prompt.
        with_visuals: Accepted for interface compatibility; ignored in this
            CPU-safe version (no AI visuals are generated).

    Returns:
        Path to the generated .mp4 file (caller is responsible for it).

    Raises:
        Whatever gTTS / MoviePy raise on network or encoding failure.
    """
    # Build the spoken text; skip the separator when no extra affirmation.
    text = f"{prompt}. {affirmation}" if affirmation else prompt
    tts = gTTS(text=text, lang="en")

    # mkstemp + os.close gives a path with no lingering open handle,
    # so gTTS/PIL can write to it portably (NamedTemporaryFile keeps the
    # handle open, which breaks on Windows).
    fd, audio_path = tempfile.mkstemp(suffix=".mp3")
    os.close(fd)
    tts.save(audio_path)

    # Background: plain white frame (no AI visuals in this version).
    fd, bg_path = tempfile.mkstemp(suffix=".png")
    os.close(fd)
    Image.new("RGB", (720, 480), color=(255, 255, 255)).save(bg_path)

    fd, out_path = tempfile.mkstemp(suffix=".mp4")
    os.close(fd)

    audioclip = AudioFileClip(audio_path)
    # Match the video length to the narration instead of a hardcoded 15 s,
    # so long speech is not cut off and short speech has no dead silence.
    clip = ImageClip(bg_path).set_duration(audioclip.duration)
    final = clip.set_audio(audioclip)
    try:
        final.write_videofile(out_path, fps=24)
    finally:
        # Release ffmpeg readers and remove intermediate temp files even
        # if encoding fails; only the output mp4 is left on disk.
        final.close()
        clip.close()
        audioclip.close()
        for path in (audio_path, bg_path):
            try:
                os.remove(path)
            except OSError:
                pass  # best-effort cleanup; never mask the real error
    return out_path


# -----------------------------------------------------
# Gradio Interface
# -----------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("## ✨ Journal & Affirmation App (CPU Safe Version) ✨")

    with gr.Row():
        prompt_in = gr.Textbox(label="Journal Prompt / Affirmation")
        affirmation_in = gr.Textbox(label="Extra Affirmation (optional)")

    with gr.Row():
        # Placeholder checkbox kept for interface parity with the
        # visuals-enabled version; intentionally non-interactive.
        visuals_in = gr.Checkbox(
            label="(No AI visuals in this version)", value=False, interactive=False
        )

    generate_btn = gr.Button("🎬 Generate Video")
    output_video = gr.Video(label="Generated Video")

    def run(prompt, affirmation, visuals):
        """Thin click handler forwarding UI inputs to generate_video."""
        return generate_video(prompt, affirmation, visuals)

    generate_btn.click(run, [prompt_in, affirmation_in, visuals_in], output_video)


# Launch
if __name__ == "__main__":
    demo.launch()