# app-sd.py — Future Journal & Affirmation video generator (Gradio + Stable Diffusion)
import gradio as gr
from gtts import gTTS
from moviepy.editor import *
from PIL import Image
import numpy as np
import torch
from diffusers import StableDiffusionPipeline
import tempfile
import os
# -----------------------------------------------------
# Load Stable Diffusion model (this will work on GPU)
# -----------------------------------------------------
# Pick the runtime device and a matching dtype: half precision saves GPU
# memory, but CPU inference requires full float32.
device = "cuda" if torch.cuda.is_available() else "cpu"
_dtype = torch.float16 if device == "cuda" else torch.float32
sd_pipeline = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=_dtype
)
sd_pipeline = sd_pipeline.to(device)
# -----------------------------------------------------
# Function: Generate background image with Stable Diffusion
# -----------------------------------------------------
def generate_background(prompt: str):
    """Run the module-level Stable Diffusion pipeline on *prompt*.

    Returns the first generated image (a PIL.Image.Image).
    """
    result = sd_pipeline(prompt)
    return result.images[0]
# -----------------------------------------------------
# Function: Generate video with voice + background
# -----------------------------------------------------
def generate_video(prompt, affirmation, with_visuals):
    """Render an MP4 that speaks the text over a still background image.

    Args:
        prompt: Main journal prompt / affirmation; spoken aloud and, when
            with_visuals is True, used as the Stable Diffusion prompt.
        affirmation: Optional extra sentence appended after the prompt.
        with_visuals: When True, generate the background with Stable
            Diffusion; otherwise use a plain white 720x480 frame.

    Returns:
        Path to the generated .mp4 file (caller is responsible for cleanup).
    """
    # Create speech for the combined text.
    text = f"{prompt}. {affirmation}" if affirmation else prompt
    tts = gTTS(text=text, lang="en")
    # Close the handle before gTTS writes to the path: on Windows an open
    # NamedTemporaryFile cannot be reopened by name.
    audio_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
    audio_file.close()
    tts.save(audio_file.name)

    # Generate background (AI image, or plain white fallback).
    if with_visuals:
        bg_img = generate_background(prompt)
    else:
        bg_img = Image.new("RGB", (720, 480), color=(255, 255, 255))
    bg_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
    bg_file.close()
    bg_img.save(bg_file.name)

    # Make video. Match the clip length to the narration so speech is never
    # cut off (a fixed 15 s duration would truncate longer audio).
    audioclip = AudioFileClip(audio_file.name)
    clip = ImageClip(bg_file.name).set_duration(audioclip.duration)
    final = clip.set_audio(audioclip)
    out_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
    try:
        final.write_videofile(out_path, fps=24)
    finally:
        # Release ffmpeg readers and drop the intermediate temp files.
        audioclip.close()
        final.close()
        os.remove(audio_file.name)
        os.remove(bg_file.name)
    return out_path
# -----------------------------------------------------
# Gradio Interface
# -----------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("## ✨ Future Journal & Affirmation App with AI Visuals ✨")
    with gr.Row():
        prompt_in = gr.Textbox(label="Journal Prompt / Affirmation")
        affirmation_in = gr.Textbox(label="Extra Affirmation (optional)")
    with gr.Row():
        visuals_in = gr.Checkbox(label="Add AI Visuals (Stable Diffusion)", value=True)
        generate_btn = gr.Button("🎬 Generate Video")
    output_video = gr.Video(label="Generated Video")

    def run(prompt, affirmation, visuals):
        # Thin adapter so the click handler mirrors the inputs list.
        return generate_video(prompt, affirmation, visuals)

    generate_btn.click(
        fn=run,
        inputs=[prompt_in, affirmation_in, visuals_in],
        outputs=output_video,
    )
# Launch
# Start the Gradio server only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()