# Sword / app.py — AnimateDiff text-to-video demo
# (Hugging Face Space by sibel10; commit 8ea6377, "Update app.py")
import torch
import gradio as gr
from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
# Prefer the GPU when one is present; half precision is only worthwhile on CUDA,
# so CPU runs fall back to full float32.
_has_gpu = torch.cuda.is_available()
device = "cuda" if _has_gpu else "cpu"
dtype = torch.float16 if _has_gpu else torch.float32
print("Running on:", device)
# Load the AnimateDiff motion adapter — the temporal layers that turn a
# still-image diffusion model into a video one.
adapter = MotionAdapter.from_pretrained(
"guoyww/animatediff-motion-adapter-v1-5-2",
torch_dtype=dtype
)
# Load the base Stable Diffusion 1.5 weights and fuse in the motion adapter.
# NOTE(review): first run downloads several GB of weights from the Hub.
pipe = AnimateDiffPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5",
motion_adapter=adapter,
torch_dtype=dtype
)
# Swap the default scheduler for Euler, reusing the existing scheduler config.
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# Trade a little speed for a smaller memory peak: decode the VAE and run
# attention in slices instead of all at once.
pipe.enable_vae_slicing()
pipe.enable_attention_slicing()
def generate(prompt, steps, guidance, frames, fps, seed):
    """Generate a short video clip from a text prompt.

    Args:
        prompt: Text description of the clip to generate.
        steps: Number of denoising steps (cast to int).
        guidance: Classifier-free guidance scale (cast to float).
        frames: Number of video frames to generate (cast to int).
        fps: Playback frame rate used when encoding the output video.
        seed: RNG seed for reproducible results (cast to int).

    Returns:
        Path to the encoded video file, suitable for a ``gr.Video`` output.
    """
    # Local import: keeps the fix self-contained; diffusers is already a
    # dependency of this app.
    from diffusers.utils import export_to_video

    generator = torch.Generator(device=device).manual_seed(int(seed))
    frame_list = pipe(
        prompt=prompt,
        num_inference_steps=int(steps),
        guidance_scale=float(guidance),
        num_frames=int(frames),
        generator=generator,
    ).frames[0]
    # BUG FIX: the original ignored `fps` and returned the raw list of PIL
    # frames, but the UI's output component is gr.Video(), which expects a
    # video file path. Encode the frames at the requested frame rate.
    return export_to_video(frame_list, fps=int(fps))
# Gradio front end. The inputs list maps positionally onto generate()'s
# parameters: (prompt, steps, guidance, frames, fps, seed).
demo = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Slider(minimum=10, maximum=40, value=20, step=1, label="Steps"),
        gr.Slider(minimum=1, maximum=12, value=7.5, step=0.5, label="Guidance"),
        gr.Slider(minimum=8, maximum=32, value=16, step=1, label="Frames"),
        gr.Slider(minimum=4, maximum=24, value=8, step=1, label="FPS"),
        gr.Number(value=42, label="Seed"),
    ],
    outputs=gr.Video(),
    title="AnimateDiff Video Generator",
    description="Text to Video using Stable Diffusion + AnimateDiff",
)

demo.launch()