# Image-to-video demo: AnimateDiff motion adapter + IP-Adapter, CPU only.
# Requires: gradio, torch, diffusers, transformers, imageio, imageio-ffmpeg.
import gradio as gr
import numpy as np
import torch
from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
from PIL import Image
import imageio
import tempfile
import os

# The motion adapter supplies only the temporal layers; it must be paired
# with a Stable Diffusion 1.5 base checkpoint (any SD 1.5 model works here).
ADAPTER_ID = "guoyww/animatediff-motion-adapter-v1-5-2"
BASE_MODEL_ID = "runwayml/stable-diffusion-v1-5"

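# NOTE (design assumption): AnimateDiffPipeline is text-to-video and does
# not accept an input image directly; the uploaded picture is wired in
# below through IP-Adapter so it can condition the generated motion.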
| print("Loading pipeline...") |
|
|
| pipe = AnimateDiffPipeline.from_pretrained( |
| MODEL_ID, |
| torch_dtype=torch.float32 |
| ) |
|
|
| pipe.to("cpu") |
|
|
| |
| pipe.enable_attention_slicing() |
| pipe.enable_vae_slicing() |
|
|
| print("Model Ready") |
|
|
|
|
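# Generation is slow on CPU; the small 384x384 resolution, low step counts,
# and short frame ranges exposed in the UI keep run times tolerable.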
def generate_video(image, prompt, frames, steps, seed):
    # Seed the CPU generator so results are reproducible.
    generator = torch.Generator("cpu").manual_seed(int(seed))

    # Gradio hands the upload over as a numpy array; downscale to keep
    # CPU inference time manageable.
    image = Image.fromarray(image).convert("RGB").resize((384, 384))

    with torch.inference_mode():
        result = pipe(
            prompt=prompt,
            ip_adapter_image=image,  # condition generation on the upload
            num_frames=int(frames),
            num_inference_steps=int(steps),
            generator=generator,
        )

    # result.frames is a list of videos (one per prompt); take the first.
    frames_list = result.frames[0]

    temp_dir = tempfile.mkdtemp()
    video_path = os.path.join(temp_dir, "video.mp4")

    # Writing .mp4 through imageio requires the imageio-ffmpeg backend.
    imageio.mimsave(video_path, [np.array(f) for f in frames_list], fps=6)

    return video_path

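# Minimal Gradio front end: image upload + motion prompt in, MP4 out.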
with gr.Blocks() as demo:
    gr.Markdown("# Image → Video AI (CPU Optimized)")

    with gr.Row():
        with gr.Column():
            input_image = gr.Image(label="Upload Image")

            prompt = gr.Textbox(
                value="cinematic camera movement",
                label="Motion Prompt"
            )

            frames = gr.Slider(8, 20, value=12, step=1, label="Frames")
            steps = gr.Slider(4, 8, value=6, step=1, label="Steps")
            seed = gr.Number(value=42, label="Seed")

            btn = gr.Button("Generate Video")

        with gr.Column():
            output_video = gr.Video(label="Generated Video")

    btn.click(
        generate_video,
        inputs=[input_image, prompt, frames, steps, seed],
        outputs=output_video,
    )

demo.launch()
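# Run this script directly; pass share=True to demo.launch() for a public link.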