import gradio as gr
import torch
import os
import random
import time
import math
import spaces
from glob import glob
from pathlib import Path
from typing import Optional

from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import export_to_video
from PIL import Image
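
# SVD-XT 1.1 pipeline (the 25-frame checkpoint), loaded once in fp16.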
fps25Pipe = StableVideoDiffusionPipeline.from_pretrained(
    "vdo/stable-video-diffusion-img2vid-xt-1-1", torch_dtype=torch.float16, variant="fp16"
)
fps25Pipe.to("cuda")
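
# Original SVD pipeline (the 14-frame checkpoint), also in fp16.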
fps14Pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid", torch_dtype=torch.float16, variant="fp16"
)
fps14Pipe.to("cuda")
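
# Largest value the seed slider and random seed can take (signed 64-bit bound).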
max_64_bit_int = 2**63 - 1


def animate(
    image: Image.Image,
    seed: Optional[int] = 42,
    randomize_seed: bool = True,
    motion_bucket_id: int = 127,
    fps_id: int = 6,
    noise_aug_strength: float = 0.1,
    decoding_t: int = 3,
    video_format: str = "mp4",
    frame_format: str = "webp",
    version: str = "auto",
    output_folder: str = "outputs",
):
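    """Generate a video from the image, save it to output_folder, and return the Gradio output updates."""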
    start = time.time()
    if image.mode == "RGBA":
        image = image.convert("RGB")

    if randomize_seed:
        seed = random.randint(0, max_64_bit_int)
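
    # Run the diffusion pass on the GPU (animate_on_gpu requests a ZeroGPU slot of up to 120 s).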
    frames = animate_on_gpu(
        image,
        seed,
        motion_bucket_id,
        fps_id,
        noise_aug_strength,
        decoding_t,
        version
    )

    os.makedirs(output_folder, exist_ok=True)
    base_count = len(glob(os.path.join(output_folder, "*." + video_format)))
    video_path = os.path.join(output_folder, f"{base_count:06d}." + video_format)
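
    # Encode the frames to a video file and build the timing message shown to the user.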
    export_to_video(frames, video_path, fps=fps_id)
    end = time.time()
    seconds = int(end - start)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    information = ("Start the process again if you want a different result. " if randomize_seed else "") + \
        "Wait 2 min before a new run to avoid a quota penalty, or use another computer. " + \
        "The video has been generated in " + \
        ((str(hours) + " h, ") if hours != 0 else "") + \
        ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
        str(seconds) + " sec."

    return (
        gr.update(value=video_path, format=video_format),
        gr.update(value=video_path, visible=True),
        gr.update(label="Generated frames in *." + frame_format + " format", format=frame_format, value=frames, visible=True),
        seed,
        gr.update(value=information, visible=True),
    )


@spaces.GPU(duration=120)
def animate_on_gpu(
    image: Image.Image,
    seed: Optional[int] = 42,
    motion_bucket_id: int = 127,
    fps_id: int = 6,
    noise_aug_strength: float = 0.1,
    decoding_t: int = 3,
    version: str = "auto"
):
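    """Run the selected Stable Video Diffusion pipeline on the GPU and return the generated frames."""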
    generator = torch.manual_seed(seed)
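
    # Pick the checkpoint: SVD-XT when requested explicitly or when the target
    # frame rate is above 14 fps; otherwise fall back to the base SVD model.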
    if version == "svdxt" or (fps_id > 14 and version != "svd"):
        return fps25Pipe(image, decode_chunk_size=decoding_t, generator=generator, motion_bucket_id=motion_bucket_id, noise_aug_strength=noise_aug_strength, num_frames=25).frames[0]
    else:
        return fps14Pipe(image, decode_chunk_size=decoding_t, generator=generator, motion_bucket_id=motion_bucket_id, noise_aug_strength=noise_aug_strength, num_frames=25).frames[0]


def resize_image(image, output_size=(1024, 576)):
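    """Resize and center-crop the image to output_size without distorting it."""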
    target_aspect = output_size[0] / output_size[1]
    image_aspect = image.width / image.height

    # Nothing to do if the image already matches the target size.
    if image.width == output_size[0] and image.height == output_size[1]:
        return image

    if image_aspect > target_aspect:
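        # Wider than the target: resize to the target height, then crop the width.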
        new_height = output_size[1]
        new_width = int(new_height * image_aspect)
        resized_image = image.resize((new_width, new_height), Image.LANCZOS)

        left = (new_width - output_size[0]) / 2
        top = 0
        right = (new_width + output_size[0]) / 2
        bottom = output_size[1]
    else:
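        # Taller than the target: resize to the target width, then crop the height.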
        new_width = output_size[0]
        new_height = int(new_width / image_aspect)
        resized_image = image.resize((new_width, new_height), Image.LANCZOS)

        left = 0
        top = (new_height - output_size[1]) / 2
        right = output_size[0]
        bottom = (new_height + output_size[1]) / 2

    # Center-crop to the exact output size.
    cropped_image = resized_image.crop((left, top, right, bottom))
    return cropped_image


with gr.Blocks() as demo:
    gr.Markdown('''# Community demo for Stable Video Diffusion - Img2Vid - XT ([model](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt), [paper](https://stability.ai/research/stable-video-diffusion-scaling-latent-video-diffusion-models-to-large-datasets), [stability's ui waitlist](https://stability.ai/contact))
#### Research release ([_non-commercial_](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/blob/main/LICENSE)): generate a `4s` video from a single image (`25 frames` at `6 fps`). This demo uses [🧨 diffusers for low VRAM and fast generation](https://huggingface.co/docs/diffusers/main/en/using-diffusers/svd).
''')
    with gr.Row():
        with gr.Column():
            image = gr.Image(label="Upload your image", type="pil")
            with gr.Accordion("Advanced options", open=False):
                fps_id = gr.Slider(label="Frames per second", info="The length of your video in seconds will be 25/fps", value=6, minimum=5, maximum=30)
                motion_bucket_id = gr.Slider(label="Motion bucket id", info="Controls how much motion to add/remove from the image", value=127, minimum=1, maximum=255)
                noise_aug_strength = gr.Slider(label="Noise strength", info="Amount of noise added to the input image", value=0.1, minimum=0, maximum=1, step=0.1)
                decoding_t = gr.Slider(label="Decoding", info="Number of frames decoded at a time; higher values use more VRAM; reduce if necessary", value=3, minimum=1, maximum=5, step=1)
                video_format = gr.Radio([["*.mp4", "mp4"], ["*.avi", "avi"]], label="Video format for result", info="File extension", value="mp4", interactive=True)
                frame_format = gr.Radio([["*.png", "png"], ["*.webp", "webp"], ["*.jpeg", "jpeg"], ["*.gif", "gif"], ["*.bmp", "bmp"]], label="Image format for frames", info="File extension", value="webp", interactive=True)
                version = gr.Radio([["Auto", "auto"], ["🏃🏻♀️ SVD (trained on 14 f/s)", "svd"], ["🏃🏻♀️💨 SVD-XT (trained on 25 f/s)", "svdxt"]], label="Model", info="Trained model", value="auto", interactive=True)
                seed = gr.Slider(label="Seed", value=42, randomize=True, minimum=0, maximum=max_64_bit_int, step=1)
                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            generate_btn = gr.Button(value="🚀 Animate", variant="primary")

        with gr.Column():
            video = gr.Video(label="Generated video", autoplay=True)
            download_button = gr.DownloadButton(label="💾 Download video", visible=False)
            information_msg = gr.HTML(visible=False)
            gallery = gr.Gallery(label="Generated frames", visible=False)
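
    # Uploaded images are resized to 1024x576; the button runs the full generation.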
    image.upload(fn=resize_image, inputs=image, outputs=image, queue=False)
    generate_btn.click(fn=animate, inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id, noise_aug_strength, decoding_t, video_format, frame_format, version], outputs=[video, download_button, gallery, seed, information_msg], api_name="video")

    gr.Examples(
        examples=[
            ["Examples/Fire.webp", 42, True, 127, 25, 0.1, 3, "mp4", "png", "auto"],
            ["Examples/Water.png", 42, True, 127, 25, 0.1, 3, "mp4", "png", "auto"],
            ["Examples/Town.jpeg", 42, True, 127, 25, 0.1, 3, "mp4", "png", "auto"]
        ],
        inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id, noise_aug_strength, decoding_t, video_format, frame_format, version],
        outputs=[video, download_button, gallery, seed, information_msg],
        fn=animate,
        run_on_click=True,
        cache_examples=False,
    )


if __name__ == "__main__":
    demo.launch(share=True, show_api=False)