|
|
import os
import tempfile

import gradio as gr
import spaces
import torch
from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
from diffusers.utils import export_to_video
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
|
|
|
|
|
|
|
|
|
|
|
# Pick the compute device and a matching precision: float16 halves memory
# and bandwidth on GPU, while CPU inference needs full float32.
has_cuda = torch.cuda.is_available()
device = "cuda" if has_cuda else "cpu"
dtype = torch.float16 if has_cuda else torch.float32
if has_cuda:
    print("✅ GPU detected: Running in fast mode (float16)")
else:
    print("⚠️ No GPU detected: Running in slow mode (float32)")
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Model assembly: motion adapter -> Lightning weights -> base SD-1.5 pipeline.
# Statement order matters: the adapter must hold the distilled weights before
# it is handed to the pipeline.
# ---------------------------------------------------------------------------
print("Loading AnimateDiff-Lightning...")

# AnimateDiff motion module (v1.5-2), loaded in the precision chosen above.
motion_adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=dtype
)

print("Downloading Lightning weights...")
lightning_ckpt = hf_hub_download(
    repo_id="ByteDance/AnimateDiff-Lightning",
    filename="animatediff_lightning_4step_diffusers.safetensors",
)

# Swap in the 4-step distilled checkpoint over the base adapter weights.
motion_adapter.load_state_dict(load_file(lightning_ckpt))

# Photorealistic SD-1.5 base model supplies the frame content.
pipe = AnimateDiffPipeline.from_pretrained(
    "emilianJR/epiCRealism", motion_adapter=motion_adapter, torch_dtype=dtype
)

# Lightning checkpoints are trained for trailing timestep spacing with a
# linear beta schedule, per the ByteDance model card.
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear"
)

pipe.to(device)
|
|
|
|
|
|
|
|
|
|
|
# Wrap the worker with ZeroGPU scheduling when the `spaces` runtime supports
# it; fall back to the plain function everywhere else (local runs, CPU
# Spaces) so the app still starts.
def _generate_video_impl(prompt, negative_prompt):
    """Gradio entry point: delegate straight to run_inference."""
    return run_inference(prompt, negative_prompt)


try:
    generate_video = spaces.GPU(duration=60)(_generate_video_impl)
except Exception:
    generate_video = _generate_video_impl
|
|
|
|
|
def run_inference(prompt, negative_prompt):
    """Generate a short video clip with the AnimateDiff-Lightning pipeline.

    Args:
        prompt: Text description of the desired video.
        negative_prompt: Text describing qualities to avoid.

    Returns:
        Filesystem path (str) of the exported MP4 file.
    """
    print(f"Generating video for: {prompt}")

    # 4 steps and low guidance match the Lightning-distilled weights loaded
    # above; 16 frames is the motion adapter's native clip length.
    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=4,
        guidance_scale=1.5,
        num_frames=16,
    )

    frames = output.frames[0]

    # Write each result to a unique temp file instead of a fixed
    # "output.mp4": with a shared Gradio app, concurrent requests would
    # otherwise overwrite each other's videos.
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
        output_path = tmp.name
    export_to_video(frames, output_path)
    return output_path
|
|
|
|
|
|
|
|
# --- Gradio UI -------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# ⚡ AnimateDiff Lightning")
    gr.Markdown("If this is running on CPU, it will take about 3-5 minutes per video.")

    with gr.Row():
        # Left column: text inputs and the trigger button.
        with gr.Column():
            txt_prompt = gr.Textbox(label="Prompt", lines=3)
            txt_negative = gr.Textbox(label="Negative Prompt", value="bad quality, deformed", lines=2)
            btn_generate = gr.Button("Generate Video")

        # Right column: the rendered result.
        with gr.Column():
            out_video = gr.Video(label="Generated Result")

    btn_generate.click(
        fn=generate_video,
        inputs=[txt_prompt, txt_negative],
        outputs=out_video,
    )

demo.launch()
|
|
|
|
|
|
|
|
|