import gradio as gr
import torch
from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
from diffusers.utils import export_to_video
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

# The `spaces` package is only available on Hugging Face Spaces hardware.
# Import it optionally so the script also runs on a local machine.
try:
    import spaces
except ImportError:
    spaces = None

# 1. Hardware Detection
# Check whether a GPU is available; if not, fall back to CPU mode (float32).
if torch.cuda.is_available():
    device = "cuda"
    dtype = torch.float16
    print("✅ GPU detected: Running in fast mode (float16)")
else:
    device = "cpu"
    dtype = torch.float32
    print("⚠️ No GPU detected: Running in slow mode (float32)")

# 2. Load the Model Components
print("Loading AnimateDiff-Lightning...")

# STEP A: Load the standard adapter
adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-2",
    torch_dtype=dtype  # use the dtype selected during hardware detection
)
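# The motion adapter holds the temporal layers that AnimateDiff injects into
# the image UNet; the Lightning weights loaded below overwrite them with
# distilled versions.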

# STEP B: Download the Lightning weights
print("Downloading Lightning weights...")
file_path = hf_hub_download(
    repo_id="ByteDance/AnimateDiff-Lightning",
    filename="animatediff_lightning_4step_diffusers.safetensors"
)
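# Note: the repo also provides 2-step and 8-step variants; whichever file is
# chosen, num_inference_steps at generation time must match its step count.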

# STEP C: Apply the Lightning update
adapter.load_state_dict(
    load_file(file_path)
)

# STEP D: Load the base model
pipe = AnimateDiffPipeline.from_pretrained(
    "emilianJR/epiCRealism",
    motion_adapter=adapter,
    torch_dtype=dtype  # use the dtype selected during hardware detection
)

# Set up the scheduler
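# Lightning checkpoints are distilled assuming a linear beta schedule with
# trailing timestep spacing, so the scheduler config must match.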
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config, 
    timestep_spacing="trailing", 
    beta_schedule="linear"
)

# Move to the detected device
pipe.to(device)
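# If GPU memory is tight, pipe.enable_model_cpu_offload() (requires the
# `accelerate` package) can be used here instead of pipe.to(device).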

# 3. Define the Generation Function
# On Spaces, wrap inference in @spaces.GPU so GPU hardware is allocated per
# call; when the spaces package is unavailable (e.g. running locally or on
# CPU), fall back to a plain function.
if spaces is not None:
    @spaces.GPU(duration=60)
    def generate_video(prompt, negative_prompt):
        return run_inference(prompt, negative_prompt)
else:
    def generate_video(prompt, negative_prompt):
        return run_inference(prompt, negative_prompt)

def run_inference(prompt, negative_prompt):
    print(f"Generating video for: {prompt}")
    
    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=4, 
        guidance_scale=1.5,
        num_frames=16,
    )
    
    frames = output.frames[0]
    output_path = "output.mp4"
    export_to_video(frames, output_path)
    return output_path

# 4. Build the UI
with gr.Blocks() as demo:
    gr.Markdown("# ⚡ AnimateDiff Lightning")
    gr.Markdown("If this is running on CPU, it will take about 3-5 minutes per video.")
    
    with gr.Row():
        with gr.Column():
            prompt_input = gr.Textbox(label="Prompt", lines=3)
            neg_prompt_input = gr.Textbox(label="Negative Prompt", value="bad quality, deformed", lines=2)
            generate_btn = gr.Button("Generate Video")
        
        with gr.Column():
            video_output = gr.Video(label="Generated Result")

    generate_btn.click(
        fn=generate_video,
        inputs=[prompt_input, neg_prompt_input],
        outputs=video_output
    )

demo.launch()