File size: 2,823 Bytes
1631efd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
import os
import tempfile

import gradio as gr
import torch
from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
from diffusers.utils import export_to_video
import spaces

# 1. Load the Model Components
# Module-level loading: runs once at Space startup so every request reuses
# the same pipeline instead of re-downloading weights.
print("Loading AnimateDiff-Lightning... this will be fast.")

# Load the motion adapter (the "video" part of the brain)
# NOTE(review): the official AnimateDiff-Lightning card distributes the 4-step
# weights as safetensors under "ByteDance/AnimateDiff-Lightning" (loaded via
# hf_hub_download + load_state_dict), not as a from_pretrained-able repo named
# "...-4step-T2V" — verify this repo id actually exists before deploying.
adapter = MotionAdapter.from_pretrained(
    "ByteDance/AnimateDiff-Lightning-4step-T2V",
    torch_dtype=torch.float16
)

# Load the base model (the "image" part of the brain)
# We use epiCRealism for high-quality realistic style
pipe = AnimateDiffPipeline.from_pretrained(
    "emilianJR/epiCRealism",
    motion_adapter=adapter,
    torch_dtype=torch.float16
)

# Set up the scheduler specifically for Lightning (4-step generation)
# "trailing" timestep spacing + linear betas is the configuration the
# Lightning distillation was trained against.
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config, 
    timestep_spacing="trailing", 
    beta_schedule="linear"
)

# Move to GPU immediately to speed up loading (ZeroGPU handles the swap)
# NOTE(review): on the CPU fallback path the pipeline stays in float16, which
# many CPU ops don't support well — confirm the CPU branch is ever expected.
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe.to(device)

# 2. Define the Generation Function
# @spaces.GPU ensures you get a powerful GPU for this function
@spaces.GPU(duration=60)
def generate_video(prompt, negative_prompt, num_inference_steps=4,
                   guidance_scale=1.5, num_frames=16):
    """Generate a short MP4 clip from a text prompt with AnimateDiff-Lightning.

    Args:
        prompt: Text description of the desired video.
        negative_prompt: Concepts to steer the generation away from.
        num_inference_steps: Diffusion steps; Lightning is distilled for 4.
        guidance_scale: CFG scale; keep low (~1.0-2.0) for Lightning models.
        num_frames: Number of frames to generate (16 is the standard
            AnimateDiff clip length).

    Returns:
        Filesystem path of the exported MP4 file.
    """
    print(f"Generating video for: {prompt}")

    # Generate the video frames
    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        num_frames=num_frames,
    )

    # pipe() returns a batch; we request a single video, so take element 0.
    frames = output.frames[0]

    # Save to a unique temp file instead of a shared "output.mp4" so
    # concurrent requests cannot overwrite each other's result.
    fd, output_path = tempfile.mkstemp(suffix=".mp4")
    os.close(fd)  # export_to_video reopens the path itself
    export_to_video(frames, output_path)

    return output_path

# 3. Build the User Interface
with gr.Blocks(theme="soft") as demo:
    # Header text shown above the controls.
    gr.Markdown("# ⚡ AnimateDiff Lightning (Free & Fast)")
    gr.Markdown("A truly free, open-source video generator using ByteDance's Lightning technology. fast generation.")

    with gr.Row():
        # Left column: text inputs plus the trigger button.
        with gr.Column():
            txt_prompt = gr.Textbox(
                label="Prompt",
                lines=3,
                placeholder="Close up portrait of a cyberpunk woman, neon city background, rainfall, 8k, realistic",
            )
            txt_negative = gr.Textbox(
                label="Negative Prompt",
                lines=2,
                value="bad quality, worst quality, deformed, distorted, watermark",
            )
            btn_generate = gr.Button("⚡ Generate Video", variant="primary")

        # Right column: rendered video result.
        with gr.Column():
            out_video = gr.Video(label="Generated Result")

    # Wire the button to the generation function.
    btn_generate.click(fn=generate_video,
                       inputs=[txt_prompt, txt_negative],
                       outputs=out_video)

# Launch
demo.launch()