Smart44 committed (verified)
Commit 1631efd · Parent: 51d91e3

Create app.py

Files changed (1): app.py (+90, -0)
app.py ADDED
@@ -0,0 +1,90 @@
+ import gradio as gr
+ import torch
+ from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
+ from diffusers.utils import export_to_video
+ from huggingface_hub import hf_hub_download
+ from safetensors.torch import load_file
+ import spaces
+ import os
+
+ # 1. Load the Model Components
+ print("Loading AnimateDiff-Lightning... this will be fast.")
+
+ # Load the motion adapter (the "video" part of the brain).
+ # The Lightning weights are published as safetensors checkpoints in
+ # ByteDance/AnimateDiff-Lightning, so load the 4-step checkpoint into a
+ # MotionAdapter manually (per the AnimateDiff-Lightning model card).
+ adapter = MotionAdapter()
+ adapter.load_state_dict(load_file(hf_hub_download(
+     "ByteDance/AnimateDiff-Lightning",
+     "animatediff_lightning_4step_diffusers.safetensors",
+ )))
+ adapter = adapter.to(dtype=torch.float16)
+
+ # Load the base model (the "image" part of the brain)
+ # We use epiCRealism for high-quality realistic style
+ pipe = AnimateDiffPipeline.from_pretrained(
+     "emilianJR/epiCRealism",
+     motion_adapter=adapter,
+     torch_dtype=torch.float16
+ )
+
+ # Set up the scheduler specifically for Lightning (4-step generation)
+ pipe.scheduler = EulerDiscreteScheduler.from_config(
+     pipe.scheduler.config,
+     timestep_spacing="trailing",
+     beta_schedule="linear"
+ )
+
+ # Move to GPU immediately to speed up loading (ZeroGPU handles the swap)
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ pipe.to(device)
+
+ # 2. Define the Generation Function
+ # @spaces.GPU ensures you get a powerful GPU for this function
+ @spaces.GPU(duration=60)
+ def generate_video(prompt, negative_prompt):
+     print(f"Generating video for: {prompt}")
+
+     # Generate the video frames
+     output = pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         num_inference_steps=4,  # Lightning needs only 4 steps!
+         guidance_scale=1.5,     # Keep guidance low for Lightning
+         num_frames=16,          # Standard length for AnimateDiff
+     )
+
+     frames = output.frames[0]
+
+     # Save to MP4
+     output_path = "output.mp4"
+     export_to_video(frames, output_path)
+
+     return output_path
+
+ # 3. Build the User Interface
+ with gr.Blocks(theme="soft") as demo:
+     gr.Markdown("# ⚡ AnimateDiff Lightning (Free & Fast)")
+     gr.Markdown("A truly free, open-source video generator using ByteDance's AnimateDiff-Lightning technology for fast generation.")
+
+     with gr.Row():
+         with gr.Column():
+             prompt_input = gr.Textbox(
+                 label="Prompt",
+                 placeholder="Close up portrait of a cyberpunk woman, neon city background, rainfall, 8k, realistic",
+                 lines=3
+             )
+             neg_prompt_input = gr.Textbox(
+                 label="Negative Prompt",
+                 value="bad quality, worst quality, deformed, distorted, watermark",
+                 lines=2
+             )
+             generate_btn = gr.Button("⚡ Generate Video", variant="primary")
+
+         with gr.Column():
+             video_output = gr.Video(label="Generated Result")
+
+     generate_btn.click(
+         fn=generate_video,
+         inputs=[prompt_input, neg_prompt_input],
+         outputs=video_output
+     )
+
+ # Launch
+ demo.launch()