Smart44 committed
Commit c95ff6d · verified · 1 Parent(s): 46ce40f

Delete app.py

Files changed (1)
  1. app.py +0 -104
app.py DELETED
@@ -1,104 +0,0 @@
- import gradio as gr
- import torch
- from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
- from diffusers.utils import export_to_video
- from huggingface_hub import hf_hub_download
- from safetensors.torch import load_file
- import spaces
- import os
-
- # 1. Load the Model Components
- print("Loading AnimateDiff-Lightning... this will be fast.")
-
- # STEP A: Load the standard adapter first (the "skeleton")
- adapter = MotionAdapter.from_pretrained(
-     "guoyww/animatediff-motion-adapter-v1-5-2",
-     torch_dtype=torch.float16
- )
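- # (The v1-5-2 adapter only supplies the motion-module structure here; its
- # weights are fully overwritten by the Lightning checkpoint in STEP C.)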
-
- # STEP B: Download the "Lightning" speed update
- print("Downloading Lightning weights...")
- file_path = hf_hub_download(
-     repo_id="ByteDance/AnimateDiff-Lightning",
-     filename="animatediff_lightning_4step_diffusers.safetensors"
- )
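- # (hf_hub_download caches the file locally, so repeat runs skip the download.)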
-
- # STEP C: Apply the Lightning update to the adapter
- print("Applying Lightning weights...")
- adapter.load_state_dict(
-     load_file(file_path)
- )
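- # (Lightning is distilled from AnimateDiff for few-step generation, which is
- # why 4 inference steps are enough later on.)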
-
- # STEP D: Load the base model (Realism) with the Lightning adapter
- pipe = AnimateDiffPipeline.from_pretrained(
-     "emilianJR/epiCRealism",
-     motion_adapter=adapter,
-     torch_dtype=torch.float16
- )
-
- # Set up the scheduler specifically for Lightning (4-step generation)
- pipe.scheduler = EulerDiscreteScheduler.from_config(
-     pipe.scheduler.config,
-     timestep_spacing="trailing",
-     beta_schedule="linear"
- )
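- # (Trailing timestep spacing and a linear beta schedule are the settings the
- # AnimateDiff-Lightning model card recommends for its checkpoints.)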
-
- # Move to GPU immediately (falls back to CPU if none is available)
- device = "cuda" if torch.cuda.is_available() else "cpu"
- pipe.to(device)
-
- # 2. Define the Generation Function
- # NOTE: If you are on the CPU Free Tier, DELETE the line below starting with @spaces
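- # (On ZeroGPU Spaces, @spaces.GPU requests a GPU for each call; duration=60
- # caps the allocation at roughly 60 seconds per generation.)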
- @spaces.GPU(duration=60)
- def generate_video(prompt, negative_prompt):
-     print(f"Generating video for: {prompt}")
-
-     # Generate the video frames
-     output = pipe(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         num_inference_steps=4,  # Lightning needs only 4 steps!
-         guidance_scale=1.5,     # Keep guidance low for Lightning
-         num_frames=16,          # Standard length for AnimateDiff
-     )
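-     # (Distilled few-step models degrade at high CFG scales; the official
-     # Lightning examples use guidance_scale=1.0.)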
-
-     frames = output.frames[0]
-
-     # Save to MP4
-     output_path = "output.mp4"
-     export_to_video(frames, output_path)
-
-     return output_path
-
- # 3. Build the User Interface
- with gr.Blocks(theme="soft") as demo:
-     gr.Markdown("# ⚡ AnimateDiff Lightning (Free & Fast)")
-     gr.Markdown("A truly free, open-source video generator.")
-
-     with gr.Row():
-         with gr.Column():
-             prompt_input = gr.Textbox(
-                 label="Prompt",
-                 placeholder="Close up portrait of a cyberpunk woman, neon city background, rainfall, 8k, realistic",
-                 lines=3
-             )
-             neg_prompt_input = gr.Textbox(
-                 label="Negative Prompt",
-                 value="bad quality, worst quality, deformed, distorted, watermark",
-                 lines=2
-             )
-             generate_btn = gr.Button("⚡ Generate Video", variant="primary")
-
-         with gr.Column():
-             video_output = gr.Video(label="Generated Result")
-
-     generate_btn.click(
-         fn=generate_video,
-         inputs=[prompt_input, neg_prompt_input],
-         outputs=video_output
-     )
-
- # Launch
- demo.launch()
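The NOTE in the deleted file suggests removing the `@spaces.GPU` decorator on the CPU free tier. A minimal sketch of that variant (hypothetical, not part of this commit), which also loads everything in float32 since float16 inference is poorly supported on CPU:

```python
# Hypothetical CPU-only variant of the deleted app.py (a sketch, not from the commit).
import torch
from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
from diffusers.utils import export_to_video
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

# Same adapter + Lightning weights as the deleted file, but in float32 for CPU.
adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float32
)
adapter.load_state_dict(load_file(hf_hub_download(
    repo_id="ByteDance/AnimateDiff-Lightning",
    filename="animatediff_lightning_4step_diffusers.safetensors",
)))

pipe = AnimateDiffPipeline.from_pretrained(
    "emilianJR/epiCRealism", motion_adapter=adapter, torch_dtype=torch.float32
)
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear"
)

# No @spaces.GPU decorator: the function runs wherever the process runs (CPU here).
def generate_video(prompt: str, negative_prompt: str) -> str:
    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=4,
        guidance_scale=1.5,
        num_frames=16,
    )
    export_to_video(output.frames[0], "output.mp4")
    return "output.mp4"
```

Expect CPU generation to take minutes per clip rather than seconds; the 4-step Lightning schedule reduces that cost but does not remove it.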