Smart44 committed on
Commit 76e1f83 · verified · 1 Parent(s): c95ff6d

Create app.py

Files changed (1)
  1. app.py +108 -0
app.py ADDED
@@ -0,0 +1,108 @@
+ import gradio as gr
+ import torch
+ from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
+ from diffusers.utils import export_to_video
+ from huggingface_hub import hf_hub_download
+ from safetensors.torch import load_file
+
+ # `spaces` is only available on Hugging Face Spaces; guard the import so the
+ # app also runs locally without it.
+ try:
+     import spaces
+ except ImportError:
+     spaces = None
+
+ # 1. Hardware detection
+ # Use the GPU with float16 when available; otherwise fall back to CPU with
+ # float32, since half precision is poorly supported on CPU.
+ if torch.cuda.is_available():
+     device = "cuda"
+     dtype = torch.float16
+     print("✅ GPU detected: running in fast mode (float16)")
+ else:
+     device = "cpu"
+     dtype = torch.float32
+     print("⚠️ No GPU detected: running in slow mode (float32)")
+
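+ # (Assumption, not in the original) For reproducible clips you could seed a
+ # generator here, e.g. gen = torch.Generator(device).manual_seed(42), and
+ # pass generator=gen to the pipeline call further down.
+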
+ # 2. Load the model components
+ print("Loading AnimateDiff-Lightning...")
+
+ # STEP A: Load the standard AnimateDiff motion adapter
+ adapter = MotionAdapter.from_pretrained(
+     "guoyww/animatediff-motion-adapter-v1-5-2",
+     torch_dtype=dtype,  # use the detected dtype
+ )
+
+ # STEP B: Download the 4-step Lightning weights (hf_hub_download caches them)
+ print("Downloading Lightning weights...")
+ file_path = hf_hub_download(
+     repo_id="ByteDance/AnimateDiff-Lightning",
+     filename="animatediff_lightning_4step_diffusers.safetensors"
+ )
+
+ # STEP C: Overwrite the adapter's weights with the distilled Lightning ones
+ adapter.load_state_dict(load_file(file_path))
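+ # (Assumption) load_file also takes a device argument, e.g.
+ # load_file(file_path, device=device), which would skip a CPU round-trip.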
+
+ # STEP D: Load the Stable Diffusion 1.5-based image model to animate
+ pipe = AnimateDiffPipeline.from_pretrained(
+     "emilianJR/epiCRealism",
+     motion_adapter=adapter,
+     torch_dtype=dtype,  # use the detected dtype
+ )
+
+ # AnimateDiff-Lightning's model card pairs the distilled weights with Euler,
+ # "trailing" timestep spacing, and a linear beta schedule.
+ pipe.scheduler = EulerDiscreteScheduler.from_config(
+     pipe.scheduler.config,
+     timestep_spacing="trailing",
+     beta_schedule="linear"
+ )
+
+ # Move the whole pipeline to the detected device
+ pipe.to(device)
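+ # (Assumption, not in the original) On a memory-constrained GPU you could use
+ # diffusers' built-in offloading instead of pipe.to(device):
+ # pipe.enable_model_cpu_offload()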
+
+ # 3. Define the generation function
+ # Use the @spaces.GPU decorator when it works (ZeroGPU Spaces); fall back to
+ # a plain function when it does not (e.g. running on CPU or locally).
+ try:
+     @spaces.GPU(duration=60)
+     def generate_video(prompt, negative_prompt):
+         return run_inference(prompt, negative_prompt)
+ except Exception:
+     def generate_video(prompt, negative_prompt):
+         return run_inference(prompt, negative_prompt)
+
+ def run_inference(prompt, negative_prompt):
+     print(f"Generating video for: {prompt}")
+
+     # Lightning is distilled for few-step, low-guidance sampling
+     output = pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         num_inference_steps=4,
+         guidance_scale=1.5,
+         num_frames=16,
+     )
+
+     # The pipeline returns a batch; take the frames of the first (only) video
+     frames = output.frames[0]
+     output_path = "output.mp4"
+     export_to_video(frames, output_path)
+     return output_path
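+
+ # (Assumption) export_to_video also accepts an fps argument, e.g.
+ # export_to_video(frames, output_path, fps=8), if the default playback speed
+ # isn't right.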
+
+ # 4. Build the UI
+ with gr.Blocks() as demo:
+     gr.Markdown("# ⚡ AnimateDiff Lightning")
+     gr.Markdown("On CPU, each video takes roughly 3-5 minutes to generate.")
+
+     with gr.Row():
+         with gr.Column():
+             prompt_input = gr.Textbox(label="Prompt", lines=3)
+             neg_prompt_input = gr.Textbox(label="Negative Prompt", value="bad quality, deformed", lines=2)
+             generate_btn = gr.Button("Generate Video")
+
+         with gr.Column():
+             video_output = gr.Video(label="Generated Result")
+
+     generate_btn.click(
+         fn=generate_video,
+         inputs=[prompt_input, neg_prompt_input],
+         outputs=video_output,
+     )
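+
+ # (Assumption, not in the original) demo.queue() before launch can keep long
+ # CPU generations from hitting request timeouts on Spaces.
+ # demo.queue()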
+ demo.launch()