GiorgioV committed
Commit 4967707 · verified · 1 Parent(s): 0a6c372

Upload example.py

Files changed (1): example.py (+294, -0)

example.py ADDED
@@ -0,0 +1,294 @@
Source: app.py from the Space dream2589632147/Dream-wan2-2-faster-Pro (like 232; update 5b9c736, verified, 5 days ago; 10.2 kB).
import os
import spaces
import torch
from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
from diffusers.utils.export_utils import export_to_video
import gradio as gr
import tempfile
import numpy as np
from PIL import Image
import random
import gc

from torchao.quantization import quantize_
from torchao.quantization import Float8DynamicActivationFloat8WeightConfig, Int8WeightOnlyConfig
import aoti

# =========================================================
# MODEL CONFIGURATION
# =========================================================
MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"  # new path for the model
HF_TOKEN = os.environ.get("HF_TOKEN")  # Hugging Face token, needed if the model is private

MAX_DIM = 832
MIN_DIM = 480
SQUARE_DIM = 640
MULTIPLE_OF = 16

MAX_SEED = np.iinfo(np.int32).max

FIXED_FPS = 16
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 7720

MIN_DURATION = round(MIN_FRAMES_MODEL / FIXED_FPS, 1)
MAX_DURATION = round(MAX_FRAMES_MODEL / FIXED_FPS, 1)

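# With FIXED_FPS = 16, these bounds come out to MIN_DURATION = 0.5 s
# (8 frames) and MAX_DURATION = 482.5 s (7720 frames).
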
# =========================================================
# LOAD PIPELINE
# =========================================================
pipe = WanImageToVideoPipeline.from_pretrained(
    MODEL_ID,
    transformer=WanTransformer3DModel.from_pretrained(
        MODEL_ID,
        subfolder="transformer",
        torch_dtype=torch.bfloat16,
        device_map="cuda",
        token=HF_TOKEN,
    ),
    transformer_2=WanTransformer3DModel.from_pretrained(
        MODEL_ID,
        subfolder="transformer_2",
        torch_dtype=torch.bfloat16,
        device_map="cuda",
        token=HF_TOKEN,
    ),
    torch_dtype=torch.bfloat16,
).to("cuda")

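# Wan 2.2 A14B splits denoising across two expert transformers: `transformer`
# covers the high-noise (early) timesteps and `transformer_2` the low-noise
# (late) ones, which is why the checkpoint ships two transformer subfolders
# and the pipeline accepts two guidance scales.
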
# =========================================================
# LOAD LORA ADAPTERS
# =========================================================
pipe.load_lora_weights(
    "Kijai/WanVideo_comfy",
    weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
    adapter_name="lightx2v",
)
pipe.load_lora_weights(
    "Kijai/WanVideo_comfy",
    weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
    adapter_name="lightx2v_2",
    load_into_transformer_2=True,
)

pipe.set_adapters(["lightx2v", "lightx2v_2"], adapter_weights=[1.0, 1.0])
pipe.fuse_lora(adapter_names=["lightx2v"], lora_scale=3.0, components=["transformer"])
pipe.fuse_lora(adapter_names=["lightx2v_2"], lora_scale=1.0, components=["transformer_2"])
pipe.unload_lora_weights()

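# Fuse-then-unload: fusing bakes the LoRA deltas into the base weights (with
# a stronger scale, 3.0, on the high-noise expert), so the adapters can be
# unloaded afterwards. The distilled few-step behaviour is kept while the
# LoRA bookkeeping is dropped before the weights are quantized below.
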
# =========================================================
# QUANTIZATION & AOT OPTIMIZATION
# =========================================================
quantize_(pipe.text_encoder, Int8WeightOnlyConfig())
quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
quantize_(pipe.transformer_2, Float8DynamicActivationFloat8WeightConfig())

aoti.aoti_blocks_load(pipe.transformer, 'zerogpu-aoti/Wan2', variant='fp8da')
aoti.aoti_blocks_load(pipe.transformer_2, 'zerogpu-aoti/Wan2', variant='fp8da')

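# The text encoder gets cheaper int8 weight-only quantization, since it runs
# once per prompt outside the denoising loop, while both transformers use
# float8 dynamic-activation / float8-weight quantization on the hot path.
# Assumption: `aoti` (imported at the top) is a Space-local helper module,
# not a PyPI package; it appears to load ahead-of-time compiled transformer
# blocks from the zerogpu-aoti/Wan2 repo, with the 'fp8da' variant matching
# the float8 config applied above.
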
# =========================================================
# DEFAULT PROMPTS
# =========================================================
default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
# Standard Chinese negative prompt shipped with the Wan models; roughly:
# "garish tones, overexposed, static, blurry details, subtitles, style,
# artwork, painting, still image, overall gray, worst quality, low quality,
# JPEG artifacts, ugly, mutilated, extra fingers, poorly drawn hands, poorly
# drawn face, deformed, disfigured, malformed limbs, fused fingers,
# motionless frame, cluttered background, three legs, crowded background,
# walking backwards".
default_negative_prompt = (
    "色调艳丽, 过曝, 静态, 细节模糊不清, 字幕, 风格, 作品, 画作, 画面, 静止, 整体发灰, "
    "最差质量, 低质量, JPEG压缩残留, 丑陋的, 残缺的, 多余的手指, 画得不好的手部, 画得不好的脸部, "
    "畸形的, 毁容的, 形态畸形的肢体, 手指融合, 静止不动的画面, 杂乱的背景, 三条腿, 背景人很多, 倒着走"
)

# =========================================================
# IMAGE RESIZING LOGIC
# =========================================================
def resize_image(image: Image.Image) -> Image.Image:
    width, height = image.size
    if width == height:
        return image.resize((SQUARE_DIM, SQUARE_DIM), Image.LANCZOS)

    aspect_ratio = width / height
    MAX_ASPECT_RATIO = MAX_DIM / MIN_DIM
    MIN_ASPECT_RATIO = MIN_DIM / MAX_DIM

    image_to_resize = image

    # Center-crop anything wider or taller than the supported aspect range.
    if aspect_ratio > MAX_ASPECT_RATIO:
        crop_width = int(round(height * MAX_ASPECT_RATIO))
        left = (width - crop_width) // 2
        image_to_resize = image.crop((left, 0, left + crop_width, height))
    elif aspect_ratio < MIN_ASPECT_RATIO:
        crop_height = int(round(width / MIN_ASPECT_RATIO))
        top = (height - crop_height) // 2
        image_to_resize = image.crop((0, top, width, top + crop_height))

    if width > height:
        target_w = MAX_DIM
        target_h = int(round(target_w / aspect_ratio))
    else:
        target_h = MAX_DIM
        target_w = int(round(target_h * aspect_ratio))

    # Snap to the nearest multiple of 16, then clamp into [MIN_DIM, MAX_DIM].
    final_w = round(target_w / MULTIPLE_OF) * MULTIPLE_OF
    final_h = round(target_h / MULTIPLE_OF) * MULTIPLE_OF

    final_w = max(MIN_DIM, min(MAX_DIM, final_w))
    final_h = max(MIN_DIM, min(MAX_DIM, final_h))

    return image_to_resize.resize((final_w, final_h), Image.LANCZOS)

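# Worked example with the constants above: a 1920x1080 input has aspect
# ratio 1.778 > 832/480 ≈ 1.733, so it is center-cropped to 1872x1080,
# scaled toward 832x468, snapped to 832x464, then clamped to a final
# 832x480:
#
#   resize_image(Image.new("RGB", (1920, 1080))).size  # -> (832, 480)
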
# =========================================================
# UTILITY FUNCTIONS
# =========================================================
def get_num_frames(duration_seconds: float):
    return 1 + int(np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL))

def get_duration(
    input_image, prompt, steps, negative_prompt,
    duration_seconds, guidance_scale, guidance_scale_2,
    seed, randomize_seed, progress,
):
    # Estimate the GPU time budget for @spaces.GPU: a fixed 10 s overhead plus
    # a per-step cost that scales superlinearly (power 1.5) with the
    # frame-pixel volume, relative to an 81-frame 832x624 baseline.
    BASE_FRAMES_HEIGHT_WIDTH = 81 * 832 * 624
    BASE_STEP_DURATION = 15
    width, height = resize_image(input_image).size
    frames = get_num_frames(duration_seconds)
    factor = frames * width * height / BASE_FRAMES_HEIGHT_WIDTH
    step_duration = BASE_STEP_DURATION * factor ** 1.5
    return 10 + int(steps) * step_duration

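# Example with the UI defaults (3.5 s at 832x480, 6 steps):
# get_num_frames(3.5) = 1 + 56 = 57 frames, so
# factor = 57*832*480 / (81*832*624) ≈ 0.54,
# step_duration ≈ 15 * 0.54**1.5 ≈ 6 s,
# and the requested GPU budget is about 10 + 6*6 ≈ 46 seconds.
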
# =========================================================
# MAIN GENERATION FUNCTION
# =========================================================
@spaces.GPU(duration=get_duration)
def generate_video(
    input_image,
    prompt,
    steps=4,
    negative_prompt=default_negative_prompt,
    duration_seconds=MAX_DURATION,
    guidance_scale=1,
    guidance_scale_2=1,
    seed=42,
    randomize_seed=False,
    progress=gr.Progress(track_tqdm=True),
):
    if input_image is None:
        raise gr.Error("Please upload an input image.")

    num_frames = get_num_frames(duration_seconds)
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    resized_image = resize_image(input_image)

    output_frames_list = pipe(
        image=resized_image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=resized_image.height,
        width=resized_image.width,
        num_frames=num_frames,
        guidance_scale=float(guidance_scale),
        guidance_scale_2=float(guidance_scale_2),
        num_inference_steps=int(steps),
        generator=torch.Generator(device="cuda").manual_seed(current_seed),
    ).frames[0]

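    # delete=False keeps the .mp4 on disk after the handle closes, so the
    # file can be written below and served by Gradio from the returned path.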
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
        video_path = tmpfile.name
    export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
    return video_path, current_seed

# =========================================================
# GRADIO UI
# =========================================================
with gr.Blocks() as demo:
    gr.Markdown("# 🚀 Dream Wan 2.2 Faster Pro (14B) — Ultra Fast I2V with Lightning LoRA")
    gr.Markdown("Optimized FP8 quantized pipeline with AoT blocks & 4-step fast inference ⚡")

    with gr.Row():
        with gr.Column():
            input_image_component = gr.Image(type="pil", label="Input Image")
            prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
            duration_seconds_input = gr.Slider(
                minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=3.5,
                label="Duration (seconds)",
                info=f"Model range: {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps."
            )

            with gr.Accordion("Advanced Settings", open=False):
                negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
                seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
                randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True)
                steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=6, label="Inference Steps")
                guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale (high noise)")
                guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale 2 (low noise)")

            generate_button = gr.Button("🎬 Generate Video", variant="primary")

        with gr.Column():
            video_output = gr.Video(label="Generated Video", autoplay=True)

    ui_inputs = [
        input_image_component, prompt_input, steps_slider,
        negative_prompt_input, duration_seconds_input,
        guidance_scale_input, guidance_scale_2_input,
        seed_input, randomize_seed_checkbox,
    ]
    generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
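    # generate_video returns (video_path, current_seed); routing the seed back
    # into seed_input surfaces the seed actually used, so randomized runs can
    # be reproduced later.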

    gr.Examples(
        examples=[
            [
                "wan_i2v_input.JPG",
                "POV selfie video, white cat with sunglasses standing on surfboard, relaxed smile, tropical beach behind (clear water, green hills, blue sky with clouds). Surfboard tips, cat falls into ocean, camera plunges underwater with bubbles and sunlight beams. Brief underwater view of cat’s face, then cat resurfaces, still filming selfie, playful summer vacation mood.",
                4,
            ],
        ],
        inputs=[input_image_component, prompt_input, steps_slider],
        outputs=[video_output, seed_input],
        fn=generate_video,
        cache_examples="lazy",
    )

if __name__ == "__main__":
    demo.queue().launch(mcp_server=True)
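# Note: in recent Gradio releases, launch(mcp_server=True) also exposes the
# app's API (here, generate_video) as an MCP tool alongside the web UI.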