GiorgioV committed
Commit 6229826 · verified · Parent: 05ecb51

Upload app-7.py

Files changed (1):
  app-7.py (+337, -0)
app-7.py ADDED
@@ -0,0 +1,337 @@
import spaces
import torch
from transformers import (
    AutoTokenizer,
    AutoImageProcessor,
    T5EncoderModel,
)

from diffusers import (
    WanImageToVideoPipeline,
    WanTransformer3DModel,
    AutoencoderKL,
    EulerDiscreteScheduler,
)
import gradio as gr
import tempfile
import numpy as np
from PIL import Image
import random
import gc

from diffusers.utils.export_utils import export_to_video
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file as safetensors_load

from torchao.quantization import quantize_
from torchao.quantization import Float8DynamicActivationFloat8WeightConfig
from torchao.quantization import Int8WeightOnlyConfig

import aoti

MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"

MAX_DIM = 768
MIN_DIM = 448
SQUARE_DIM = 576
MULTIPLE_OF = 16

MAX_SEED = np.iinfo(np.int32).max

FIXED_FPS = 16
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 80

MIN_DURATION = round(MIN_FRAMES_MODEL / FIXED_FPS, 1)
MAX_DURATION = round(MAX_FRAMES_MODEL / FIXED_FPS, 1)

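# Wan 2.2 A14B uses two expert transformers: `transformer` handles the early,
# high-noise denoising timesteps and `transformer_2` the late, low-noise ones.
# Both experts are loaded below from a bf16 repackaging of the official
# weights, directly onto the GPU.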
pipe = WanImageToVideoPipeline.from_pretrained(
    MODEL_ID,
    transformer=WanTransformer3DModel.from_pretrained(
        'cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
        subfolder='transformer',
        torch_dtype=torch.bfloat16,
        device_map='cuda',
    ),
    transformer_2=WanTransformer3DModel.from_pretrained(
        'cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
        subfolder='transformer_2',
        torch_dtype=torch.bfloat16,
        device_map='cuda',
    ),
    torch_dtype=torch.bfloat16,
).to('cuda')

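# Six LoRAs are loaded next: the Lightx2v step-distillation ("Lightning") LoRA
# plus two style LoRAs per expert. Each adapter is later fused into the base
# weights at its own scale and then unloaded, so no adapter hooks remain at
# inference time.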
pipe.load_lora_weights(
    "Kijai/WanVideo_comfy",
    weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
    adapter_name="lightx2v",
)

kwargs_lora_h = {"load_into_transformer": True}
pipe.load_lora_weights(
    "GiorgioV/LoRA_for_WAN_22",
    weight_name="I2V_14B_HIGH.safetensors",
    adapter_name="lora_h", **kwargs_lora_h,
)

kwargs_lora_gh = {"load_into_transformer": True}
pipe.load_lora_weights(
    "GiorgioV/LoRA_for_WAN_22",
    weight_name="Wan2.2 - I2V - GH - HIGH 14B.safetensors",
    adapter_name="lora_gh", **kwargs_lora_gh,
)

kwargs_lora = {"load_into_transformer_2": True}
pipe.load_lora_weights(
    "Kijai/WanVideo_comfy",
    weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
    adapter_name="lightx2v_2", **kwargs_lora,
)

kwargs_lora_l = {"load_into_transformer_2": True}
pipe.load_lora_weights(
    "GiorgioV/LoRA_for_WAN_22",
    weight_name="I2V_14B_LOW.safetensors",
    adapter_name="lora_l", **kwargs_lora_l,
)

kwargs_lora_gl = {"load_into_transformer_2": True}
pipe.load_lora_weights(
    "GiorgioV/LoRA_for_WAN_22",
    weight_name="Wan2.2 - I2V - GH - LOW 14B.safetensors",
    adapter_name="lora_gl", **kwargs_lora_gl,
)

pipe.set_adapters(
    ["lightx2v", "lora_h", "lora_gh", "lightx2v_2", "lora_l", "lora_gl"],
    adapter_weights=[1., 1., 1., 1., 1., 1.],
)
pipe.fuse_lora(adapter_names=["lightx2v"], lora_scale=3., components=["transformer"])
pipe.fuse_lora(adapter_names=["lora_h"], lora_scale=0.3, components=["transformer"])
pipe.fuse_lora(adapter_names=["lora_gh"], lora_scale=0.3, components=["transformer"])
pipe.fuse_lora(adapter_names=["lightx2v_2"], lora_scale=1., components=["transformer_2"])
pipe.fuse_lora(adapter_names=["lora_l"], lora_scale=1., components=["transformer_2"])
pipe.fuse_lora(adapter_names=["lora_gl"], lora_scale=0.8, components=["transformer_2"])
pipe.unload_lora_weights()

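# The LoRAs are fused before quantization on purpose: once the linear layers
# are converted to FP8/int8 below, their deltas could no longer be merged into
# the base weights.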
quantize_(pipe.text_encoder, Int8WeightOnlyConfig())
quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
quantize_(pipe.transformer_2, Float8DynamicActivationFloat8WeightConfig())

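# `aoti` appears to be a helper module local to this Space: the calls below
# presumably swap in transformer blocks that were compiled ahead of time
# (AOTInductor) and published under zerogpu-aoti/Wan2, in the 'fp8da' variant
# matching the FP8 dynamic-activation quantization applied above.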
aoti.aoti_blocks_load(pipe.transformer, 'zerogpu-aoti/Wan2', variant='fp8da')
aoti.aoti_blocks_load(pipe.transformer_2, 'zerogpu-aoti/Wan2', variant='fp8da')


default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
# Standard Wan negative prompt, kept in Chinese as expected by the model; roughly:
# "garish tones, overexposed, static, blurry details, subtitles, style, artwork,
# painting, frame, still, overall gray, worst quality, low quality, JPEG
# compression artifacts, ugly, mutilated, extra fingers, poorly drawn hands,
# poorly drawn face, deformed, disfigured, malformed limbs, fused fingers,
# motionless frame, cluttered background, three legs, many people in the
# background, walking backwards".
default_negative_prompt = "色调艳丽, 过曝, 静态, 细节模糊不清, 字幕, 风格, 作品, 画作, 画面, 静止, 整体发灰, 最差质量, 低质量, JPEG压缩残留, 丑陋的, 残缺的, 多余的手指, 画得不好的手部, 画得不好的脸部, 畸形的, 毁容的, 形态畸形的肢体, 手指融合, 静止不动的画面, 杂乱的背景, 三条腿, 背景人很多, 倒着走"

def resize_image(image: Image.Image) -> Image.Image:
    """
    Resizes an image to fit within the model's constraints, preserving aspect ratio as much as possible.
    """
    width, height = image.size

    # Handle square case
    if width == height:
        return image.resize((SQUARE_DIM, SQUARE_DIM), Image.LANCZOS)

    aspect_ratio = width / height

    MAX_ASPECT_RATIO = MAX_DIM / MIN_DIM
    MIN_ASPECT_RATIO = MIN_DIM / MAX_DIM

    image_to_resize = image

    if aspect_ratio > MAX_ASPECT_RATIO:
        # Very wide image -> center-crop width to the 768x448 aspect ratio
        target_w, target_h = MAX_DIM, MIN_DIM
        crop_width = int(round(height * MAX_ASPECT_RATIO))
        left = (width - crop_width) // 2
        image_to_resize = image.crop((left, 0, left + crop_width, height))
    elif aspect_ratio < MIN_ASPECT_RATIO:
        # Very tall image -> center-crop height to the 448x768 aspect ratio
        target_w, target_h = MIN_DIM, MAX_DIM
        crop_height = int(round(width / MIN_ASPECT_RATIO))
        top = (height - crop_height) // 2
        image_to_resize = image.crop((0, top, width, top + crop_height))
    else:
        if width > height:  # Landscape
            target_w = MAX_DIM
            target_h = int(round(target_w / aspect_ratio))
        else:  # Portrait
            target_h = MAX_DIM
            target_w = int(round(target_h * aspect_ratio))

    # Snap to the nearest multiple of 16 and clamp to the supported range
    final_w = round(target_w / MULTIPLE_OF) * MULTIPLE_OF
    final_h = round(target_h / MULTIPLE_OF) * MULTIPLE_OF

    final_w = max(MIN_DIM, min(MAX_DIM, final_w))
    final_h = max(MIN_DIM, min(MAX_DIM, final_h))

    return image_to_resize.resize((final_w, final_h), Image.LANCZOS)

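# Worked example (hypothetical inputs): a 1920x1080 photo has aspect ratio
# ~1.78, above MAX_ASPECT_RATIO = 768/448 ~ 1.71, so it is center-cropped to
# 1851x1080 and resized to (768, 448); a 1000x1000 photo maps straight to
# (576, 576).
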
def get_num_frames(duration_seconds: float):
    return 1 + int(np.clip(
        int(round(duration_seconds * FIXED_FPS)),
        MIN_FRAMES_MODEL,
        MAX_FRAMES_MODEL,
    ))

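# E.g. the 5.0 s default gives round(5.0 * 16) = 80 frames, clamped to
# [8, 80], plus one leading frame -> 81 frames at the maximum duration.
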
def get_duration(
    input_image,
    prompt,
    steps,
    negative_prompt,
    duration_seconds,
    guidance_scale,
    guidance_scale_2,
    seed,
    randomize_seed,
    progress,
):
    BASE_FRAMES_HEIGHT_WIDTH = 81 * 832 * 624
    BASE_STEP_DURATION = 12

    width, height = resize_image(input_image).size
    frames = get_num_frames(duration_seconds)

    factor = frames * width * height / BASE_FRAMES_HEIGHT_WIDTH
    step_duration = BASE_STEP_DURATION * factor

    return 10 + int(steps) * step_duration

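# This estimates the ZeroGPU time budget passed to @spaces.GPU: per-step cost
# is scaled by the voxel count (frames x width x height) relative to an
# 81-frame 832x624 baseline, plus ~10 s of overhead. E.g. 81 frames at
# 768x448 with 6 steps: factor ~ 0.66, so roughly 10 + 6 * 12 * 0.66 ~ 58 s.
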
@spaces.GPU(duration=get_duration)
def generate_video(
    input_image,
    prompt,
    steps=4,
    negative_prompt=default_negative_prompt,
    duration_seconds=MAX_DURATION,
    guidance_scale=1,
    guidance_scale_2=1,
    seed=42,
    randomize_seed=False,
    progress=gr.Progress(track_tqdm=True),
):
    """
    Generate a video from an input image using the Wan 2.2 14B I2V model with Lightning LoRA.

    This function takes an input image and generates a video animation based on the provided
    prompt and parameters. It uses an FP8-quantized Wan 2.2 14B Image-to-Video model with
    Lightning LoRA for fast generation in 4-8 steps.

    Args:
        input_image (PIL.Image): The input image to animate. Will be resized to target dimensions.
        prompt (str): Text prompt describing the desired animation or motion.
        steps (int, optional): Number of inference steps. More steps = higher quality but slower.
            Defaults to 4. Range: 1-30.
        negative_prompt (str, optional): Negative prompt to avoid unwanted elements.
            Defaults to default_negative_prompt (lists visual artifacts to avoid).
        duration_seconds (float, optional): Duration of the generated video in seconds.
            Defaults to MAX_DURATION. Clamped between MIN_FRAMES_MODEL/FIXED_FPS and
            MAX_FRAMES_MODEL/FIXED_FPS.
        guidance_scale (float, optional): Prompt adherence for the high-noise stage
            (transformer). Higher values = more adherence. Defaults to 1.0. Range: 0.0-20.0.
        guidance_scale_2 (float, optional): Prompt adherence for the low-noise stage
            (transformer_2). Higher values = more adherence. Defaults to 1.0. Range: 0.0-20.0.
        seed (int, optional): Random seed for reproducible results. Defaults to 42.
            Range: 0 to MAX_SEED (2147483647).
        randomize_seed (bool, optional): Whether to use a random seed instead of the provided seed.
            Defaults to False.
        progress (gr.Progress, optional): Gradio progress tracker. Defaults to gr.Progress(track_tqdm=True).

    Returns:
        tuple: A tuple containing:
            - video_path (str): Path to the generated video file (.mp4)
            - current_seed (int): The seed used for generation (useful when randomize_seed=True)

    Raises:
        gr.Error: If input_image is None (no image uploaded).

    Note:
        - Frame count is calculated as duration_seconds * FIXED_FPS (16), plus one
        - Output dimensions are adjusted to be multiples of MULTIPLE_OF (16)
        - The function uses GPU acceleration via the @spaces.GPU decorator
        - Generation time varies based on steps and duration (see get_duration function)
    """
    if input_image is None:
        raise gr.Error("Please upload an input image.")

    num_frames = get_num_frames(duration_seconds)
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    resized_image = resize_image(input_image)

    output_frames_list = pipe(
        image=resized_image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=resized_image.height,
        width=resized_image.width,
        num_frames=num_frames,
        guidance_scale=float(guidance_scale),
        guidance_scale_2=float(guidance_scale_2),
        num_inference_steps=int(steps),
        generator=torch.Generator(device="cuda").manual_seed(current_seed),
    ).frames[0]

    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
        video_path = tmpfile.name

    export_to_video(output_frames_list, video_path, fps=FIXED_FPS)

    return video_path, current_seed

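# Minimal direct-call sketch (hypothetical local file, bypassing the UI):
#   video_path, used_seed = generate_video(
#       Image.open("test.jpg"), "slow cinematic zoom in", steps=6, randomize_seed=True
#   )
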
with gr.Blocks() as demo:
    gr.Markdown("# Fast 4-step Wan 2.2 I2V (14B) with Lightning LoRA")
    gr.Markdown("Run Wan 2.2 in just 4-8 steps, with [Lightning LoRA](https://huggingface.co/Kijai/WanVideo_comfy/tree/main/Wan22-Lightning), FP8 quantization & AoT compilation - compatible with 🧨 diffusers and ZeroGPU ⚡️")
    with gr.Row():
        with gr.Column():
            input_image_component = gr.Image(type="pil", label="Input Image")
            prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
            duration_seconds_input = gr.Slider(minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=5.0, label="Duration (seconds)", info=f"Clamped to the model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS} fps.")

            with gr.Accordion("Advanced Settings", open=False):
                negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
                seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
                randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
                steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=6, label="Inference Steps")
                guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale - high noise stage")
                guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale 2 - low noise stage")

            generate_button = gr.Button("Generate Video", variant="primary")
        with gr.Column():
            video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)

    ui_inputs = [
        input_image_component, prompt_input, steps_slider,
        negative_prompt_input, duration_seconds_input,
        guidance_scale_input, guidance_scale_2_input, seed_input, randomize_seed_checkbox,
    ]
    generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])

    gr.Examples(
        examples=[
            [
                "wan_i2v_input.JPG",
                "POV selfie video, white cat with sunglasses standing on surfboard, relaxed smile, tropical beach behind (clear water, green hills, blue sky with clouds). Surfboard tips, cat falls into ocean, camera plunges underwater with bubbles and sunlight beams. Brief underwater view of cat’s face, then cat resurfaces, still filming selfie, playful summer vacation mood.",
                4,
            ],
            [
                "wan22_input_2.jpg",
                "A sleek lunar vehicle glides into view from left to right, kicking up moon dust as astronauts in white spacesuits hop aboard with characteristic lunar bouncing movements. In the distant background, a VTOL craft descends straight down and lands silently on the surface. Throughout the entire scene, ethereal aurora borealis ribbons dance across the star-filled sky, casting shimmering curtains of green, blue, and purple light that bathe the lunar landscape in an otherworldly, magical glow.",
                4,
            ],
            [
                "kill_bill.jpeg",
                "Uma Thurman's character, Beatrix Kiddo, holds her razor-sharp katana blade steady in the cinematic lighting. Suddenly, the polished steel begins to soften and distort, like heated metal starting to lose its structural integrity. The blade's perfect edge slowly warps and droops, molten steel beginning to flow downward in silvery rivulets while maintaining its metallic sheen. The transformation starts subtly at first - a slight bend in the blade - then accelerates as the metal becomes increasingly fluid. The camera holds steady on her face as her piercing eyes gradually narrow, not with lethal focus, but with confusion and growing alarm as she watches her weapon dissolve before her eyes. Her breathing quickens slightly as she witnesses this impossible transformation. The melting intensifies, the katana's perfect form becoming increasingly abstract, dripping like liquid mercury from her grip. Molten droplets fall to the ground with soft metallic impacts. Her expression shifts from calm readiness to bewilderment and concern as her legendary instrument of vengeance literally liquefies in her hands, leaving her defenseless and disoriented.",
                6,
            ],
        ],
        inputs=[input_image_component, prompt_input, steps_slider],
        outputs=[video_output, seed_input],
        fn=generate_video,
        cache_examples="lazy",
    )

if __name__ == "__main__":
    demo.queue().launch(mcp_server=True)