johny6969 committed on
Commit 5ad05b1 · verified · 1 Parent(s): d3b426f

Upload app.py with huggingface_hub

Files changed (1)
  1. app.py +369 -0
app.py ADDED
@@ -0,0 +1,369 @@
I'll create a complete, working Gradio application based on the provided code for the Wan 2.2 I2V (Image-to-Video) generation model. The app lets users upload an image and generate an animated video with AI.

```python
import spaces
import torch
from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
from diffusers.utils.export_utils import export_to_video
import gradio as gr
import tempfile
import numpy as np
from PIL import Image
import random
import gc

from torchao.quantization import quantize_
from torchao.quantization import Float8DynamicActivationFloat8WeightConfig
from torchao.quantization import Int8WeightOnlyConfig

import aoti  # local helper module, listed at the end of this post


MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"

MAX_DIM = 832
MIN_DIM = 480
SQUARE_DIM = 640
MULTIPLE_OF = 16

MAX_SEED = np.iinfo(np.int32).max

FIXED_FPS = 16
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 80

MIN_DURATION = round(MIN_FRAMES_MODEL / FIXED_FPS, 1)
MAX_DURATION = round(MAX_FRAMES_MODEL / FIXED_FPS, 1)
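# With FIXED_FPS = 16 these work out to MIN_DURATION = round(8 / 16, 1) = 0.5 s
# and MAX_DURATION = round(80 / 16, 1) = 5.0 s, the bounds of the duration slider below.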


pipe = WanImageToVideoPipeline.from_pretrained(
    MODEL_ID,
    transformer=WanTransformer3DModel.from_pretrained(
        'cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
        subfolder='transformer',
        torch_dtype=torch.bfloat16,
        device_map='cuda',
    ),
    transformer_2=WanTransformer3DModel.from_pretrained(
        'cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
        subfolder='transformer_2',
        torch_dtype=torch.bfloat16,
        device_map='cuda',
    ),
    torch_dtype=torch.bfloat16,
).to('cuda')

# Load the Lightning (lightx2v) step-distillation LoRA into both transformers,
# fuse it into the weights, then unload the LoRA to keep inference lean.
pipe.load_lora_weights(
    "Kijai/WanVideo_comfy",
    weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
    adapter_name="lightx2v",
)
kwargs_lora = {"load_into_transformer_2": True}
pipe.load_lora_weights(
    "Kijai/WanVideo_comfy",
    weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
    adapter_name="lightx2v_2", **kwargs_lora,
)
pipe.set_adapters(["lightx2v", "lightx2v_2"], adapter_weights=[1., 1.])
pipe.fuse_lora(adapter_names=["lightx2v"], lora_scale=3., components=["transformer"])
pipe.fuse_lora(adapter_names=["lightx2v_2"], lora_scale=1., components=["transformer_2"])
pipe.unload_lora_weights()

# Quantize: int8 weight-only for the text encoder, FP8 dynamic-activation /
# FP8-weight for both diffusion transformers.
quantize_(pipe.text_encoder, Int8WeightOnlyConfig())
quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
quantize_(pipe.transformer_2, Float8DynamicActivationFloat8WeightConfig())

# Swap in ahead-of-time (AoT) compiled transformer blocks (see aoti.py below).
aoti.aoti_blocks_load(pipe.transformer, 'zerogpu-aoti/Wan2', variant='fp8da')
aoti.aoti_blocks_load(pipe.transformer_2, 'zerogpu-aoti/Wan2', variant='fp8da')


default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
# Wan's recommended negative prompt is written in Chinese; it lists artifacts to
# avoid (roughly: oversaturated, overexposed, static, blurry details, subtitles,
# worst quality, JPEG artifacts, extra fingers, deformed limbs, cluttered
# background, walking backwards, etc.).
default_negative_prompt = "色调艳丽, 过曝, 静态, 细节模糊不清, 字幕, 风格, 作品, 画作, 画面, 静止, 整体发灰, 最差质量, 低质量, JPEG压缩残留, 丑陋的, 残缺的, 多余的手指, 画得不好的手部, 画得不好的脸部, 畸形的, 毁容的, 形态畸形的肢体, 手指融合, 静止不动的画面, 杂乱的背景, 三条腿, 背景人很多, 倒着走"

def resize_image(image: Image.Image) -> Image.Image:
    """
    Resizes an image to fit within the model's constraints, preserving aspect ratio as much as possible.
    """
    width, height = image.size

    # Handle square case
    if width == height:
        return image.resize((SQUARE_DIM, SQUARE_DIM), Image.LANCZOS)

    aspect_ratio = width / height

    MAX_ASPECT_RATIO = MAX_DIM / MIN_DIM
    MIN_ASPECT_RATIO = MIN_DIM / MAX_DIM

    image_to_resize = image

    if aspect_ratio > MAX_ASPECT_RATIO:
        # Very wide image -> center-crop width to fit the 832x480 aspect ratio
        target_w, target_h = MAX_DIM, MIN_DIM
        crop_width = int(round(height * MAX_ASPECT_RATIO))
        left = (width - crop_width) // 2
        image_to_resize = image.crop((left, 0, left + crop_width, height))
    elif aspect_ratio < MIN_ASPECT_RATIO:
        # Very tall image -> center-crop height to fit the 480x832 aspect ratio
        target_w, target_h = MIN_DIM, MAX_DIM
        crop_height = int(round(width / MIN_ASPECT_RATIO))
        top = (height - crop_height) // 2
        image_to_resize = image.crop((0, top, width, top + crop_height))
    else:
        if width > height:  # Landscape
            target_w = MAX_DIM
            target_h = int(round(target_w / aspect_ratio))
        else:  # Portrait
            target_h = MAX_DIM
            target_w = int(round(target_h * aspect_ratio))

    final_w = round(target_w / MULTIPLE_OF) * MULTIPLE_OF
    final_h = round(target_h / MULTIPLE_OF) * MULTIPLE_OF

    final_w = max(MIN_DIM, min(MAX_DIM, final_w))
    final_h = max(MIN_DIM, min(MAX_DIM, final_h))

    return image_to_resize.resize((final_w, final_h), Image.LANCZOS)
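# Worked example: a 1600x1200 upload (aspect 1.33, inside [0.577, 1.733]) takes
# the landscape branch: target_w = 832, target_h = round(832 / 1.3333) = 624,
# both already multiples of 16 and within [480, 832] -> resized to 832x624.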


def get_num_frames(duration_seconds: float):
    return 1 + int(np.clip(
        int(round(duration_seconds * FIXED_FPS)),
        MIN_FRAMES_MODEL,
        MAX_FRAMES_MODEL,
    ))

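# Example: duration_seconds = 3.5 -> round(3.5 * 16) = 56 frames, clipped to
# [8, 80], plus 1 conditioning frame -> 57 frames total.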

def get_duration(
    input_image,
    prompt,
    steps,
    negative_prompt,
    duration_seconds,
    guidance_scale,
    guidance_scale_2,
    seed,
    randomize_seed,
    progress,
):
    # Estimate the ZeroGPU time budget (in seconds) for this request by scaling
    # a baseline per-step cost by the requested pixels-times-frames volume.
    BASE_FRAMES_HEIGHT_WIDTH = 81 * 832 * 624
    BASE_STEP_DURATION = 15
    width, height = resize_image(input_image).size
    frames = get_num_frames(duration_seconds)
    factor = frames * width * height / BASE_FRAMES_HEIGHT_WIDTH
    step_duration = BASE_STEP_DURATION * factor ** 1.5
    return 10 + int(steps) * step_duration

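# Rough example: a 5-second, 832x480 clip (81 frames) at 6 steps gives
# factor = 81*832*480 / (81*832*624) ≈ 0.77, step_duration ≈ 15 * 0.77**1.5 ≈ 10 s,
# so the GPU window requested below is about 10 + 6 * 10 ≈ 70 seconds.
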
@spaces.GPU(duration=get_duration)
def generate_video(
    input_image,
    prompt,
    steps=4,
    negative_prompt=default_negative_prompt,
    duration_seconds=MAX_DURATION,
    guidance_scale=1,
    guidance_scale_2=1,
    seed=42,
    randomize_seed=False,
    progress=gr.Progress(track_tqdm=True),
):
    """
    Generate a video from an input image using the Wan 2.2 14B I2V model with Lightning LoRA.

    This function takes an input image and generates a video animation based on the provided
    prompt and parameters. It uses an FP8-quantized Wan 2.2 14B Image-to-Video model with Lightning LoRA
    for fast generation in 4-8 steps.

    Args:
        input_image (PIL.Image): The input image to animate. Will be resized to target dimensions.
        prompt (str): Text prompt describing the desired animation or motion.
        steps (int, optional): Number of inference steps. More steps = higher quality but slower.
            Defaults to 4. Range: 1-30.
        negative_prompt (str, optional): Negative prompt to avoid unwanted elements.
            Defaults to default_negative_prompt (describes unwanted visual artifacts).
        duration_seconds (float, optional): Duration of the generated video in seconds.
            Defaults to MAX_DURATION (5.0). Clamped between MIN_FRAMES_MODEL/FIXED_FPS and MAX_FRAMES_MODEL/FIXED_FPS.
        guidance_scale (float, optional): Prompt adherence for the high-noise stage. Higher values = more adherence.
            Defaults to 1.0. Range: 0.0-10.0.
        guidance_scale_2 (float, optional): Prompt adherence for the low-noise stage. Higher values = more adherence.
            Defaults to 1.0. Range: 0.0-10.0.
        seed (int, optional): Random seed for reproducible results. Defaults to 42.
            Range: 0 to MAX_SEED (2147483647).
        randomize_seed (bool, optional): Whether to use a random seed instead of the provided seed.
            Defaults to False.
        progress (gr.Progress, optional): Gradio progress tracker. Defaults to gr.Progress(track_tqdm=True).

    Returns:
        tuple: A tuple containing:
            - video_path (str): Path to the generated video file (.mp4)
            - current_seed (int): The seed used for generation (useful when randomize_seed=True)

    Raises:
        gr.Error: If input_image is None (no image uploaded).

    Note:
        - Frame count is duration_seconds * FIXED_FPS (16), clipped, plus one conditioning frame
        - Output dimensions are adjusted to be multiples of MULTIPLE_OF (16)
        - The function uses GPU acceleration via the @spaces.GPU decorator
        - Generation time varies based on steps and duration (see get_duration function)
    """
    if input_image is None:
        raise gr.Error("Please upload an input image.")

    num_frames = get_num_frames(duration_seconds)
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    resized_image = resize_image(input_image)

    output_frames_list = pipe(
        image=resized_image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=resized_image.height,
        width=resized_image.width,
        num_frames=num_frames,
        guidance_scale=float(guidance_scale),
        guidance_scale_2=float(guidance_scale_2),
        num_inference_steps=int(steps),
        generator=torch.Generator(device="cuda").manual_seed(current_seed),
    ).frames[0]

    # delete=False keeps the file on disk so Gradio can serve it after the context closes.
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
        video_path = tmpfile.name

    export_to_video(output_frames_list, video_path, fps=FIXED_FPS)

    return video_path, current_seed

with gr.Blocks() as demo:
    gr.Markdown("# Fast 4-step Wan 2.2 I2V (14B) with Lightning LoRA")
    gr.Markdown("Run Wan 2.2 in just 4-8 steps, with [Lightning LoRA](https://huggingface.co/Kijai/WanVideo_comfy/tree/main/Wan22-Lightning), fp8 quantization & AoT compilation - compatible with 🧨 diffusers and ZeroGPU⚡️")
    with gr.Row():
        with gr.Column():
            input_image_component = gr.Image(type="pil", label="Input Image")
            prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
            duration_seconds_input = gr.Slider(minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=3.5, label="Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")

            with gr.Accordion("Advanced Settings", open=False):
                negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
                seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
                randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
                steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=6, label="Inference Steps")
                guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale - high noise stage")
                guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale 2 - low noise stage")

            generate_button = gr.Button("Generate Video", variant="primary")
        with gr.Column():
            video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)

    ui_inputs = [
        input_image_component, prompt_input, steps_slider,
        negative_prompt_input, duration_seconds_input,
        guidance_scale_input, guidance_scale_2_input, seed_input, randomize_seed_checkbox,
    ]
    generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])

    gr.Examples(
        examples=[
            [
                "wan_i2v_input.JPG",
                "POV selfie video, white cat with sunglasses standing on surfboard, relaxed smile, tropical beach behind (clear water, green hills, blue sky with clouds). Surfboard tips, cat falls into ocean, camera plunges underwater with bubbles and sunlight beams. Brief underwater view of cat's face, then cat resurfaces, still filming selfie, playful summer vacation mood.",
                4,
            ],
            [
                "wan22_input_2.jpg",
                "A sleek lunar vehicle glides into view from left to right, kicking up moon dust as astronauts in white spacesuits hop aboard with characteristic lunar bouncing movements. In the distant background, a VTOL craft descends straight down and lands silently on the surface. Throughout the entire scene, ethereal aurora borealis ribbons dance across the star-filled sky, casting shimmering curtains of green, blue, and purple light that bathe the lunar landscape in an otherworldly, magical glow.",
                4,
            ],
            [
                "kill_bill.jpeg",
                "Uma Thurman's character, Beatrix Kiddo, holds her razor-sharp katana blade steady in the cinematic lighting. Suddenly, the polished steel begins to soften and distort, like heated metal starting to lose its structural integrity. The blade's perfect edge slowly warps and droops, molten steel beginning to flow downward in silvery rivulets while maintaining its metallic sheen. The transformation starts subtly at first - a slight bend in the blade - then accelerates as the metal becomes increasingly fluid. The camera holds steady on her face as her piercing eyes gradually narrow, not with lethal focus, but with confusion and growing alarm as she watches her weapon dissolve before her eyes. Her breathing quickens slightly as she witnesses this impossible transformation. The melting intensifies, the katana's perfect form becoming increasingly abstract, dripping like liquid mercury from her grip. Molten droplets fall to the ground with soft metallic impacts. Her expression shifts from calm readiness to bewilderment and concern as her legendary instrument of vengeance literally liquefies in her hands, leaving her defenseless and disoriented.",
                6,
            ],
        ],
        inputs=[input_image_component, prompt_input, steps_slider],
        outputs=[video_output, seed_input],
        fn=generate_video,
        cache_examples="lazy",
    )

if __name__ == "__main__":
    demo.queue().launch(mcp_server=True)
```
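
Because the demo launches with `mcp_server=True` and a queue, the endpoint can also be driven programmatically. Here is a minimal sketch using `gradio_client` (a recent version is assumed; the Space id `johny6969/wan22-i2v` and the `/generate_video` api name are illustrative placeholders, not confirmed values):

```python
from gradio_client import Client, handle_file

# Hypothetical Space id; substitute the real one once deployed.
client = Client("johny6969/wan22-i2v")
video_path, used_seed = client.predict(
    input_image=handle_file("wan_i2v_input.JPG"),
    prompt="make this image come alive, cinematic motion",
    steps=4,
    api_name="/generate_video",  # Gradio derives this from the function name by default
)
print(video_path, used_seed)
```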

The Space's dependencies go in `requirements.txt` (note that `torch` and `spaces` are not pinned here, presumably because ZeroGPU Spaces preinstall them):

```
git+https://github.com/huggingface/diffusers.git
transformers
accelerate
safetensors
sentencepiece
peft
ftfy
imageio-ffmpeg
opencv-python
torchao==0.11.0
gradio[mcp]
```

And the `aoti` helper module imported by the app, saved alongside it as `aoti.py`:

```python
from typing import cast

import torch
from huggingface_hub import hf_hub_download
from spaces.zero.torch.aoti import ZeroGPUCompiledModel
from spaces.zero.torch.aoti import ZeroGPUWeights
from torch._functorch._aot_autograd.subclass_parametrization import unwrap_tensor_subclass_parameters


def _shallow_clone_module(module: torch.nn.Module) -> torch.nn.Module:
    # Copy the module structure without copying tensors, so the quantized tensor
    # subclasses can be unwrapped on the clone without mutating the original.
    clone = object.__new__(module.__class__)
    clone.__dict__ = module.__dict__.copy()
    clone._parameters = module._parameters.copy()
    clone._buffers = module._buffers.copy()
    clone._modules = {k: _shallow_clone_module(v) for k, v in module._modules.items() if v is not None}
    return clone


def aoti_blocks_load(module: torch.nn.Module, repo_id: str, variant: str | None = None):
    # Download one AoT-compiled package per repeated transformer block class...
    repeated_blocks = cast(list[str], module._repeated_blocks)
    aoti_files = {name: hf_hub_download(
        repo_id=repo_id,
        filename='package.pt2',
        subfolder=name if variant is None else f'{name}.{variant}',
    ) for name in repeated_blocks}
    for block_name, aoti_file in aoti_files.items():
        for block in module.modules():
            if block.__class__.__name__ == block_name:
                block_ = _shallow_clone_module(block)
                unwrap_tensor_subclass_parameters(block_)
                weights = ZeroGPUWeights(block_.state_dict())
                # ...and replace each matching block's forward with the compiled kernel.
                block.forward = ZeroGPUCompiledModel(aoti_file, weights)
```
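
For context, the `package.pt2` files downloaded above are AOTInductor artifacts. A minimal sketch of how such a package can be produced (illustrative only: it uses a stand-in `nn.Linear` in place of a real transformer block, the actual `zerogpu-aoti/Wan2` packages were built separately, and the exact APIs vary across recent PyTorch versions):

```python
import torch

# Stand-in module and example inputs for the export (hypothetical placeholders).
block = torch.nn.Linear(8, 8)
example_inputs = (torch.randn(1, 8),)

# Export the module, then compile it ahead of time into a .pt2 package.
exported = torch.export.export(block, example_inputs)
torch._inductor.aoti_compile_and_package(exported, package_path="package.pt2")
```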

This is a sophisticated Gradio application that implements the Wan 2.2 Image-to-Video generation model with several advanced optimizations:

## Key Features:

1. **Model & Optimizations**:
   - Uses the Wan 2.2 14B I2V model with Lightning LoRA for fast 4-8 step generation
   - FP8 quantization for memory efficiency
   - AoT (Ahead-of-Time) compilation for performance (see the packaging sketch above)
   - ZeroGPU compatibility for efficient resource usage

2. **User Interface**:
   - Clean, intuitive layout with input image upload
   - Prompt input with a default suggestion
   - Duration control slider (0.5-5 seconds)
   - Advanced settings accordion for fine-tuning
   - Video output with autoplay

3. **Advanced Controls**:
   - Negative prompt for quality control
   - Seed control with a randomization option
   - Inference steps slider (1-30)
   - Dual guidance scales for the high- and low-noise stages
   - Example prompts included

4. **Technical Features**:
   - Automatic image resizing to model constraints
   - Frame count calculation based on duration
   - Progress tracking with tqdm integration
   - MCP server enabled for tool compatibility
   - Lazy caching for examples

The application turns static images into short animated videos, with quantization and AoT compilation keeping it fast and memory-efficient enough to run across hardware configurations through ZeroGPU.