GiorgioV committed on
Commit bd43c49 · verified
1 Parent(s): 0dd7e99

Upload 3 files

Files changed (2)
  1. app-11.py +283 -0
  2. app_lora_cpu.py +501 -0
app-11.py ADDED
@@ -0,0 +1,283 @@
import spaces
import torch
from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
from diffusers.utils.export_utils import export_to_video
import gradio as gr
import tempfile
import numpy as np
from PIL import Image
import random
import gc

from torchao.quantization import quantize_
from torchao.quantization import Float8DynamicActivationFloat8WeightConfig
from torchao.quantization import Int8WeightOnlyConfig

import aoti


MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"

MAX_DIM = 832
MIN_DIM = 480
SQUARE_DIM = 640
MULTIPLE_OF = 16

MAX_SEED = np.iinfo(np.int32).max

FIXED_FPS = 16
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 80

MIN_DURATION = round(MIN_FRAMES_MODEL / FIXED_FPS, 1)
MAX_DURATION = round(MAX_FRAMES_MODEL / FIXED_FPS, 1)


pipe = WanImageToVideoPipeline.from_pretrained(
    MODEL_ID,
    transformer=WanTransformer3DModel.from_pretrained(
        'cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
        subfolder='transformer',
        torch_dtype=torch.bfloat16,
        device_map='cuda',
    ),
    transformer_2=WanTransformer3DModel.from_pretrained(
        'cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
        subfolder='transformer_2',
        torch_dtype=torch.bfloat16,
        device_map='cuda',
    ),
    torch_dtype=torch.bfloat16,
).to('cuda')

pipe.load_lora_weights(
    "Kijai/WanVideo_comfy",
    weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
    adapter_name="lightx2v"
)
kwargs_lora = {}
kwargs_lora["load_into_transformer_2"] = True
pipe.load_lora_weights(
    "Kijai/WanVideo_comfy",
    weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
    adapter_name="lightx2v_2", **kwargs_lora
)
pipe.set_adapters(["lightx2v", "lightx2v_2"], adapter_weights=[1., 1.])
pipe.fuse_lora(adapter_names=["lightx2v"], lora_scale=3., components=["transformer"])
pipe.fuse_lora(adapter_names=["lightx2v_2"], lora_scale=1., components=["transformer_2"])
pipe.unload_lora_weights()

quantize_(pipe.text_encoder, Int8WeightOnlyConfig())
quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
quantize_(pipe.transformer_2, Float8DynamicActivationFloat8WeightConfig())

aoti.aoti_blocks_load(pipe.transformer, 'zerogpu-aoti/Wan2', variant='fp8da')
aoti.aoti_blocks_load(pipe.transformer_2, 'zerogpu-aoti/Wan2', variant='fp8da')


default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
default_negative_prompt = "色调艳丽, 过曝, 静态, 细节模糊不清, 字幕, 风格, 作品, 画作, 画面, 静止, 整体发灰, 最差质量, 低质量, JPEG压缩残留, 丑陋的, 残缺的, 多余的手指, 画得不好的手部, 画得不好的脸部, 畸形的, 毁容的, 形态畸形的肢体, 手指融合, 静止不动的画面, 杂乱的背景, 三条腿, 背景人很多, 倒着走"


def resize_image(image: Image.Image) -> Image.Image:
    """
    Resizes an image to fit within the model's constraints, preserving aspect ratio as much as possible.
    """
    width, height = image.size

    # Handle square case
    if width == height:
        return image.resize((SQUARE_DIM, SQUARE_DIM), Image.LANCZOS)

    aspect_ratio = width / height

    MAX_ASPECT_RATIO = MAX_DIM / MIN_DIM
    MIN_ASPECT_RATIO = MIN_DIM / MAX_DIM

    image_to_resize = image

    if aspect_ratio > MAX_ASPECT_RATIO:
        # Very wide image -> crop width to fit 832x480 aspect ratio
        target_w, target_h = MAX_DIM, MIN_DIM
        crop_width = int(round(height * MAX_ASPECT_RATIO))
        left = (width - crop_width) // 2
        image_to_resize = image.crop((left, 0, left + crop_width, height))
    elif aspect_ratio < MIN_ASPECT_RATIO:
        # Very tall image -> crop height to fit 480x832 aspect ratio
        target_w, target_h = MIN_DIM, MAX_DIM
        crop_height = int(round(width / MIN_ASPECT_RATIO))
        top = (height - crop_height) // 2
        image_to_resize = image.crop((0, top, width, top + crop_height))
    else:
        if width > height:  # Landscape
            target_w = MAX_DIM
            target_h = int(round(target_w / aspect_ratio))
        else:  # Portrait
            target_h = MAX_DIM
            target_w = int(round(target_h * aspect_ratio))

    final_w = round(target_w / MULTIPLE_OF) * MULTIPLE_OF
    final_h = round(target_h / MULTIPLE_OF) * MULTIPLE_OF

    final_w = max(MIN_DIM, min(MAX_DIM, final_w))
    final_h = max(MIN_DIM, min(MAX_DIM, final_h))

    return image_to_resize.resize((final_w, final_h), Image.LANCZOS)


def get_num_frames(duration_seconds: float):
    return 1 + int(np.clip(
        int(round(duration_seconds * FIXED_FPS)),
        MIN_FRAMES_MODEL,
        MAX_FRAMES_MODEL,
    ))


def get_duration(
    input_image,
    prompt,
    steps,
    negative_prompt,
    duration_seconds,
    guidance_scale,
    guidance_scale_2,
    seed,
    randomize_seed,
    progress,
):
    BASE_FRAMES_HEIGHT_WIDTH = 81 * 832 * 624
    BASE_STEP_DURATION = 15
    width, height = resize_image(input_image).size
    frames = get_num_frames(duration_seconds)
    factor = frames * width * height / BASE_FRAMES_HEIGHT_WIDTH
    step_duration = BASE_STEP_DURATION * factor ** 1.5
    return 10 + int(steps) * step_duration


@spaces.GPU(duration=get_duration)
def generate_video(
    input_image,
    prompt,
    steps=4,
    negative_prompt=default_negative_prompt,
    duration_seconds=MAX_DURATION,
    guidance_scale=1,
    guidance_scale_2=1,
    seed=42,
    randomize_seed=False,
    progress=gr.Progress(track_tqdm=True),
):
    """
    Generate a video from an input image using the Wan 2.2 14B I2V model with Lightning LoRA.

    This function takes an input image and generates a video animation based on the provided
    prompt and parameters. It uses an FP8-quantized Wan 2.2 14B Image-to-Video model with Lightning LoRA
    for fast generation in 4-8 steps.

    Args:
        input_image (PIL.Image): The input image to animate. Will be resized to target dimensions.
        prompt (str): Text prompt describing the desired animation or motion.
        steps (int, optional): Number of inference steps. More steps = higher quality but slower.
            Defaults to 4. Range: 1-30.
        negative_prompt (str, optional): Negative prompt to avoid unwanted elements.
            Defaults to default_negative_prompt (contains unwanted visual artifacts).
        duration_seconds (float, optional): Duration of the generated video in seconds.
            Defaults to MAX_DURATION. Clamped between MIN_FRAMES_MODEL/FIXED_FPS and MAX_FRAMES_MODEL/FIXED_FPS.
        guidance_scale (float, optional): Controls adherence to the prompt. Higher values = more adherence.
            Defaults to 1.0. Range: 0.0-20.0.
        guidance_scale_2 (float, optional): Controls adherence to the prompt. Higher values = more adherence.
            Defaults to 1.0. Range: 0.0-20.0.
        seed (int, optional): Random seed for reproducible results. Defaults to 42.
            Range: 0 to MAX_SEED (2147483647).
        randomize_seed (bool, optional): Whether to use a random seed instead of the provided seed.
            Defaults to False.
        progress (gr.Progress, optional): Gradio progress tracker. Defaults to gr.Progress(track_tqdm=True).

    Returns:
        tuple: A tuple containing:
            - video_path (str): Path to the generated video file (.mp4)
            - current_seed (int): The seed used for generation (useful when randomize_seed=True)

    Raises:
        gr.Error: If input_image is None (no image uploaded).

    Note:
        - Frame count is calculated as duration_seconds * FIXED_FPS (16)
        - Output dimensions are adjusted to be multiples of MULTIPLE_OF (16)
        - The function uses GPU acceleration via the @spaces.GPU decorator
        - Generation time varies based on steps and duration (see get_duration function)
    """
    if input_image is None:
        raise gr.Error("Please upload an input image.")

    num_frames = get_num_frames(duration_seconds)
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    resized_image = resize_image(input_image)

    output_frames_list = pipe(
        image=resized_image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=resized_image.height,
        width=resized_image.width,
        num_frames=num_frames,
        guidance_scale=float(guidance_scale),
        guidance_scale_2=float(guidance_scale_2),
        num_inference_steps=int(steps),
        generator=torch.Generator(device="cuda").manual_seed(current_seed),
    ).frames[0]

    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
        video_path = tmpfile.name

    export_to_video(output_frames_list, video_path, fps=FIXED_FPS)

    return video_path, current_seed


with gr.Blocks() as demo:
    gr.Markdown("# Fast 4-step Wan 2.2 I2V (14B) with Lightning LoRA")
    gr.Markdown("Run Wan 2.2 in just 4-8 steps, with [Lightning LoRA](https://huggingface.co/Kijai/WanVideo_comfy/tree/main/Wan22-Lightning), FP8 quantization & AoT compilation - compatible with 🧨 diffusers and ZeroGPU⚡️")
    with gr.Row():
        with gr.Column():
            input_image_component = gr.Image(type="pil", label="Input Image")
            prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
            duration_seconds_input = gr.Slider(minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=3.5, label="Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")

            with gr.Accordion("Advanced Settings", open=False):
                negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
                seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
                randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
                steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=6, label="Inference Steps")
                guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale - high noise stage")
                guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale 2 - low noise stage")

            generate_button = gr.Button("Generate Video", variant="primary")
        with gr.Column():
            video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)

    ui_inputs = [
        input_image_component, prompt_input, steps_slider,
        negative_prompt_input, duration_seconds_input,
        guidance_scale_input, guidance_scale_2_input, seed_input, randomize_seed_checkbox
    ]
    generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])

    gr.Examples(
        examples=[
            [
                "wan_i2v_input.JPG",
                "POV selfie video, white cat with sunglasses standing on surfboard, relaxed smile, tropical beach behind (clear water, green hills, blue sky with clouds). Surfboard tips, cat falls into ocean, camera plunges underwater with bubbles and sunlight beams. Brief underwater view of cat’s face, then cat resurfaces, still filming selfie, playful summer vacation mood.",
                4,
            ],
            [
                "wan22_input_2.jpg",
                "A sleek lunar vehicle glides into view from left to right, kicking up moon dust as astronauts in white spacesuits hop aboard with characteristic lunar bouncing movements. In the distant background, a VTOL craft descends straight down and lands silently on the surface. Throughout the entire scene, ethereal aurora borealis ribbons dance across the star-filled sky, casting shimmering curtains of green, blue, and purple light that bathe the lunar landscape in an otherworldly, magical glow.",
                4,
            ],
            [
                "kill_bill.jpeg",
                "Uma Thurman's character, Beatrix Kiddo, holds her razor-sharp katana blade steady in the cinematic lighting. Suddenly, the polished steel begins to soften and distort, like heated metal starting to lose its structural integrity. The blade's perfect edge slowly warps and droops, molten steel beginning to flow downward in silvery rivulets while maintaining its metallic sheen. The transformation starts subtly at first - a slight bend in the blade - then accelerates as the metal becomes increasingly fluid. The camera holds steady on her face as her piercing eyes gradually narrow, not with lethal focus, but with confusion and growing alarm as she watches her weapon dissolve before her eyes. Her breathing quickens slightly as she witnesses this impossible transformation. The melting intensifies, the katana's perfect form becoming increasingly abstract, dripping like liquid mercury from her grip. Molten droplets fall to the ground with soft metallic impacts. Her expression shifts from calm readiness to bewilderment and concern as her legendary instrument of vengeance literally liquefies in her hands, leaving her defenseless and disoriented.",
                6,
            ],
        ],
        inputs=[input_image_component, prompt_input, steps_slider], outputs=[video_output, seed_input], fn=generate_video, cache_examples="lazy"
    )

if __name__ == "__main__":
    demo.queue().launch(mcp_server=True)
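
A quick way to sanity-check the ZeroGPU time budget requested by @spaces.GPU(duration=get_duration): the estimate is 10 s of overhead plus a per-step cost that scales with the pixel-frame volume to the power 1.5. A minimal standalone sketch, assuming the same constants as app-11.py; estimate_gpu_seconds is a hypothetical helper, not part of the app:

# Mirrors get_duration's arithmetic for offline inspection (assumed constants).
FIXED_FPS = 16
BASE_FRAMES_HEIGHT_WIDTH = 81 * 832 * 624
BASE_STEP_DURATION = 15

def estimate_gpu_seconds(steps: int, duration_seconds: float, width: int, height: int) -> float:
    # 1 + round(seconds * fps), mirroring get_num_frames before clamping
    frames = 1 + int(round(duration_seconds * FIXED_FPS))
    # Cost grows super-linearly with the frames * width * height volume
    factor = frames * width * height / BASE_FRAMES_HEIGHT_WIDTH
    return 10 + steps * BASE_STEP_DURATION * factor ** 1.5

print(estimate_gpu_seconds(6, 3.5, 832, 480))  # ~46 s at the UI defaults
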
app_lora_cpu.py ADDED
@@ -0,0 +1,501 @@
import spaces
import torch
from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
from diffusers.utils.export_utils import export_to_video
import gradio as gr
import tempfile
import numpy as np
from PIL import Image
import random
import gc
import os

from torchao.quantization import quantize_
from torchao.quantization import Float8DynamicActivationFloat8WeightConfig
from torchao.quantization import Int8WeightOnlyConfig

import aoti


MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"

MAX_DIM = 832
MIN_DIM = 480
SQUARE_DIM = 640
MULTIPLE_OF = 16

MAX_SEED = np.iinfo(np.int32).max

FIXED_FPS = 16
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 176  # 80

MIN_DURATION = round(MIN_FRAMES_MODEL / FIXED_FPS, 1)
MAX_DURATION = round(MAX_FRAMES_MODEL / FIXED_FPS, 1)


pipe = WanImageToVideoPipeline.from_pretrained(
    MODEL_ID,
    transformer=WanTransformer3DModel.from_pretrained(
        'cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
        subfolder='transformer',
        torch_dtype=torch.bfloat16,
        device_map='cuda',
    ),
    transformer_2=WanTransformer3DModel.from_pretrained(
        'cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
        subfolder='transformer_2',
        torch_dtype=torch.bfloat16,
        device_map='cuda',
    ),
    torch_dtype=torch.bfloat16,
).to('cuda')

# Load and fuse your LoRA models
# weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
pipe.load_lora_weights(
    "Kijai/WanVideo_comfy",
    weight_name="LoRAs/Wan22_Lightx2v/Wan_2_2_I2V_A14B_HIGH_lightx2v_4step_lora_v1030_rank_64_bf16.safetensors",
    adapter_name="lightx2v"
)
kwargs_lora = {}
kwargs_lora["load_into_transformer_2"] = True

# weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
pipe.load_lora_weights(
    "Kijai/WanVideo_comfy",
    weight_name="LoRAs/Wan22-Lightning/old/Wan2.2-Lightning_I2V-A14B-4steps-lora_LOW_fp16.safetensors",
    adapter_name="lightx2v_2", **kwargs_lora
)

# New: load the user-provided high-noise LoRA
pipe.load_lora_weights(
    "rahul7star/wan2.2Lora",
    weight_name="DR34ML4Y_I2V_14B_HIGH.safetensors",
    adapter_name="high_noise_lora",
    token=os.environ.get("HF_TOKEN")
)
# New: load the user-provided low-noise LoRA
pipe.load_lora_weights(
    "rahul7star/wan2.2Lora",
    weight_name="DR34ML4Y_I2V_14B_LOW.safetensors",
    adapter_name="low_noise_lora",
    token=os.environ.get("HF_TOKEN"),
    load_into_transformer_2=True
)

## 2nd attempt
pipe.load_lora_weights(
    "rahul7star/wan2.2Lora",
    weight_name="wan2.2_i2v_highnoise_pov_missionary_v1.0.safetensors",
    adapter_name="high_noise_lora1",
    token=os.environ.get("HF_TOKEN")
)
# New: load the user-provided low-noise LoRA
pipe.load_lora_weights(
    "rahul7star/wan2.2Lora",
    weight_name="wan2.2_i2v_lownoise_pov_missionary_v1.0.safetensors",
    adapter_name="low_noise_lora1",
    token=os.environ.get("HF_TOKEN"),
    load_into_transformer_2=True
)

# pipe.set_adapters(["lightx2v", "lightx2v_2", "high_noise_lora", "low_noise_lora", "high_noise_lora1", "low_noise_lora1", "high_noise_lora2", "low_noise_lora2"], adapter_weights=[1., 1., 1., 1., 1., 1., 1., 1.])
# # lora_scale changed
# pipe.fuse_lora(adapter_names=["lightx2v", "high_noise_lora", "high_noise_lora1", "high_noise_lora2"], lora_scales=[3.0, 3.0, 3.0, 1.0], components=["transformer"])
# # lora_scale changed
# pipe.fuse_lora(adapter_names=["lightx2v_2", "low_noise_lora", "low_noise_lora1", "low_noise_lora2"], lora_scales=[1.0, 1.0, 1.0, 1.0], components=["transformer_2"])


###### Use this for the 3rd LoRA

# ## 3rd
# pipe.load_lora_weights(
#     "rahul7star/wan2.2Lora",
#     weight_name="Wan2.2-Doggy_high_noise.safetensors",
#     adapter_name="high_noise_lora2",
#     token=os.environ.get("HF_TOKEN")
# )
# # New: load the user-provided low-noise LoRA
# pipe.load_lora_weights(
#     "rahul7star/wan2.2Lora",
#     weight_name="Wan2.2-Doggy_low_noise.safetensors",
#     adapter_name="low_noise_lora2",
#     token=os.environ.get("HF_TOKEN"),
#     load_into_transformer_2=True
# )

# pipe.set_adapters(["lightx2v", "lightx2v_2", "high_noise_lora", "low_noise_lora", "high_noise_lora1", "low_noise_lora1", "high_noise_lora2", "low_noise_lora2"], adapter_weights=[1., 1., 1., 1., 1., 1., 1., 1.])
# # lora_scale changed
# pipe.fuse_lora(adapter_names=["lightx2v", "high_noise_lora", "high_noise_lora1", "high_noise_lora2"], lora_scales=[3.0, 3.0, 3.0, 3.0], components=["transformer"])
# # lora_scale changed
# pipe.fuse_lora(adapter_names=["lightx2v_2", "low_noise_lora", "low_noise_lora1", "low_noise_lora2"], lora_scales=[1.0, 1.0, 1.0, 1.0], components=["transformer_2"])

# #### 3rd LoRA ends ######


pipe.set_adapters(["lightx2v", "lightx2v_2", "high_noise_lora", "low_noise_lora", "high_noise_lora1", "low_noise_lora1"], adapter_weights=[1.5, 1., 1., 1., 1., 1.])
# lora_scale changed. NOTE: diffusers' fuse_lora takes a single lora_scale float
# (as used in app-11.py); an unrecognized lora_scales list may be silently ignored.
pipe.fuse_lora(adapter_names=["lightx2v", "high_noise_lora", "high_noise_lora1"], lora_scales=[3.0, 3.0, 3.0], components=["transformer"])
# lora_scale changed
pipe.fuse_lora(adapter_names=["lightx2v_2", "low_noise_lora", "low_noise_lora1"], lora_scales=[1.0, 1.0, 1.0], components=["transformer_2"])


######## Testing all 4 together

# Original v8normal LoRA
# pipe.load_lora_weights(
#     "rahul7star/wan2.2Lora", weight_name="DR34ML4Y_I2V_14B_HIGH.safetensors", adapter_name="high_noise_lora", token=os.environ.get("HF_TOKEN")
# )
# pipe.load_lora_weights(
#     "rahul7star/wan2.2Lora", weight_name="DR34ML4Y_I2V_14B_LOW.safetensors", adapter_name="low_noise_lora", token=os.environ.get("HF_TOKEN"), load_into_transformer_2=True
# )
# # dremal LoRA
# pipe.load_lora_weights(
#     "rahul7star/wan2.2Lora", weight_name="wan2.2_i2v_highnoise_pov_missionary_v1.0.safetensors", adapter_name="high_dremal_lora", token=os.environ.get("HF_TOKEN")
# )
# pipe.load_lora_weights(
#     "rahul7star/wan2.2Lora", weight_name="wan2.2_i2v_lownoise_pov_missionary_v1.0.safetensors", adapter_name="low_dremal_lora", token=os.environ.get("HF_TOKEN"), load_into_transformer_2=True
# )
# # missimd LoRA
# pipe.load_lora_weights(
#     "rahul7star/wan2.2Lora", weight_name="Wan2.2-Doggy_high_noise.safetensors", adapter_name="high_missimd_lora", token=os.environ.get("HF_TOKEN")
# )
# pipe.load_lora_weights(
#     "rahul7star/wan2.2Lora", weight_name="Wan2.2-Doggy_low_noise.safetensors", adapter_name="low_missimd_lora", token=os.environ.get("HF_TOKEN"), load_into_transformer_2=True
# )
# # ultrade LoRA
# pipe.load_lora_weights(
#     "rahul7star/wan2.2Lora", weight_name="WAN-2.2-I2V-POV-Titfuck-Paizuri-HIGH-v1.0.safetensors", adapter_name="high_ultrade_lora", token=os.environ.get("HF_TOKEN")
# )
# pipe.load_lora_weights(
#     "rahul7star/wan2.2Lora", weight_name="WAN-2.2-I2V-POV-Titfuck-Paizuri-LOW-v1.0.safetensors", adapter_name="low_ultrade_lora", token=os.environ.get("HF_TOKEN"), load_into_transformer_2=True
# )

# # Set all LoRA adapter weights
# pipe.set_adapters(
#     [
#         "lightx2v", "lightx2v_2",
#         "high_noise_lora", "low_noise_lora",
#         "high_dremal_lora", "low_dremal_lora",
#         "high_missimd_lora", "low_missimd_lora",
#         "high_ultrade_lora", "low_ultrade_lora"
#     ],
#     adapter_weights=[1.7, 1.5, 0.4, 0.4, 0, 0, 0.7, 0.7, 0.4, 0.4]
# )
# # Fuse the LoRAs into transformer
# pipe.fuse_lora(
#     adapter_names=[
#         "lightx2v",
#         "high_noise_lora",
#         "high_dremal_lora",
#         "high_missimd_lora",
#         "high_ultrade_lora"
#     ],
#     lora_scales=[4.0, 3.0, 2.0, 2.0, 2.0],
#     components=["transformer"]
# )
# # Fuse the LoRAs into transformer_2
# pipe.fuse_lora(
#     adapter_names=[
#         "lightx2v_2",
#         "low_noise_lora",
#         "low_dremal_lora",
#         "low_missimd_lora",
#         "low_ultrade_lora"
#     ],
#     lora_scales=[2.0, 1.5, 1.0, 1.0, 1.0],
#     components=["transformer_2"]
# )

#############


pipe.unload_lora_weights()

quantize_(pipe.text_encoder, Int8WeightOnlyConfig())
quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
quantize_(pipe.transformer_2, Float8DynamicActivationFloat8WeightConfig())

aoti.aoti_blocks_load(pipe.transformer, 'rahul7star/WanAot', variant='fp8da')
aoti.aoti_blocks_load(pipe.transformer_2, 'rahul7star/WanAot', variant='fp8da')


default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
default_negative_prompt = "色调艳丽, 过曝, 静态, 细节模糊不清, 字幕, 风格, 作品, 画作, 画面, 静止, 整体发灰, 最差质量, 低质量, JPEG压缩残留, 丑陋的, 残缺的, 多余的手指, 画得不好的手部, 画得不好的脸部, 畸形的, 毁容的, 形态畸形的肢体, 手指融合, 静止不动的画面, 杂乱的背景, 三条腿, 背景人很多, 倒着走"


def resize_image(image: Image.Image) -> Image.Image:
    """
    Resizes an image to fit within the model's constraints, preserving aspect ratio as much as possible.
    """
    width, height = image.size

    # Handle square case
    if width == height:
        return image.resize((SQUARE_DIM, SQUARE_DIM), Image.LANCZOS)

    aspect_ratio = width / height

    MAX_ASPECT_RATIO = MAX_DIM / MIN_DIM
    MIN_ASPECT_RATIO = MIN_DIM / MAX_DIM

    image_to_resize = image

    if aspect_ratio > MAX_ASPECT_RATIO:
        # Very wide image -> crop width to fit 832x480 aspect ratio
        target_w, target_h = MAX_DIM, MIN_DIM
        crop_width = int(round(height * MAX_ASPECT_RATIO))
        left = (width - crop_width) // 2
        image_to_resize = image.crop((left, 0, left + crop_width, height))
    elif aspect_ratio < MIN_ASPECT_RATIO:
        # Very tall image -> crop height to fit 480x832 aspect ratio
        target_w, target_h = MIN_DIM, MAX_DIM
        crop_height = int(round(width / MIN_ASPECT_RATIO))
        top = (height - crop_height) // 2
        image_to_resize = image.crop((0, top, width, top + crop_height))
    else:
        if width > height:  # Landscape
            target_w = MAX_DIM
            target_h = int(round(target_w / aspect_ratio))
        else:  # Portrait
            target_h = MAX_DIM
            target_w = int(round(target_h * aspect_ratio))

    final_w = round(target_w / MULTIPLE_OF) * MULTIPLE_OF
    final_h = round(target_h / MULTIPLE_OF) * MULTIPLE_OF

    final_w = max(MIN_DIM, min(MAX_DIM, final_w))
    final_h = max(MIN_DIM, min(MAX_DIM, final_h))

    return image_to_resize.resize((final_w, final_h), Image.LANCZOS)


HF_MODEL = os.environ.get("HF_UPLOAD_REPO", "rahul7star/wan22-aot-image-2025-dec")


# --- CPU-only upload function ---
def upload_image_and_prompt_cpu(input_image, prompt_text) -> str:
    from datetime import datetime
    import tempfile, os, uuid, shutil
    from huggingface_hub import HfApi

    # Instantiate the HfApi class
    api = HfApi()

    today_str = datetime.now().strftime("%Y-%m-%d")
    unique_subfolder = f"Upload-Image-{uuid.uuid4().hex[:8]}"
    hf_folder = f"{today_str}/{unique_subfolder}"

    # Save image temporarily
    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp_img:
        if isinstance(input_image, str):
            shutil.copy(input_image, tmp_img.name)
        else:
            input_image.save(tmp_img.name, format="PNG")
        tmp_img_path = tmp_img.name

    # Upload image using the HfApi instance
    api.upload_file(
        path_or_fileobj=tmp_img_path,
        path_in_repo=f"{hf_folder}/input_image.png",
        repo_id=HF_MODEL,
        repo_type="model",
        token=os.environ.get("HUGGINGFACE_HUB_TOKEN")
    )

    # Save prompt as summary.txt
    summary_file = tempfile.NamedTemporaryFile(delete=False, suffix=".txt").name
    with open(summary_file, "w", encoding="utf-8") as f:
        f.write(prompt_text)

    api.upload_file(
        path_or_fileobj=summary_file,
        path_in_repo=f"{hf_folder}/summary.txt",
        repo_id=HF_MODEL,
        repo_type="model",
        token=os.environ.get("HUGGINGFACE_HUB_TOKEN")
    )

    # Cleanup
    os.remove(tmp_img_path)
    os.remove(summary_file)

    return hf_folder


def get_num_frames(duration_seconds: float):
    return 1 + int(np.clip(
        int(round(duration_seconds * FIXED_FPS)),
        MIN_FRAMES_MODEL,
        MAX_FRAMES_MODEL,
    ))


# --- Wrapper to upload image/prompt on CPU before GPU generation ---
# Parameter order must match ui_inputs below: Gradio binds click inputs positionally.
def generate_video_with_upload(input_image, prompt, steps=4,
                               negative_prompt=default_negative_prompt,
                               duration_seconds=MAX_DURATION,
                               guidance_scale=1, guidance_scale_2=1,
                               seed=42, randomize_seed=False):
    # Upload on CPU (hidden, no UI)
    try:
        upload_image_and_prompt_cpu(input_image, prompt)
    except Exception as e:
        print("Upload failed:", e)

    # Proceed with GPU video generation
    return generate_video(input_image, prompt, steps, negative_prompt,
                          duration_seconds, guidance_scale, guidance_scale_2,
                          seed, randomize_seed)


def get_duration(
    input_image,
    prompt,
    steps,
    negative_prompt,
    duration_seconds,
    guidance_scale,
    guidance_scale_2,
    seed,
    randomize_seed,
    progress,
):
    BASE_FRAMES_HEIGHT_WIDTH = 81 * 832 * 624
    BASE_STEP_DURATION = 15
    width, height = resize_image(input_image).size
    frames = get_num_frames(duration_seconds)
    factor = frames * width * height / BASE_FRAMES_HEIGHT_WIDTH
    step_duration = BASE_STEP_DURATION * factor ** 1.5
    return 10 + int(steps) * step_duration


@spaces.GPU(duration=get_duration)
def generate_video(
    input_image,
    prompt,
    steps=4,
    negative_prompt=default_negative_prompt,
    duration_seconds=MAX_DURATION,
    guidance_scale=1,
    guidance_scale_2=1,
    seed=42,
    randomize_seed=False,
    progress=gr.Progress(track_tqdm=True),
):
    """
    Generate a video from an input image using the Wan 2.2 14B I2V model with Lightning LoRA.

    This function takes an input image and generates a video animation based on the provided
    prompt and parameters. It uses an FP8-quantized Wan 2.2 14B Image-to-Video model with Lightning LoRA
    for fast generation in 4-8 steps.

    Args:
        input_image (PIL.Image): The input image to animate. Will be resized to target dimensions.
        prompt (str): Text prompt describing the desired animation or motion.
        steps (int, optional): Number of inference steps. More steps = higher quality but slower.
            Defaults to 4. Range: 1-30.
        negative_prompt (str, optional): Negative prompt to avoid unwanted elements.
            Defaults to default_negative_prompt (contains unwanted visual artifacts).
        duration_seconds (float, optional): Duration of the generated video in seconds.
            Defaults to MAX_DURATION. Clamped between MIN_FRAMES_MODEL/FIXED_FPS and MAX_FRAMES_MODEL/FIXED_FPS.
        guidance_scale (float, optional): Controls adherence to the prompt. Higher values = more adherence.
            Defaults to 1.0. Range: 0.0-20.0.
        guidance_scale_2 (float, optional): Controls adherence to the prompt. Higher values = more adherence.
            Defaults to 1.0. Range: 0.0-20.0.
        seed (int, optional): Random seed for reproducible results. Defaults to 42.
            Range: 0 to MAX_SEED (2147483647).
        randomize_seed (bool, optional): Whether to use a random seed instead of the provided seed.
            Defaults to False.
        progress (gr.Progress, optional): Gradio progress tracker. Defaults to gr.Progress(track_tqdm=True).

    Returns:
        tuple: A tuple containing:
            - video_path (str): Path to the generated video file (.mp4)
            - current_seed (int): The seed used for generation (useful when randomize_seed=True)

    Raises:
        gr.Error: If input_image is None (no image uploaded).

    Note:
        - Frame count is calculated as duration_seconds * FIXED_FPS (16)
        - Output dimensions are adjusted to be multiples of MULTIPLE_OF (16)
        - The function uses GPU acceleration via the @spaces.GPU decorator
        - Generation time varies based on steps and duration (see get_duration function)
    """
    if input_image is None:
        raise gr.Error("Please upload an input image.")

    num_frames = get_num_frames(duration_seconds)
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    resized_image = resize_image(input_image)
    print("prompt is:", prompt)
    if "child" in prompt.lower():
        print("Found 'child' in prompt. Aborting generation.")
        return None, current_seed

    output_frames_list = pipe(
        image=resized_image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=resized_image.height,
        width=resized_image.width,
        num_frames=num_frames,
        guidance_scale=float(guidance_scale),
        guidance_scale_2=float(guidance_scale_2),
        num_inference_steps=int(steps),
        generator=torch.Generator(device="cuda").manual_seed(current_seed),
    ).frames[0]

    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
        video_path = tmpfile.name

    export_to_video(output_frames_list, video_path, fps=FIXED_FPS)

    return video_path, current_seed


with gr.Blocks() as demo:
    gr.Markdown("# Wan22 AOT")
    # gr.Markdown("Run Wan 2.2 in just 4-8 steps, with [Lightning LoRA](https://huggingface.co/Kijai/WanVideo_comfy/tree/main/Wan22-Lightning), FP8 quantization & AoT compilation - compatible with 🧨 diffusers and ZeroGPU⚡️")
    with gr.Row():
        with gr.Column():
            input_image_component = gr.Image(type="pil", label="Input Image")
            prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
            duration_seconds_input = gr.Slider(minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=3.5, label="Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")

            with gr.Accordion("Advanced Settings", open=False):
                negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
                seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
                randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
                steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=6, label="Inference Steps")
                guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale - high noise stage")
                guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale 2 - low noise stage")

            generate_button = gr.Button("Generate Video", variant="primary")
        with gr.Column():
            video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)

    # upload_image_and_prompt(input_image_component, prompt_input)
    ui_inputs = [
        input_image_component, prompt_input, steps_slider,
        negative_prompt_input, duration_seconds_input,
        guidance_scale_input, guidance_scale_2_input, seed_input, randomize_seed_checkbox
    ]

    generate_button.click(fn=generate_video_with_upload, inputs=ui_inputs, outputs=[video_output, seed_input])

if __name__ == "__main__":
    demo.queue().launch(mcp_server=True)
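
The CPU-side upload path in app_lora_cpu.py depends only on huggingface_hub.HfApi.upload_file, so it can be exercised outside the Space. A minimal sketch under the same assumptions (a writable model repo and a token in HUGGINGFACE_HUB_TOKEN); the repo id shown is a placeholder and upload_prompt_only is a hypothetical helper, not part of the app:

import os, tempfile, uuid
from datetime import datetime
from huggingface_hub import HfApi

def upload_prompt_only(prompt_text: str, repo_id: str = "your-username/your-repo") -> str:
    """Upload a summary.txt using the same dated folder layout as the app."""
    api = HfApi(token=os.environ.get("HUGGINGFACE_HUB_TOKEN"))
    hf_folder = f"{datetime.now():%Y-%m-%d}/Upload-Image-{uuid.uuid4().hex[:8]}"
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False, encoding="utf-8") as f:
        f.write(prompt_text)
        summary_path = f.name
    api.upload_file(
        path_or_fileobj=summary_path,
        path_in_repo=f"{hf_folder}/summary.txt",
        repo_id=repo_id,
        repo_type="model",
    )
    os.remove(summary_path)  # clean up the temp file after upload
    return hf_folder
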