GiorgioV committed on
Commit 5bc650d · verified · 1 Parent(s): ca25b4f

Upload app_lora.py

Files changed (1)
  1. app_lora.py +374 -0
app_lora.py ADDED
@@ -0,0 +1,374 @@
+ import spaces
+ import torch
+ from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
+ from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
+ from diffusers.utils.export_utils import export_to_video
+ import gradio as gr
+ import tempfile
+ import numpy as np
+ from PIL import Image
+ import random
+ import gc
+ import os
+
+ from torchao.quantization import quantize_
+ from torchao.quantization import Float8DynamicActivationFloat8WeightConfig
+ from torchao.quantization import Int8WeightOnlyConfig
+
+ import aoti
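+ # NOTE: `aoti` appears to be a local helper module shipped with this Space (not a PyPI
+ # package); it is used below to load ahead-of-time compiled transformer blocks.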
+
+
+ MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"
+
+ MAX_DIM = 832
+ MIN_DIM = 480
+ SQUARE_DIM = 640
+ MULTIPLE_OF = 16
+
+ MAX_SEED = np.iinfo(np.int32).max
+
+ FIXED_FPS = 16
+ MIN_FRAMES_MODEL = 8
+ MAX_FRAMES_MODEL = 80
+
+ MIN_DURATION = round(MIN_FRAMES_MODEL/FIXED_FPS,1)
+ MAX_DURATION = round(MAX_FRAMES_MODEL/FIXED_FPS,1)
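+ # With FIXED_FPS = 16 and the 8-80 frame range, MIN_DURATION/MAX_DURATION
+ # work out to 0.5 and 5.0 seconds.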
+
+
+ pipe = WanImageToVideoPipeline.from_pretrained(MODEL_ID,
+     transformer=WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
+         subfolder='transformer',
+         torch_dtype=torch.bfloat16,
+         device_map='cuda',
+     ),
+     transformer_2=WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
+         subfolder='transformer_2',
+         torch_dtype=torch.bfloat16,
+         device_map='cuda',
+     ),
+     torch_dtype=torch.bfloat16,
+ ).to('cuda')
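+ # Wan 2.2 A14B uses two expert transformers: `transformer` denoises the
+ # high-noise timesteps and `transformer_2` the low-noise timesteps.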
+
+ # Load and fuse your LoRA models
+ pipe.load_lora_weights(
+     "Kijai/WanVideo_comfy",
+     weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
+     adapter_name="lightx2v"
+ )
+ kwargs_lora = {}
+ kwargs_lora["load_into_transformer_2"] = True
+
+
+ pipe.load_lora_weights(
+     "Kijai/WanVideo_comfy",
+     weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
+     adapter_name="lightx2v_2", **kwargs_lora
+ )
+
+
+
+ # New: load the high noise LoRA you provided
+
+ pipe.load_lora_weights(
+     "rahul7star/wan2.2Lora",
+     weight_name="DR34ML4Y_I2V_14B_HIGH.safetensors",
+     adapter_name="high_noise_lora",
+     token=os.environ.get("HF_TOKEN")
+ )
+ # New: load the low noise LoRA you provided
+ pipe.load_lora_weights(
+     "rahul7star/wan2.2Lora",
+     weight_name="DR34ML4Y_I2V_14B_LOW.safetensors",
+     adapter_name="low_noise_lora",
+     token=os.environ.get("HF_TOKEN"),
+     load_into_transformer_2=True
+ )
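+ # The lightx2v adapters are Lightning distillation LoRAs that enable 4-8 step
+ # inference; the DR34ML4Y pair is the user-supplied style LoRA, loaded once per
+ # matching high-/low-noise expert.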
+
+
+ ## this still works (alternative LoRA pair, commented out below):
+ # pipe.load_lora_weights(
+ #     "rahul7star/wan2.2Lora",
+ #     weight_name="wan2.2_i2v_highnoise_pov_missionary_v1.0.safetensors",
+ #     adapter_name="high_noise_lora",
+ #     token=os.environ.get("HF_TOKEN")
+ # )
+ # # New: load the low noise LoRA you provided
+ # pipe.load_lora_weights(
+ #     "rahul7star/wan2.2Lora",
+ #     weight_name="wan2.2_i2v_lownoise_pov_missionary_v1.0.safetensors",
+ #     adapter_name="low_noise_lora",
+ #     token=os.environ.get("HF_TOKEN"),
+ #     load_into_transformer_2=True
+ # )
+
+ pipe.set_adapters(["lightx2v", "lightx2v_2", "high_noise_lora", "low_noise_lora"], adapter_weights=[1., 1., 1., 1.])
+ # Changed the lora_scale
+ pipe.fuse_lora(adapter_names=["lightx2v", "high_noise_lora"], lora_scales=[3.0, 3.0], components=["transformer"])
+ # Changed the lora_scale
+ pipe.fuse_lora(adapter_names=["lightx2v_2", "low_noise_lora"], lora_scales=[1.0, 1.0], components=["transformer_2"])
+ pipe.unload_lora_weights()
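+ # fuse_lora bakes the adapter deltas into the base weights, so the LoRAs can be
+ # unloaded before quantization and compilation below.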
+
+ quantize_(pipe.text_encoder, Int8WeightOnlyConfig())
+ quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
+ quantize_(pipe.transformer_2, Float8DynamicActivationFloat8WeightConfig())
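+ # int8 weight-only quantization for the text encoder; fp8 dynamic-activation /
+ # fp8-weight quantization for both transformer experts.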
+
+ aoti.aoti_blocks_load(pipe.transformer, 'rahul7star/WanAot', variant='fp8da')
+ aoti.aoti_blocks_load(pipe.transformer_2, 'rahul7star/WanAot', variant='fp8da')
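+ # Swaps in ahead-of-time (AOTInductor) compiled transformer blocks from the
+ # rahul7star/WanAot repo; the 'fp8da' variant matches the fp8 dynamic-activation
+ # quantization applied above.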
+
+
+ default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
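+ # The default negative prompt below is the standard Chinese one for Wan, roughly:
+ # "garish colors, overexposed, static, blurry details, subtitles, style, artwork,
+ # painting, frame, stillness, overall gray, worst quality, low quality, JPEG
+ # compression artifacts, ugly, mutilated, extra fingers, poorly drawn hands, poorly
+ # drawn face, deformed, disfigured, malformed limbs, fused fingers, motionless frame,
+ # cluttered background, three legs, many people in background, walking backwards".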
+ default_negative_prompt = "色调艳丽, 过曝, 静态, 细节模糊不清, 字幕, 风格, 作品, 画作, 画面, 静止, 整体发灰, 最差质量, 低质量, JPEG压缩残留, 丑陋的, 残缺的, 多余的手指, 画得不好的手部, 画得不好的脸部, 畸形的, 毁容的, 形态畸形的肢体, 手指融合, 静止不动的画面, 杂乱的背景, 三条腿, 背景人很多, 倒着走"
+
+ def resize_image(image: Image.Image) -> Image.Image:
+     """
+     Resizes an image to fit within the model's constraints, preserving aspect ratio as much as possible.
+     """
+     width, height = image.size
+
+     # Handle square case
+     if width == height:
+         return image.resize((SQUARE_DIM, SQUARE_DIM), Image.LANCZOS)
+
+     aspect_ratio = width / height
+
+     MAX_ASPECT_RATIO = MAX_DIM / MIN_DIM
+     MIN_ASPECT_RATIO = MIN_DIM / MAX_DIM
+
+     image_to_resize = image
+
+     if aspect_ratio > MAX_ASPECT_RATIO:
+         # Very wide image -> crop width to fit 832x480 aspect ratio
+         target_w, target_h = MAX_DIM, MIN_DIM
+         crop_width = int(round(height * MAX_ASPECT_RATIO))
+         left = (width - crop_width) // 2
+         image_to_resize = image.crop((left, 0, left + crop_width, height))
+     elif aspect_ratio < MIN_ASPECT_RATIO:
+         # Very tall image -> crop height to fit 480x832 aspect ratio
+         target_w, target_h = MIN_DIM, MAX_DIM
+         crop_height = int(round(width / MIN_ASPECT_RATIO))
+         top = (height - crop_height) // 2
+         image_to_resize = image.crop((0, top, width, top + crop_height))
+     else:
+         if width > height:  # Landscape
+             target_w = MAX_DIM
+             target_h = int(round(target_w / aspect_ratio))
+         else:  # Portrait
+             target_h = MAX_DIM
+             target_w = int(round(target_h * aspect_ratio))
+
+     final_w = round(target_w / MULTIPLE_OF) * MULTIPLE_OF
+     final_h = round(target_h / MULTIPLE_OF) * MULTIPLE_OF
+
+     final_w = max(MIN_DIM, min(MAX_DIM, final_w))
+     final_h = max(MIN_DIM, min(MAX_DIM, final_h))
+
+     return image_to_resize.resize((final_w, final_h), Image.LANCZOS)
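+ # Worked example: a 1920x1080 input (aspect 1.78 > 832/480 ≈ 1.73) is center-cropped
+ # to 1872x1080 and resized to 832x480; a 1280x960 input (aspect 4:3) maps to 832x624.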
+
+
+ HF_MODEL = os.environ.get("HF_UPLOAD_REPO", "rahul7star/wan22-aot-image")
+ def upload_image_and_prompt(input_image, prompt_text) -> str:
+     """
+     Upload an image and a prompt text to Hugging Face Hub in a date-based folder.
+
+     Args:
+         input_image (PIL.Image.Image or path-like): The image to upload.
+         prompt_text (str): Text prompt or summary associated with the image.
+     Returns:
+         str: Hugging Face folder path where the image and prompt were uploaded.
+     """
+     import tempfile
+     import os
+     import uuid
+     from datetime import datetime
+     from huggingface_hub import upload_file
+
+     # Create a date-based folder on HF
+     today_str = datetime.now().strftime("%Y-%m-%d")
+     unique_subfolder = f"Upload-Image-{uuid.uuid4().hex[:8]}"
+     hf_folder = f"{today_str}/{unique_subfolder}"
+
+     # Save the image temporarily
+     with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp_img:
+         if isinstance(input_image, str):
+             # If path provided, just copy
+             import shutil
+             shutil.copy(input_image, tmp_img.name)
+         else:
+             # PIL.Image.Image
+             input_image.save(tmp_img.name, format="PNG")
+         tmp_img_path = tmp_img.name
+
+     # Upload image
+     image_filename = "input_image.png"
+     image_hf_path = f"{hf_folder}/{image_filename}"
+     upload_file(
+         path_or_fileobj=tmp_img_path,
+         path_in_repo=image_hf_path,
+         repo_id=HF_MODEL,
+         repo_type="model",
+         token=os.environ.get("HUGGINGFACE_HUB_TOKEN"),
+     )
+
+     # Upload prompt as summary.txt
+     summary_file = tempfile.NamedTemporaryFile(delete=False, suffix=".txt").name
+     with open(summary_file, "w", encoding="utf-8") as f:
+         f.write(prompt_text)
+     summary_hf_path = f"{hf_folder}/summary.txt"
+     upload_file(
+         path_or_fileobj=summary_file,
+         path_in_repo=summary_hf_path,
+         repo_id=HF_MODEL,
+         repo_type="model",
+         token=os.environ.get("HUGGINGFACE_HUB_TOKEN"),
+     )
+
+     # Cleanup
+     os.remove(tmp_img_path)
+     os.remove(summary_file)
+
+     return hf_folder
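+ # NOTE: this helper is defined but not currently wired into the UI; the call in the
+ # Blocks section below is commented out.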
+
+
+ def get_num_frames(duration_seconds: float):
+     return 1 + int(np.clip(
+         int(round(duration_seconds * FIXED_FPS)),
+         MIN_FRAMES_MODEL,
+         MAX_FRAMES_MODEL,
+     ))
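+ # Example: 3.5 s at 16 fps -> round(56.0) = 56, clipped to [8, 80], plus one
+ # leading frame = 57 frames.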
+
+
+ def get_duration(
+     input_image,
+     prompt,
+     steps,
+     negative_prompt,
+     duration_seconds,
+     guidance_scale,
+     guidance_scale_2,
+     seed,
+     randomize_seed,
+     progress,
+ ):
+     BASE_FRAMES_HEIGHT_WIDTH = 81 * 832 * 624
+     BASE_STEP_DURATION = 15
+     width, height = resize_image(input_image).size
+     frames = get_num_frames(duration_seconds)
+     factor = frames * width * height / BASE_FRAMES_HEIGHT_WIDTH
+     step_duration = BASE_STEP_DURATION * factor ** 1.5
+     return 10 + int(steps) * step_duration
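+ # Heuristic ZeroGPU time budget for @spaces.GPU: a 15 s/step baseline (at 81 frames
+ # of 832x624) scaled by the pixel-volume ratio to the power 1.5, plus 10 s overhead.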
+
+
+
+ @spaces.GPU(duration=get_duration)
+ def generate_video(
+     input_image,
+     prompt,
+     steps=4,
+     negative_prompt=default_negative_prompt,
+     duration_seconds=MAX_DURATION,
+     guidance_scale=1,
+     guidance_scale_2=1,
+     seed=42,
+     randomize_seed=False,
+     progress=gr.Progress(track_tqdm=True),
+ ):
+     """
+     Generate a video from an input image using the Wan 2.2 14B I2V model with Lightning LoRA.
+
+     This function takes an input image and generates a video animation based on the provided
+     prompt and parameters. It uses an FP8-quantized Wan 2.2 14B Image-to-Video model with Lightning LoRA
+     for fast generation in 4-8 steps.
+
+     Args:
+         input_image (PIL.Image): The input image to animate. Will be resized to target dimensions.
+         prompt (str): Text prompt describing the desired animation or motion.
+         steps (int, optional): Number of inference steps. More steps = higher quality but slower.
+             Defaults to 4. Range: 1-30.
+         negative_prompt (str, optional): Negative prompt to avoid unwanted elements.
+             Defaults to default_negative_prompt (contains unwanted visual artifacts).
+         duration_seconds (float, optional): Duration of the generated video in seconds.
+             Defaults to MAX_DURATION (5.0). Clamped between MIN_FRAMES_MODEL/FIXED_FPS and MAX_FRAMES_MODEL/FIXED_FPS.
+         guidance_scale (float, optional): Controls adherence to the prompt in the high-noise stage. Higher values = more adherence.
+             Defaults to 1.0. Range: 0.0-20.0.
+         guidance_scale_2 (float, optional): Controls adherence to the prompt in the low-noise stage. Higher values = more adherence.
+             Defaults to 1.0. Range: 0.0-20.0.
+         seed (int, optional): Random seed for reproducible results. Defaults to 42.
+             Range: 0 to MAX_SEED (2147483647).
+         randomize_seed (bool, optional): Whether to use a random seed instead of the provided seed.
+             Defaults to False.
+         progress (gr.Progress, optional): Gradio progress tracker. Defaults to gr.Progress(track_tqdm=True).
+
+     Returns:
+         tuple: A tuple containing:
+             - video_path (str): Path to the generated video file (.mp4)
+             - current_seed (int): The seed used for generation (useful when randomize_seed=True)
+
+     Raises:
+         gr.Error: If input_image is None (no image uploaded).
+
+     Note:
+         - Frame count is calculated as duration_seconds * FIXED_FPS (16)
+         - Output dimensions are adjusted to be multiples of MULTIPLE_OF (16)
+         - The function uses GPU acceleration via the @spaces.GPU decorator
+         - Generation time varies based on steps and duration (see get_duration function)
+     """
+     if input_image is None:
+         raise gr.Error("Please upload an input image.")
+
+     num_frames = get_num_frames(duration_seconds)
+     current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
+     resized_image = resize_image(input_image)
+     print("prompt is")
+     print(prompt)
+
+     output_frames_list = pipe(
+         image=resized_image,
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         height=resized_image.height,
+         width=resized_image.width,
+         num_frames=num_frames,
+         guidance_scale=float(guidance_scale),
+         guidance_scale_2=float(guidance_scale_2),
+         num_inference_steps=int(steps),
+         generator=torch.Generator(device="cuda").manual_seed(current_seed),
+     ).frames[0]
+
+     with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
+         video_path = tmpfile.name
+
+     export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
+
+     return video_path, current_seed
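+ # The NamedTemporaryFile is only used to reserve a unique .mp4 path;
+ # export_to_video then writes the frames to that path.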
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# Wan22 AOT")
+     # gr.Markdown("run Wan 2.2 in just 4-8 steps, with [Lightning LoRA](https://huggingface.co/Kijai/WanVideo_comfy/tree/main/Wan22-Lightning), fp8 quantization & AoT compilation - compatible with 🧨 diffusers and ZeroGPU⚡️")
+     with gr.Row():
+         with gr.Column():
+             input_image_component = gr.Image(type="pil", label="Input Image")
+             prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
+             duration_seconds_input = gr.Slider(minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=3.5, label="Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")
+
+             with gr.Accordion("Advanced Settings", open=False):
+                 negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
+                 seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
+                 randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
+                 steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=6, label="Inference Steps")
+                 guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale - high noise stage")
+                 guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale 2 - low noise stage")
+
+             generate_button = gr.Button("Generate Video", variant="primary")
+         with gr.Column():
+             video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)
+
+     # upload_image_and_prompt(input_image_component, prompt_input)
+     ui_inputs = [
+         input_image_component, prompt_input, steps_slider,
+         negative_prompt_input, duration_seconds_input,
+         guidance_scale_input, guidance_scale_2_input, seed_input, randomize_seed_checkbox
+     ]
+     generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
+
+ if __name__ == "__main__":
+     demo.queue().launch(mcp_server=True)
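+ # mcp_server=True additionally exposes the documented functions (e.g. generate_video)
+ # as MCP tools, a feature of recent Gradio releases.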