r3gm committed on
Commit
470848d
·
verified ·
1 Parent(s): 8c42839

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -45
app.py CHANGED
@@ -54,55 +54,13 @@ SCHEDULER_MAP = {
54
  "DPMSolverSinglestep": DPMSolverSinglestepScheduler,
55
  }
56
 
57
-
58
  pipe = WanImageToVideoPipeline.from_pretrained(
59
- MODEL_ID,
60
- transformer=WanTransformer3DModel.from_pretrained(
61
- 'cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
62
- subfolder='transformer',
63
- torch_dtype=torch.bfloat16,
64
- device_map='cuda',
65
- ),
66
- transformer_2=WanTransformer3DModel.from_pretrained(
67
- 'cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
68
- subfolder='transformer_2',
69
- torch_dtype=torch.bfloat16,
70
- device_map='cuda',
71
- ),
72
  torch_dtype=torch.bfloat16,
73
  ).to('cuda')
74
-
75
  original_scheduler = copy.deepcopy(pipe.scheduler)
76
  print(original_scheduler)
77
 
78
- pipe.load_lora_weights(
79
- "Kijai/WanVideo_comfy",
80
- weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
81
- adapter_name="lightx2v"
82
- )
83
- kwargs_lora = {}
84
- kwargs_lora["load_into_transformer_2"] = True
85
- pipe.load_lora_weights(
86
- "Kijai/WanVideo_comfy",
87
- weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
88
- adapter_name="lightx2v_2", **kwargs_lora
89
- )
90
-
91
- pipe.set_adapters(["lightx2v", "lightx2v_2"], adapter_weights=[1., 1.])
92
- pipe.fuse_lora(adapter_names=["lightx2v"], lora_scale=3., components=["transformer"])
93
- pipe.fuse_lora(adapter_names=["lightx2v_2"], lora_scale=1., components=["transformer_2"])
94
- pipe.unload_lora_weights()
95
-
96
- # livewallpaper
97
- pipe.load_lora_weights(
98
- "voxvici/flux-lora",
99
- weight_name="livewallpaper_wan22_14b_i2v_low_model_0_1_e26.safetensors",
100
- adapter_name="livewallpaper"
101
- )
102
- pipe.set_adapters(["livewallpaper"], adapter_weights=[1.])
103
- pipe.fuse_lora(adapter_names=["livewallpaper"], lora_scale=.7, components=["transformer"])
104
- pipe.unload_lora_weights()
105
-
106
  quantize_(pipe.text_encoder, Int8WeightOnlyConfig())
107
  quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
108
  quantize_(pipe.transformer_2, Float8DynamicActivationFloat8WeightConfig())
@@ -342,9 +300,9 @@ def generate_video(
342
 
343
 
344
  with gr.Blocks() as demo:
345
- gr.Markdown("# Wan 2.2 I2V (14B)")
346
  gr.Markdown("## ℹ️ **A Note on Performance:** This version prioritizes a straightforward setup over maximum speed, so performance may vary.")
347
- gr.Markdown("run Wan 2.2 in just 4-8 steps, with [Lightning LoRA](https://huggingface.co/Kijai/WanVideo_comfy/tree/main/Wan22-Lightning), fp8 quantization & AoT compilation - compatible with 🧨 diffusers and ZeroGPU⚡️")
348
  with gr.Row():
349
  with gr.Column():
350
  input_image_component = gr.Image(type="pil", label="Input Image")
 
54
  "DPMSolverSinglestep": DPMSolverSinglestepScheduler,
55
  }
56
 
 
57
  pipe = WanImageToVideoPipeline.from_pretrained(
58
+ "TestOrganizationPleaseIgnore/WAMU_v1_WAN2.2_I2V_LIGHTNING",
 
 
 
 
 
 
 
 
 
 
 
 
59
  torch_dtype=torch.bfloat16,
60
  ).to('cuda')
 
61
  original_scheduler = copy.deepcopy(pipe.scheduler)
62
  print(original_scheduler)
63
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
  quantize_(pipe.text_encoder, Int8WeightOnlyConfig())
65
  quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
66
  quantize_(pipe.transformer_2, Float8DynamicActivationFloat8WeightConfig())
 
300
 
301
 
302
  with gr.Blocks() as demo:
303
+ gr.Markdown("# WAMU - Wan 2.2 I2V (14B)")
304
  gr.Markdown("## ℹ️ **A Note on Performance:** This version prioritizes a straightforward setup over maximum speed, so performance may vary.")
305
+ gr.Markdown("run Wan 2.2 in just 4-8 steps, fp8 quantization & AoT compilation - compatible with 🧨 diffusers and ZeroGPU⚡️")
306
  with gr.Row():
307
  with gr.Column():
308
  input_image_component = gr.Image(type="pil", label="Input Image")