multimodalart HF Staff committed on
Commit
53a376c
·
verified ·
1 Parent(s): 26917c0

Update app.py

Browse files
Files changed (1)
  1. app.py +22 -12
app.py CHANGED
@@ -29,24 +29,34 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
29
 
30
  pipe = QwenImageEditPlusPipeline.from_pretrained(
31
  "Qwen/Qwen-Image-Edit-2509",
32
- transformer=QwenImageTransformer2DModel.from_pretrained(
33
- "linoyts/Qwen-Image-Edit-Rapid-AIO",
34
- subfolder='transformer',
35
- torch_dtype=dtype,
36
- device_map='cuda'
37
- ),
38
  torch_dtype=dtype
39
  ).to(device)
40
 
41
- # Load Light Migration LoRA
42
  pipe.load_lora_weights(
43
  "dx8152/Qwen-Edit-2509-Light-Migration",
44
  weight_name="参考色调.safetensors",
45
  adapter_name="light_migration"
46
  )
47
 
48
- pipe.set_adapters(["light_migration"], adapter_weights=[1.])
49
- pipe.fuse_lora(adapter_names=["light_migration"], lora_scale=1.25)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
  pipe.unload_lora_weights()
51
 
52
  #spaces.aoti_blocks_load(pipe.transformer, "zerogpu-aoti/Qwen-Image", variant="fa3")
@@ -74,10 +84,10 @@ def infer_light_migration(
74
  seed: int = 0,
75
  randomize_seed: bool = True,
76
  true_guidance_scale: float = 1.0,
77
- num_inference_steps: int = 4,
78
  height: Optional[int] = None,
79
  width: Optional[int] = None,
80
- progress: gr.Progress = gr.Progress(track_tqdm=True)
81
  ) -> Tuple[Image.Image, int]:
82
  """
83
  Transfer lighting and color tones from a reference image to a source image
@@ -252,7 +262,7 @@ with gr.Blocks() as demo:
252
  minimum=1,
253
  maximum=40,
254
  step=1,
255
- value=4
256
  )
257
  height = gr.Slider(
258
  label="Height",
 
29
 
30
  pipe = QwenImageEditPlusPipeline.from_pretrained(
31
  "Qwen/Qwen-Image-Edit-2509",
 
 
 
 
 
 
32
  torch_dtype=dtype
33
  ).to(device)
34
 
35
+ # Load first LoRA
36
  pipe.load_lora_weights(
37
  "dx8152/Qwen-Edit-2509-Light-Migration",
38
  weight_name="参考色调.safetensors",
39
  adapter_name="light_migration"
40
  )
41
 
42
+ # Load second LoRA
43
+ pipe.load_lora_weights(
44
+ "lightx2v/Qwen-Image-Lightning",
45
+ weight_name="Qwen-Image-Lightning-8steps-V2.0-bf16.safetensors",
46
+ adapter_name="lightning"
47
+ )
48
+
49
+ # Set both adapters active with their weights
50
+ pipe.set_adapters(
51
+ ["light_migration", "lightning"],
52
+ adapter_weights=[1.0, 1.0]
53
+ )
54
+
55
+ # Fuse both LoRAs (adjust scales as needed)
56
+ pipe.fuse_lora(
57
+ adapter_names=["light_migration", "lightning"],
58
+ lora_scale=1.0
59
+ )
60
  pipe.unload_lora_weights()
61
 
62
  #spaces.aoti_blocks_load(pipe.transformer, "zerogpu-aoti/Qwen-Image", variant="fa3")
 
84
  seed: int = 0,
85
  randomize_seed: bool = True,
86
  true_guidance_scale: float = 1.0,
87
+ num_inference_steps: int = 8,
88
  height: Optional[int] = None,
89
  width: Optional[int] = None,
90
+ progress: Optional[gr.Progress] = gr.Progress(track_tqdm=True)
91
  ) -> Tuple[Image.Image, int]:
92
  """
93
  Transfer lighting and color tones from a reference image to a source image
 
262
  minimum=1,
263
  maximum=40,
264
  step=1,
265
+ value=8
266
  )
267
  height = gr.Slider(
268
  label="Height",