prithivMLmods committed on
Commit
ec03d3a
·
verified ·
1 Parent(s): 0f7d5a1

update app

Browse files
Files changed (1) hide show
  1. app.py +28 -20
app.py CHANGED
@@ -80,10 +80,6 @@ orange_red_theme = OrangeRedTheme()
80
 
81
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
82
 
83
- print("CUDA_VISIBLE_DEVICES=", os.environ.get("CUDA_VISIBLE_DEVICES"))
84
- print("torch.__version__ =", torch.__version__)
85
- print("torch.version.cuda =", torch.version.cuda)
86
- print("cuda available:", torch.cuda.is_available())
87
  print("Using device:", device)
88
 
89
  from diffusers import FlowMatchEulerDiscreteScheduler
@@ -104,15 +100,29 @@ pipe = QwenImageEditPlusPipeline.from_pretrained(
104
  torch_dtype=dtype
105
  ).to(device)
106
 
 
 
 
 
 
 
 
 
 
 
107
  try:
108
  pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
109
- print("Flash Attention 3 Processor set successfully.")
110
  except Exception as e:
111
- print(f"Warning: Could not set FA3 processor: {e}")
112
 
113
  MAX_SEED = np.iinfo(np.int32).max
114
 
115
  ADAPTER_SPECS = {
 
 
 
 
 
116
  "Photo-to-Anime": {
117
  "repo": "autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime",
118
  "weights": "Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors",
@@ -159,15 +169,15 @@ def infer(
159
  if input_image is None:
160
  raise gr.Error("Please upload an image to edit.")
161
 
162
- if lora_adapter == "Base Model":
163
- pipe.set_adapters([], adapter_weights=[])
164
- else:
165
- spec = ADAPTER_SPECS.get(lora_adapter)
166
- if not spec:
167
- raise gr.Error(f"Configuration not found for: {lora_adapter}")
168
 
169
- adapter_name = spec["adapter_name"]
170
 
 
 
 
171
  if adapter_name not in LOADED_ADAPTERS:
172
  print(f"--- Downloading and Loading Adapter: {lora_adapter} ---")
173
  try:
@@ -179,9 +189,7 @@ def infer(
179
  LOADED_ADAPTERS.add(adapter_name)
180
  except Exception as e:
181
  raise gr.Error(f"Failed to load adapter {lora_adapter}: {e}")
182
- else:
183
- print(f"--- Adapter {lora_adapter} is already loaded. ---")
184
-
185
  pipe.set_adapters([adapter_name], adapter_weights=[1.0])
186
 
187
  if randomize_seed:
@@ -234,8 +242,8 @@ css="""
234
 
235
  with gr.Blocks() as demo:
236
  with gr.Column(elem_id="col-container"):
237
- gr.Markdown("# **Qwen-Image-Edit-2511-LoRA-Fast**", elem_id="main-title")
238
- gr.Markdown("Perform diverse image edits using specialized [LoRA](https://huggingface.co/models?other=base_model:adapter:Qwen/Qwen-Image-Edit-2511) adapters for the [Qwen-Image-Edit](https://huggingface.co/Qwen/Qwen-Image-Edit-2511) model.")
239
 
240
  with gr.Row(equal_height=True):
241
  with gr.Column():
@@ -253,10 +261,9 @@ with gr.Blocks() as demo:
253
  output_image = gr.Image(label="Output Image", interactive=False, format="png", height=353)
254
 
255
  with gr.Row():
256
- choices_list = ["Base Model"] + list(ADAPTER_SPECS.keys())
257
  lora_adapter = gr.Dropdown(
258
  label="Choose Editing Style",
259
- choices=choices_list,
260
  value="Base Model"
261
  )
262
  with gr.Accordion("Advanced Settings", open=False, visible=False):
@@ -267,6 +274,7 @@ with gr.Blocks() as demo:
267
 
268
  gr.Examples(
269
  examples=[
 
270
  ["examples/1.jpg", "Transform into anime.", "Photo-to-Anime"],
271
  ],
272
  inputs=[input_image, prompt, lora_adapter],
 
80
 
81
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
82
 
 
 
 
 
83
  print("Using device:", device)
84
 
85
  from diffusers import FlowMatchEulerDiscreteScheduler
 
100
  torch_dtype=dtype
101
  ).to(device)
102
 
103
+ try:
104
+ pipe.enable_vae_tiling()
105
+ except Exception as e:
106
+ print(f"VAE Tiling warning: {e}")
107
+
108
+ pipe.load_lora_weights("lightx2v/Qwen-Image-Lightning",
109
+ weight_name="Qwen-Image-Lightning-4steps-V2.0-bf16.safetensors",
110
+ adapter_name="lightning")
111
+ pipe.fuse_lora(adapter_names=["lightning"], lora_scale=1.0)
112
+
113
  try:
114
  pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
 
115
  except Exception as e:
116
+ print(f"FA3 Warning: {e}")
117
 
118
  MAX_SEED = np.iinfo(np.int32).max
119
 
120
  ADAPTER_SPECS = {
121
+ "Base Model": {
122
+ "repo": None,
123
+ "weights": None,
124
+ "adapter_name": "base"
125
+ },
126
  "Photo-to-Anime": {
127
  "repo": "autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime",
128
  "weights": "Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors",
 
169
  if input_image is None:
170
  raise gr.Error("Please upload an image to edit.")
171
 
172
+ spec = ADAPTER_SPECS.get(lora_adapter)
173
+ if not spec:
174
+ raise gr.Error(f"Configuration not found for: {lora_adapter}")
 
 
 
175
 
176
+ adapter_name = spec["adapter_name"]
177
 
178
+ if adapter_name == "base":
179
+ pipe.set_adapters([], adapter_weights=[])
180
+ else:
181
  if adapter_name not in LOADED_ADAPTERS:
182
  print(f"--- Downloading and Loading Adapter: {lora_adapter} ---")
183
  try:
 
189
  LOADED_ADAPTERS.add(adapter_name)
190
  except Exception as e:
191
  raise gr.Error(f"Failed to load adapter {lora_adapter}: {e}")
192
+
 
 
193
  pipe.set_adapters([adapter_name], adapter_weights=[1.0])
194
 
195
  if randomize_seed:
 
242
 
243
  with gr.Blocks() as demo:
244
  with gr.Column(elem_id="col-container"):
245
+ gr.Markdown("# **Qwen-Image-Edit-2511-LoRAs-Fast**", elem_id="main-title")
246
+ gr.Markdown("Perform diverse image edits using specialized [LoRA](https://huggingface.co/models?other=base_model:adapter:Qwen/Qwen-Image-Edit-2509) adapters for the [Qwen-Image-Edit](https://huggingface.co/Qwen/Qwen-Image-Edit-2511) model.")
247
 
248
  with gr.Row(equal_height=True):
249
  with gr.Column():
 
261
  output_image = gr.Image(label="Output Image", interactive=False, format="png", height=353)
262
 
263
  with gr.Row():
 
264
  lora_adapter = gr.Dropdown(
265
  label="Choose Editing Style",
266
+ choices=list(ADAPTER_SPECS.keys()),
267
  value="Base Model"
268
  )
269
  with gr.Accordion("Advanced Settings", open=False, visible=False):
 
274
 
275
  gr.Examples(
276
  examples=[
277
+ ["examples/1.jpg", "Make it snowy.", "Base Model"],
278
  ["examples/1.jpg", "Transform into anime.", "Photo-to-Anime"],
279
  ],
280
  inputs=[input_image, prompt, lora_adapter],