prithivMLmods committed on
Commit
ed6cd4c
·
verified ·
1 Parent(s): ec03d3a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -30
app.py CHANGED
@@ -80,6 +80,15 @@ orange_red_theme = OrangeRedTheme()
80
 
81
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
82
 
 
 
 
 
 
 
 
 
 
83
  print("Using device:", device)
84
 
85
  from diffusers import FlowMatchEulerDiscreteScheduler
@@ -100,29 +109,15 @@ pipe = QwenImageEditPlusPipeline.from_pretrained(
100
  torch_dtype=dtype
101
  ).to(device)
102
 
103
- try:
104
- pipe.enable_vae_tiling()
105
- except Exception as e:
106
- print(f"VAE Tiling warning: {e}")
107
-
108
- pipe.load_lora_weights("lightx2v/Qwen-Image-Lightning",
109
- weight_name="Qwen-Image-Lightning-4steps-V2.0-bf16.safetensors",
110
- adapter_name="lightning")
111
- pipe.fuse_lora(adapter_names=["lightning"], lora_scale=1.0)
112
-
113
  try:
114
  pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
 
115
  except Exception as e:
116
- print(f"FA3 Warning: {e}")
117
 
118
  MAX_SEED = np.iinfo(np.int32).max
119
 
120
  ADAPTER_SPECS = {
121
- "Base Model": {
122
- "repo": None,
123
- "weights": None,
124
- "adapter_name": "base"
125
- },
126
  "Photo-to-Anime": {
127
  "repo": "autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime",
128
  "weights": "Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors",
@@ -169,15 +164,15 @@ def infer(
169
  if input_image is None:
170
  raise gr.Error("Please upload an image to edit.")
171
 
172
- spec = ADAPTER_SPECS.get(lora_adapter)
173
- if not spec:
174
- raise gr.Error(f"Configuration not found for: {lora_adapter}")
175
-
176
- adapter_name = spec["adapter_name"]
177
-
178
- if adapter_name == "base":
179
  pipe.set_adapters([], adapter_weights=[])
180
  else:
 
 
 
 
 
 
181
  if adapter_name not in LOADED_ADAPTERS:
182
  print(f"--- Downloading and Loading Adapter: {lora_adapter} ---")
183
  try:
@@ -189,7 +184,9 @@ def infer(
189
  LOADED_ADAPTERS.add(adapter_name)
190
  except Exception as e:
191
  raise gr.Error(f"Failed to load adapter {lora_adapter}: {e}")
192
-
 
 
193
  pipe.set_adapters([adapter_name], adapter_weights=[1.0])
194
 
195
  if randomize_seed:
@@ -242,8 +239,8 @@ css="""
242
 
243
  with gr.Blocks() as demo:
244
  with gr.Column(elem_id="col-container"):
245
- gr.Markdown("# **Qwen-Image-Edit-2511-LoRAs-Fast**", elem_id="main-title")
246
- gr.Markdown("Perform diverse image edits using specialized [LoRA](https://huggingface.co/models?other=base_model:adapter:Qwen/Qwen-Image-Edit-2509) adapters for the [Qwen-Image-Edit](https://huggingface.co/Qwen/Qwen-Image-Edit-2511) model.")
247
 
248
  with gr.Row(equal_height=True):
249
  with gr.Column():
@@ -252,7 +249,7 @@ with gr.Blocks() as demo:
252
  prompt = gr.Text(
253
  label="Edit Prompt",
254
  show_label=True,
255
- placeholder="e.g., transform into anime..",
256
  )
257
 
258
  run_button = gr.Button("Edit Image", variant="primary")
@@ -263,7 +260,7 @@ with gr.Blocks() as demo:
263
  with gr.Row():
264
  lora_adapter = gr.Dropdown(
265
  label="Choose Editing Style",
266
- choices=list(ADAPTER_SPECS.keys()),
267
  value="Base Model"
268
  )
269
  with gr.Accordion("Advanced Settings", open=False, visible=False):
@@ -274,8 +271,8 @@ with gr.Blocks() as demo:
274
 
275
  gr.Examples(
276
  examples=[
277
- ["examples/1.jpg", "Make it snowy.", "Base Model"],
278
- ["examples/1.jpg", "Transform into anime.", "Photo-to-Anime"],
279
  ],
280
  inputs=[input_image, prompt, lora_adapter],
281
  outputs=[output_image, seed],
 
80
 
81
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
82
 
83
+ print("CUDA_VISIBLE_DEVICES=", os.environ.get("CUDA_VISIBLE_DEVICES"))
84
+ print("torch.__version__ =", torch.__version__)
85
+ print("torch.version.cuda =", torch.version.cuda)
86
+ print("cuda available:", torch.cuda.is_available())
87
+ print("cuda device count:", torch.cuda.device_count())
88
+ if torch.cuda.is_available():
89
+ print("current device:", torch.cuda.current_device())
90
+ print("device name:", torch.cuda.get_device_name(torch.cuda.current_device()))
91
+
92
  print("Using device:", device)
93
 
94
  from diffusers import FlowMatchEulerDiscreteScheduler
 
109
  torch_dtype=dtype
110
  ).to(device)
111
 
 
 
 
 
 
 
 
 
 
 
112
  try:
113
  pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
114
+ print("Flash Attention 3 Processor set successfully.")
115
  except Exception as e:
116
+ print(f"Warning: Could not set FA3 processor: {e}")
117
 
118
  MAX_SEED = np.iinfo(np.int32).max
119
 
120
  ADAPTER_SPECS = {
 
 
 
 
 
121
  "Photo-to-Anime": {
122
  "repo": "autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime",
123
  "weights": "Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors",
 
164
  if input_image is None:
165
  raise gr.Error("Please upload an image to edit.")
166
 
167
+ if lora_adapter == "Base Model":
 
 
 
 
 
 
168
  pipe.set_adapters([], adapter_weights=[])
169
  else:
170
+ spec = ADAPTER_SPECS.get(lora_adapter)
171
+ if not spec:
172
+ raise gr.Error(f"Configuration not found for: {lora_adapter}")
173
+
174
+ adapter_name = spec["adapter_name"]
175
+
176
  if adapter_name not in LOADED_ADAPTERS:
177
  print(f"--- Downloading and Loading Adapter: {lora_adapter} ---")
178
  try:
 
184
  LOADED_ADAPTERS.add(adapter_name)
185
  except Exception as e:
186
  raise gr.Error(f"Failed to load adapter {lora_adapter}: {e}")
187
+ else:
188
+ print(f"--- Adapter {lora_adapter} is already loaded. ---")
189
+
190
  pipe.set_adapters([adapter_name], adapter_weights=[1.0])
191
 
192
  if randomize_seed:
 
239
 
240
  with gr.Blocks() as demo:
241
  with gr.Column(elem_id="col-container"):
242
+ gr.Markdown("# **Qwen-Image-Edit-2511-LoRA**", elem_id="main-title")
243
+ gr.Markdown("Perform diverse image edits using the **Qwen-Image-Edit-2511** model with optional LoRA adapters.")
244
 
245
  with gr.Row(equal_height=True):
246
  with gr.Column():
 
249
  prompt = gr.Text(
250
  label="Edit Prompt",
251
  show_label=True,
252
+ placeholder="e.g., make it look cinematic...",
253
  )
254
 
255
  run_button = gr.Button("Edit Image", variant="primary")
 
260
  with gr.Row():
261
  lora_adapter = gr.Dropdown(
262
  label="Choose Editing Style",
263
+ choices=["Base Model", "Photo-to-Anime"],
264
  value="Base Model"
265
  )
266
  with gr.Accordion("Advanced Settings", open=False, visible=False):
 
271
 
272
  gr.Examples(
273
  examples=[
274
+ ["examples/1.jpg", "A cinematic shot of a cyberpunk city.", "Base Model"],
275
+ ["examples/2.jpg", "Transform into anime style.", "Photo-to-Anime"],
276
  ],
277
  inputs=[input_image, prompt, lora_adapter],
278
  outputs=[output_image, seed],