prithivMLmods committed
Commit 9efbe02 · verified · 1 Parent(s): 5f022ce

Delete app.py

Files changed (1)
  1. app.py +0 -467
app.py DELETED
@@ -1,467 +0,0 @@
- import os
- import json
- import time
- import requests
- import random
- import numpy as np
- import spaces
- import torch
- from PIL import Image
- import gradio as gr
-
- # --- Qwen Specific Imports ---
- from diffusers import FlowMatchEulerDiscreteScheduler
- # Assuming the qwenimage package is available in the environment
- from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
- from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
- from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
-
- from huggingface_hub import (
-     hf_hub_download,
-     HfFileSystem,
-     ModelCard
- )
-
- from typing import Iterable
- from gradio.themes import Soft
- from gradio.themes.utils import colors, fonts, sizes
-
- # =========================================
- # THEME CONFIGURATION
- # =========================================
-
- colors.orange_red = colors.Color(
-     name="orange_red",
-     c50="#FFF0E5",
-     c100="#FFE0CC",
-     c200="#FFC299",
-     c300="#FFA366",
-     c400="#FF8533",
-     c500="#FF4500",
-     c600="#E63E00",
-     c700="#CC3700",
-     c800="#B33000",
-     c900="#992900",
-     c950="#802200",
- )
-
- class OrangeRedTheme(Soft):
-     def __init__(
-         self,
-         *,
-         primary_hue: colors.Color | str = colors.gray,
-         secondary_hue: colors.Color | str = colors.orange_red,
-         neutral_hue: colors.Color | str = colors.slate,
-         text_size: sizes.Size | str = sizes.text_lg,
-         font: fonts.Font | str | Iterable[fonts.Font | str] = (
-             fonts.GoogleFont("Outfit"), "Arial", "sans-serif",
-         ),
-         font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
-             fonts.GoogleFont("IBM Plex Mono"), "ui-monospace", "monospace",
-         ),
-     ):
-         super().__init__(
-             primary_hue=primary_hue,
-             secondary_hue=secondary_hue,
-             neutral_hue=neutral_hue,
-             text_size=text_size,
-             font=font,
-             font_mono=font_mono,
-         )
-         super().set(
-             background_fill_primary="*primary_50",
-             background_fill_primary_dark="*primary_900",
-             body_background_fill="linear-gradient(135deg, *primary_200, *primary_100)",
-             body_background_fill_dark="linear-gradient(135deg, *primary_900, *primary_800)",
-             button_primary_text_color="white",
-             button_primary_text_color_hover="white",
-             button_primary_background_fill="linear-gradient(90deg, *secondary_500, *secondary_600)",
-             button_primary_background_fill_hover="linear-gradient(90deg, *secondary_600, *secondary_700)",
-             button_primary_background_fill_dark="linear-gradient(90deg, *secondary_600, *secondary_700)",
-             button_primary_background_fill_hover_dark="linear-gradient(90deg, *secondary_500, *secondary_600)",
-             button_secondary_text_color="black",
-             button_secondary_text_color_hover="white",
-             button_secondary_background_fill="linear-gradient(90deg, *primary_300, *primary_300)",
-             button_secondary_background_fill_hover="linear-gradient(90deg, *primary_400, *primary_400)",
-             button_secondary_background_fill_dark="linear-gradient(90deg, *primary_500, *primary_600)",
-             button_secondary_background_fill_hover_dark="linear-gradient(90deg, *primary_500, *primary_500)",
-             slider_color="*secondary_500",
-             slider_color_dark="*secondary_600",
-             block_title_text_weight="600",
-             block_border_width="3px",
-             block_shadow="*shadow_drop_lg",
-             button_primary_shadow="*shadow_drop_lg",
-             button_large_padding="11px",
-             color_accent_soft="*primary_100",
-             block_label_background_fill="*primary_200",
-         )
-
- orange_red_theme = OrangeRedTheme()
-
- # =========================================
- # LORA CONFIGURATION (The "DLC" List)
- # =========================================
-
- loras = [
-     {
-         "image": "https://huggingface.co/autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime/resolve/main/images/example.jpg",
-         "title": "Photo to Anime",
-         "repo": "autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime",
-         "weights": "Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors",
-         "trigger_word": "Transform into anime"
-     },
-     {
-         "image": "https://huggingface.co/dx8152/Qwen-Edit-2509-Multiple-angles/resolve/main/images/example.jpg",
-         "title": "Multiple Angles",
-         "repo": "dx8152/Qwen-Edit-2509-Multiple-angles",
-         "weights": "镜头转换.safetensors",
-         "trigger_word": "Rotate camera"
-     },
-     {
-         "image": "https://huggingface.co/dx8152/Qwen-Image-Edit-2509-Light_restoration/resolve/main/images/example.jpg",
-         "title": "Light Restoration",
-         "repo": "dx8152/Qwen-Image-Edit-2509-Light_restoration",
-         "weights": "移除光影.safetensors",
-         "trigger_word": "Remove shadows"
-     },
-     {
-         "image": "https://huggingface.co/dx8152/Qwen-Image-Edit-2509-Relight/resolve/main/images/example.jpg",
-         "title": "Relight",
-         "repo": "dx8152/Qwen-Image-Edit-2509-Relight",
-         "weights": "Qwen-Edit-Relight.safetensors",
-         "trigger_word": "Relight the image"
-     },
-     {
-         "image": "https://huggingface.co/dx8152/Qwen-Edit-2509-Multi-Angle-Lighting/resolve/main/images/example.jpg",
-         "title": "Multi-Angle Lighting",
-         "repo": "dx8152/Qwen-Edit-2509-Multi-Angle-Lighting",
-         "weights": "多角度灯光-251116.safetensors",
-         "trigger_word": "Light source from"
-     },
-     {
-         "image": "https://huggingface.co/tlennon-ie/qwen-edit-skin/resolve/main/images/example.jpg",
-         "title": "Edit Skin",
-         "repo": "tlennon-ie/qwen-edit-skin",
-         "weights": "qwen-edit-skin_1.1_000002750.safetensors",
-         "trigger_word": "Make skin details prominent"
-     },
-     {
-         "image": "https://huggingface.co/lovis93/next-scene-qwen-image-lora-2509/resolve/main/images/example.jpg",
-         "title": "Next Scene",
-         "repo": "lovis93/next-scene-qwen-image-lora-2509",
-         "weights": "next-scene_lora-v2-3000.safetensors",
-         "trigger_word": "Next scene cinematic"
-     },
-     {
-         "image": "https://huggingface.co/vafipas663/Qwen-Edit-2509-Upscale-LoRA/resolve/main/images/example.jpg",
-         "title": "Upscale Image",
-         "repo": "vafipas663/Qwen-Edit-2509-Upscale-LoRA",
-         "weights": "qwen-edit-enhance_64-v3_000001000.safetensors",
-         "trigger_word": "Upscale the image"
-     },
- ]
-
- # =========================================
- # MODEL SETUP
- # =========================================
-
- dtype = torch.bfloat16
- device = "cuda" if torch.cuda.is_available() else "cpu"
- base_model = "Qwen/Qwen-Image-Edit-2509"
-
- print(f"Loading {base_model} pipeline...")
-
- # Initialize Pipeline
- pipe = QwenImageEditPlusPipeline.from_pretrained(
-     base_model,
-     transformer=QwenImageTransformer2DModel.from_pretrained(
-         "linoyts/Qwen-Image-Edit-Rapid-AIO",
-         subfolder='transformer',
-         torch_dtype=dtype,
-         device_map=device
-     ),
-     torch_dtype=dtype,
- ).to(device)
-
- # Apply Optimization
- try:
-     print("Applying FA3 Processor...")
-     pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
-     print("Optimization applied successfully.")
- except Exception as e:
-     print(f"Optimization warning: {e}. Continuing with standard pipeline.")
-
- MAX_SEED = np.iinfo(np.int32).max
-
- # =========================================
- # HELPER FUNCTIONS
- # =========================================
-
- class calculateDuration:
-     def __init__(self, activity_name=""):
-         self.activity_name = activity_name
-
-     def __enter__(self):
-         self.start_time = time.time()
-         return self
-
-     def __exit__(self, exc_type, exc_value, traceback):
-         self.end_time = time.time()
-         self.elapsed_time = self.end_time - self.start_time
-         if self.activity_name:
-             print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
-         else:
-             print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
-
- def update_dimensions_on_upload(image):
-     if image is None:
-         return 1024, 1024
-
-     original_width, original_height = image.size
-
-     if original_width > original_height:
-         new_width = 1024
-         aspect_ratio = original_height / original_width
-         new_height = int(new_width * aspect_ratio)
-     else:
-         new_height = 1024
-         aspect_ratio = original_width / original_height
-         new_width = int(new_height * aspect_ratio)
-
-     # Ensure dimensions are multiples of 8 (Qwen requirement)
-     new_width = (new_width // 8) * 8
-     new_height = (new_height // 8) * 8
-
-     return new_width, new_height
-
- def update_selection(evt: gr.SelectData, current_prompt):
-     selected_lora = loras[evt.index]
-     trigger = selected_lora.get("trigger_word", "")
-     new_placeholder = f"Type a prompt for {selected_lora['title']}"
-     lora_repo = selected_lora["repo"]
-     updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✅"
-
-     # Append trigger word to prompt if not present
-     new_prompt = current_prompt
-     if trigger and trigger not in current_prompt:
-         if current_prompt:
-             new_prompt = f"{trigger} {current_prompt}"
-         else:
-             new_prompt = trigger
-
-     return (
-         gr.update(placeholder=new_placeholder, value=new_prompt),
-         updated_text,
-         evt.index
-     )
-
- def check_custom_model(link):
-     if link.startswith("https://"):
-         if "huggingface.co" in link:
-             parts = link.split("huggingface.co/")
-             repo_part = parts[1].strip()
-             return repo_part, link
-     return link, link
-
- # =========================================
- # INFERENCE LOGIC
- # =========================================
-
- @spaces.GPU(duration=60)
- def run_lora(
-     input_image,
-     prompt,
-     steps,
-     guidance_scale,
-     selected_index,
-     randomize_seed,
-     seed,
-     custom_lora_path,
-     progress=gr.Progress(track_tqdm=True)
- ):
-     if input_image is None:
-         raise gr.Error("Input image is required for Qwen Image Edit.")
-
-     # 1. Clean up previous LoRAs
-     with calculateDuration("Unloading LoRA"):
-         try:
-             pipe.unload_lora_weights()
-         except Exception:
-             pass
-
-     # 2. Determine which LoRA to load
-     lora_repo = None
-     weight_name = None
-     adapter_name = "default"
-
-     if custom_lora_path and custom_lora_path.strip() != "":
-         repo, _ = check_custom_model(custom_lora_path)
-         lora_repo = repo
-         print(f"Attempting to load custom LoRA: {lora_repo}")
-     elif selected_index is not None and selected_index < len(loras):
-         selected_lora = loras[selected_index]
-         lora_repo = selected_lora["repo"]
-         weight_name = selected_lora.get("weights", None)
-         print(f"Loading Gallery LoRA: {selected_lora['title']}")
-     else:
-         print("No LoRA selected. Running Base Model.")
-
-     # 3. Load LoRA
-     if lora_repo:
-         with calculateDuration("Loading LoRA weights"):
-             try:
-                 pipe.load_lora_weights(
-                     lora_repo,
-                     weight_name=weight_name,
-                     adapter_name=adapter_name
-                 )
-                 pipe.set_adapters([adapter_name], adapter_weights=[1.0])
-             except Exception as e:
-                 print(f"Error loading LoRA: {e}")
-                 gr.Warning(f"Failed to load LoRA {lora_repo}. Generating with base model.")
-
-     # 4. Prepare Seed
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-     generator = torch.Generator(device=device).manual_seed(seed)
-
-     # 5. Process Image
-     original_image = input_image.convert("RGB")
-     width, height = update_dimensions_on_upload(original_image)
-
-     negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"
-
-     # 6. Generate
-     with calculateDuration("Generating image"):
-         final_image = pipe(
-             image=original_image,
-             prompt=prompt,
-             negative_prompt=negative_prompt,
-             height=height,
-             width=width,
-             num_inference_steps=int(steps),
-             true_cfg_scale=guidance_scale,
-             generator=generator,
-         ).images[0]
-
-     return final_image, seed, gr.update(visible=False)
-
- def add_custom_lora(custom_path):
-     if not custom_path:
-         # Return an empty state so run_lora does not treat a placeholder string as a repo path
-         return gr.update(visible=False), None, ""
-
-     repo, _ = check_custom_model(custom_path)
-     card_html = f'''
-     <div class="custom_lora_card" style="border:1px solid #ccc; padding:10px; border-radius:8px; margin-top:10px;">
-         <h3>Custom LoRA Active</h3>
-         <code>{repo}</code>
-     </div>
-     '''
-     return gr.update(visible=True, value=card_html), None, repo
-
- def remove_custom_lora():
-     return gr.update(visible=False), None, ""
-
- # =========================================
- # UI CONSTRUCTION
- # =========================================
-
- css = '''
- #gen_btn{height: 100%}
- #gen_column{align-self: stretch}
- #title{text-align: center}
- #title h1{font-size: 3em; display:inline-flex; align-items:center}
- #title img{width: 100px; margin-right: 0.5em}
- #gallery .grid-wrap{height: 15vh}
- #lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%}
- .card_internal{display: flex;height: 100px;margin-top: .5em}
- .card_internal img{margin-right: 1em}
- #progress{height:30px}
- '''
-
- with gr.Blocks(theme=orange_red_theme, css=css, delete_cache=(60, 60)) as demo:
-     title = gr.HTML(
-         """<h1>Qwen Image Edit 2.5 DLC 🧪</h1>""",
-         elem_id="title",
-     )
-
-     selected_index = gr.State(None)
-     custom_lora_state = gr.State("")
-
-     with gr.Row():
-         with gr.Column(scale=3):
-             prompt = gr.Textbox(label="Edit Prompt", lines=2, placeholder="✦︎ Select a LoRA from the gallery below and describe the edit...")
-
-         with gr.Column(scale=1, elem_id="gen_column"):
-             generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
-
-     with gr.Row():
-         input_image = gr.Image(label="Upload Input Image (Required)", type="pil", height=300)
-
-     with gr.Row():
-         with gr.Column():
-             selected_info = gr.Markdown("### No LoRA Selected (Base Model)")
-
-             gallery = gr.Gallery(
-                 [(item.get("image", ""), item["title"]) for item in loras],
-                 label="Available LoRAs",
-                 allow_preview=False,
-                 columns=4,
-                 elem_id="gallery",
-             )
-
-             with gr.Group():
-                 custom_lora_input = gr.Textbox(label="Load Custom LoRA", placeholder="Enter HuggingFace Repo ID (e.g. autoweeb/Qwen-Image-Edit-...)")
-                 gr.Markdown("[See compatible Qwen LoRAs](https://huggingface.co/models?other=base_model:adapter:Qwen/Qwen-Image-Edit-2509)", elem_id="lora_list")
-
-             custom_lora_info = gr.HTML(visible=False)
-             remove_custom_btn = gr.Button("Remove Custom LoRA", visible=False)
-
-         with gr.Column():
-             progress_bar = gr.Markdown(elem_id="progress", visible=False)
-             result = gr.Image(label="Edited Image", format="png", height=630)
-
-     with gr.Row():
-         with gr.Accordion("Advanced Settings", open=False):
-             with gr.Column():
-                 with gr.Row():
-                     guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
-                     steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=4)
-
-                 with gr.Row():
-                     randomize_seed = gr.Checkbox(True, label="Randomize seed")
-                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
-
-     # Event Wiring
-     gallery.select(
-         update_selection,
-         inputs=[prompt],
-         outputs=[prompt, selected_info, selected_index]
-     )
-
-     custom_lora_input.submit(
-         add_custom_lora,
-         inputs=[custom_lora_input],
-         outputs=[custom_lora_info, selected_index, custom_lora_state]
-     ).then(
-         lambda: gr.update(visible=True), outputs=[remove_custom_btn]
-     )
-
-     remove_custom_btn.click(
-         remove_custom_lora,
-         outputs=[custom_lora_info, selected_index, custom_lora_state]
-     ).then(
-         lambda: gr.update(visible=False), outputs=[remove_custom_btn]
-     ).then(
-         lambda: "", outputs=[custom_lora_input]
-     )
-
-     gr.on(
-         triggers=[generate_button.click, prompt.submit],
-         fn=run_lora,
-         inputs=[input_image, prompt, steps, guidance_scale, selected_index, randomize_seed, seed, custom_lora_state],
-         outputs=[result, seed, progress_bar]
-     )
-
- demo.queue()
- demo.launch(mcp_server=True, ssr_mode=False, show_error=True)