prithivMLmods committed on
Commit
8b08c36
·
verified ·
1 Parent(s): f2ae792

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +357 -0
app.py ADDED
@@ -0,0 +1,357 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import gc
3
+ import gradio as gr
4
+ import numpy as np
5
+ import spaces
6
+ import torch
7
+ import random
8
+ from PIL import Image
9
+ from typing import Iterable
10
+ from gradio.themes import Soft
11
+ from gradio.themes.utils import colors, fonts, sizes
12
+
13
# Custom "orange red" palette, registered on gradio's colors module so the
# theme below can reference it like any built-in hue.
_ORANGE_RED_SHADES = {
    "c50": "#FFF0E5",
    "c100": "#FFE0CC",
    "c200": "#FFC299",
    "c300": "#FFA366",
    "c400": "#FF8533",
    "c500": "#FF4500",
    "c600": "#E63E00",
    "c700": "#CC3700",
    "c800": "#B33000",
    "c900": "#992900",
    "c950": "#802200",
}

colors.orange_red = colors.Color(name="orange_red", **_ORANGE_RED_SHADES)
27
+
28
class OrangeRedTheme(Soft):
    """Gradio theme derived from ``Soft`` that uses the custom ``orange_red``
    palette (registered above) as the secondary hue.

    All constructor parameters mirror :class:`gradio.themes.Soft`; the
    defaults simply pick this app's palette, fonts, and text size.
    """

    def __init__(
        self,
        *,
        primary_hue: colors.Color | str = colors.gray,
        secondary_hue: colors.Color | str = colors.orange_red,
        neutral_hue: colors.Color | str = colors.slate,
        text_size: sizes.Size | str = sizes.text_lg,
        font: fonts.Font | str | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("Outfit"), "Arial", "sans-serif",
        ),
        font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("IBM Plex Mono"), "ui-monospace", "monospace",
        ),
    ):
        super().__init__(
            primary_hue=primary_hue,
            secondary_hue=secondary_hue,
            neutral_hue=neutral_hue,
            text_size=text_size,
            font=font,
            font_mono=font_mono,
        )
        # Override individual CSS variables on top of the Soft defaults.
        # ``*primary_N`` / ``*secondary_N`` tokens are resolved by gradio
        # against the hues chosen above; ``_dark`` variants apply in dark mode.
        super().set(
            background_fill_primary="*primary_50",
            background_fill_primary_dark="*primary_900",
            body_background_fill="linear-gradient(135deg, *primary_200, *primary_100)",
            body_background_fill_dark="linear-gradient(135deg, *primary_900, *primary_800)",
            button_primary_text_color="white",
            button_primary_text_color_hover="white",
            button_primary_background_fill="linear-gradient(90deg, *secondary_500, *secondary_600)",
            button_primary_background_fill_hover="linear-gradient(90deg, *secondary_600, *secondary_700)",
            button_primary_background_fill_dark="linear-gradient(90deg, *secondary_600, *secondary_700)",
            button_primary_background_fill_hover_dark="linear-gradient(90deg, *secondary_500, *secondary_600)",
            button_secondary_text_color="black",
            button_secondary_text_color_hover="white",
            button_secondary_background_fill="linear-gradient(90deg, *primary_300, *primary_300)",
            button_secondary_background_fill_hover="linear-gradient(90deg, *primary_400, *primary_400)",
            button_secondary_background_fill_dark="linear-gradient(90deg, *primary_500, *primary_600)",
            button_secondary_background_fill_hover_dark="linear-gradient(90deg, *primary_500, *primary_500)",
            slider_color="*secondary_500",
            slider_color_dark="*secondary_600",
            block_title_text_weight="600",
            block_border_width="3px",
            block_shadow="*shadow_drop_lg",
            button_primary_shadow="*shadow_drop_lg",
            button_large_padding="11px",
            color_accent_soft="*primary_100",
            block_label_background_fill="*primary_200",
        )
78
+
79
# Single theme instance shared by the UI below.
orange_red_theme = OrangeRedTheme()

# Prefer CUDA when available; the pipeline, generators, and adapters all use
# this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Startup diagnostics for the Spaces log.
print("CUDA_VISIBLE_DEVICES=", os.environ.get("CUDA_VISIBLE_DEVICES"))
print("torch.__version__ =", torch.__version__)
print("Using device:", device)

# Project-local pipeline/transformer implementations (vendored `qwenimage`
# package); imported after the device diagnostics so failures are easier to
# attribute in the log.
from diffusers import FlowMatchEulerDiscreteScheduler
from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3

# bfloat16 everywhere to halve memory versus fp32.
dtype = torch.bfloat16
93
+
94
# Build the edit pipeline: base weights from Qwen/Qwen-Image-Edit-2511, but
# with the transformer swapped for the "Rapid-AIO" distilled variant (enables
# the 4-step fast inference used by the UI defaults below).
pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2511",
    transformer=QwenImageTransformer2DModel.from_pretrained(
        "linoyts/Qwen-Image-Edit-Rapid-AIO",
        subfolder='transformer',
        torch_dtype=dtype,
        device_map='cuda'
    ),
    torch_dtype=dtype
).to(device)

# Best-effort: switch attention to the Flash Attention 3 processor. The app
# still works (slower) if the processor cannot be installed on this hardware.
try:
    pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
    print("Flash Attention 3 Processor set successfully.")
except Exception as e:
    print(f"Warning: Could not set FA3 processor: {e}")

# Upper bound for user-visible seeds (fits in int32 for the slider).
MAX_SEED = np.iinfo(np.int32).max
112
+
113
# Registry of selectable LoRA adapters. For each UI label:
#   repo         - Hugging Face repo id holding the LoRA
#   weights      - safetensors filename inside the repo
#   adapter_name - internal name registered with pipe.load_lora_weights()
ADAPTER_SPECS = {
    "Multiple-Angles": {
        "repo": "prithivMLmods/Qwen-Image-Edit-2511-Anime",
        # Fixed: filename previously had a leading space, which would make
        # load_lora_weights fail to resolve the file in the repo.
        "weights": "Qwen-Image-Edit-2511-Anime-2000.safetensors",
        "adapter_name": "multiple-angles"
    },
    "Photo-to-Anime": {
        "repo": "autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime",
        "weights": "Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors",
        "adapter_name": "photo-to-anime"
    },
    "Any-Pose": {
        "repo": "lilylilith/AnyPose",
        "weights": "2511-AnyPose-helper-00006000.safetensors",
        "adapter_name": "any-pose"
    },
    "Light-Migration": {
        "repo": "dx8152/Qwen-Edit-2509-Light-Migration",
        # Filename reconstructed from a mojibake-corrupted source
        # ("参\ufffd\ufffd色调"); "参考色调" = "reference color tone".
        # TODO(review): confirm against the dx8152 repo file listing.
        "weights": "参考色调.safetensors",
        "adapter_name": "light-migration"
    },
    "Upscaler": {
        "repo": "starsfriday/Qwen-Image-Edit-2511-Upscale2K",
        "weights": "qwen_image_edit_2511_upscale.safetensors",
        "adapter_name": "upscale-2k"
    },
    "Style-Transfer": {
        "repo": "zooeyy/Style-Transfer",
        "weights": "Style Transfer-Alpha-V0.1.safetensors",
        "adapter_name": "style-transfer"
    },
    "Manga-Tone": {
        "repo": "nappa114514/Qwen-Image-Edit-2509-Manga-Tone",
        "weights": "tone001.safetensors",
        "adapter_name": "manga-tone"
    },
}

# Adapter names already loaded into the pipeline this process; checked in
# infer() so each LoRA is downloaded/loaded at most once.
LOADED_ADAPTERS = set()
152
+
153
def update_dimensions_on_upload(image):
    """Derive output (width, height) from an uploaded image.

    The longest side is scaled to 1024 px, the other side keeps the aspect
    ratio, and both are snapped down to multiples of 8 (required by the
    diffusion pipeline). Returns the 1024x1024 default when *image* is None.

    Args:
        image: object exposing a PIL-style ``.size`` -> (width, height),
            or None.

    Returns:
        tuple[int, int]: (width, height), each a multiple of 8 and >= 8.
    """
    if image is None:
        return 1024, 1024

    original_width, original_height = image.size

    if original_width > original_height:
        new_width = 1024
        new_height = int(new_width * original_height / original_width)
    else:
        new_height = 1024
        new_width = int(new_height * original_width / original_height)

    # Snap down to a multiple of 8, but never below 8: extreme aspect ratios
    # (e.g. 4096x16) previously rounded a dimension to 0, which would crash
    # the pipeline.
    new_width = max((new_width // 8) * 8, 8)
    new_height = max((new_height // 8) * 8, 8)

    return new_width, new_height
172
+
173
@spaces.GPU
def infer(
    images,
    prompt,
    lora_adapter,
    seed,
    randomize_seed,
    guidance_scale,
    steps,
    progress=gr.Progress(track_tqdm=True)
):
    """Run one LoRA-adapted edit over the uploaded image(s).

    Args:
        images: gallery items — each may be a (path, caption) tuple/list, a
            filepath string, a PIL Image, or a file-like object with ``.name``.
        prompt: edit instruction passed to the pipeline.
        lora_adapter: key into ADAPTER_SPECS selecting which LoRA to apply.
        seed: RNG seed; ignored when randomize_seed is True.
        randomize_seed: if True, draw a fresh seed in [0, MAX_SEED].
        guidance_scale: passed as ``true_cfg_scale`` to the pipeline.
        steps: number of inference steps.
        progress: gradio progress tracker (mirrors tqdm).

    Returns:
        tuple: (edited PIL image, seed actually used).

    Raises:
        gr.Error: no images supplied, none decodable, or unknown/failed adapter.
    """
    # Reclaim memory left over from the previous request before loading more.
    gc.collect()
    torch.cuda.empty_cache()

    if not images:
        raise gr.Error("Please upload at least one image to edit.")

    # Normalize every gallery item to an RGB PIL image; bad items are skipped
    # (logged) rather than failing the whole request.
    pil_images = []
    if images is not None:
        for item in images:
            try:
                if isinstance(item, tuple) or isinstance(item, list):
                    # Gallery entries arrive as (path_or_image, caption).
                    path_or_img = item[0]
                else:
                    path_or_img = item

                if isinstance(path_or_img, str):
                    pil_images.append(Image.open(path_or_img).convert("RGB"))
                elif isinstance(path_or_img, Image.Image):
                    pil_images.append(path_or_img.convert("RGB"))
                else:
                    # Fallback: file-like object exposing a .name path.
                    pil_images.append(Image.open(path_or_img.name).convert("RGB"))
            except Exception as e:
                print(f"Skipping invalid image item: {e}")
                continue

    if not pil_images:
        raise gr.Error("Could not process uploaded images.")

    spec = ADAPTER_SPECS.get(lora_adapter)
    if not spec:
        raise gr.Error(f"Configuration not found for: {lora_adapter}")

    adapter_name = spec["adapter_name"]

    # Load each LoRA at most once per process (tracked in LOADED_ADAPTERS).
    if adapter_name not in LOADED_ADAPTERS:
        print(f"--- Downloading and Loading Adapter: {lora_adapter} ---")
        try:
            pipe.load_lora_weights(
                spec["repo"],
                weight_name=spec["weights"],
                adapter_name=adapter_name
            )
            LOADED_ADAPTERS.add(adapter_name)
        except Exception as e:
            raise gr.Error(f"Failed to load adapter {lora_adapter}: {e}")
    else:
        print(f"--- Adapter {lora_adapter} is already loaded. ---")

    # Activate only the requested adapter, at full strength.
    pipe.set_adapters([adapter_name], adapter_weights=[1.0])

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    generator = torch.Generator(device=device).manual_seed(seed)
    negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"

    # Output dimensions follow the FIRST uploaded image (longest side 1024,
    # snapped to multiples of 8).
    width, height = update_dimensions_on_upload(pil_images[0])

    try:
        result_image = pipe(
            image=pil_images,
            prompt=prompt,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            num_inference_steps=steps,
            generator=generator,
            true_cfg_scale=guidance_scale,
        ).images[0]

        return result_image, seed

    except Exception as e:
        raise e
    finally:
        # Always release GPU memory, on success or failure.
        gc.collect()
        torch.cuda.empty_cache()
261
+
262
@spaces.GPU
def infer_example(images, prompt, lora_adapter):
    """Adapter for gr.Examples: run infer() with fixed fast settings.

    Accepts a single filepath string or a list of gallery items; returns
    (edited image, seed used), or (None, 0) when no images were given.
    """
    if not images:
        return None, 0

    # gr.Examples may hand over a bare path instead of a list.
    images_list = [images] if isinstance(images, str) else images

    # Fixed defaults: random seed, distilled-model guidance (1.0), 4 steps.
    return infer(
        images=images_list,
        prompt=prompt,
        lora_adapter=lora_adapter,
        seed=0,
        randomize_seed=True,
        guidance_scale=1.0,
        steps=4,
    )
282
+
283
# Custom CSS: centers the main column and enlarges the title. Applied via the
# gr.Blocks construction below.
css="""
#col-container {
    margin: 0 auto;
    max-width: 1000px;
}
#main-title h1 {font-size: 2.3em !important;}
"""
290
+
291
# css/theme must be passed to the gr.Blocks constructor — Blocks.launch() has
# no such parameters, so passing them there raises TypeError and the styling
# is never applied.
with gr.Blocks(css=css, theme=orange_red_theme) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# **Qwen-Image-Edit-2511-LoRAs-Fast**", elem_id="main-title")
        gr.Markdown("Perform diverse image edits using specialized [LoRA](https://huggingface.co/models?other=base_model:adapter:Qwen/Qwen-Image-Edit-2511) adapters. Upload one or more images.")

        with gr.Row(equal_height=True):
            with gr.Column():
                # Multi-image input; filepaths are normalized inside infer().
                images = gr.Gallery(
                    label="Upload Images",
                    type="filepath",
                    columns=2,
                    rows=1,
                    height=300,
                    allow_preview=True
                )

                prompt = gr.Text(
                    label="Edit Prompt",
                    show_label=True,
                    placeholder="e.g., transform into anime..",
                )

                run_button = gr.Button("Edit Image", variant="primary")

            with gr.Column():
                output_image = gr.Image(label="Output Image", interactive=False, format="png", height=363)

                with gr.Row():
                    lora_adapter = gr.Dropdown(
                        label="Choose Editing Style",
                        choices=list(ADAPTER_SPECS.keys()),
                        value="Photo-to-Anime"
                    )

                # Hidden advanced controls; still wired into infer() so the
                # defaults (random seed, cfg 1.0, 4 steps) are used.
                with gr.Accordion("Advanced Settings", open=False, visible=False):
                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                    randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                    guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
                    steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=4)

        gr.Examples(
            examples=[
                [["examples/B.jpg"], "Transform into anime.", "Photo-to-Anime"],
                [["examples/A.jpeg"], "Rotate the camera 45 degrees to the right.", "Multiple-Angles"],
                [["examples/U.jpg"], "Upscale this picture to 4K resolution.", "Upscaler"],
                [["examples/MT.jpg"], "Paint with manga tone.", "Manga-Tone"],
                [["examples/ST1.jpg", "examples/ST2.jpg"], "Convert Image 1 to the style of Image 2.", "Style-Transfer"],
                [["examples/L1.jpg", "examples/L2.jpg"], "Refer to the color tone, remove the original lighting from Image 1, and relight Image 1 based on the lighting and color tone of Image 2.", "Light-Migration"],
                [["examples/P1.jpg", "examples/P2.jpg"], "Make the person in image 1 do the exact same pose of the person in image 2. Changing the style and background of the image of the person in image 1 is undesirable, so don't do it.", "Any-Pose"],
            ],
            inputs=[images, prompt, lora_adapter],
            outputs=[output_image, seed],
            fn=infer_example,
            cache_examples=False,
            label="Examples"
        )

        gr.Markdown("[*](https://huggingface.co/spaces/prithivMLmods/Qwen-Image-Edit-2511-LoRAs-Fast)This is still an experimental Space for Qwen-Image-Edit-2511.")

    run_button.click(
        fn=infer,
        inputs=[images, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps],
        outputs=[output_image, seed]
    )
355
+
356
if __name__ == "__main__":
    # launch() accepts no css/theme kwargs (those belong on gr.Blocks);
    # passing them here raised TypeError. queue() bounds concurrent requests.
    demo.queue(max_size=30).launch(mcp_server=True, ssr_mode=False, show_error=True)