Refactor app.py to comment out unused imports and disable stage2-only generation, while updating the inference output structure to return only the combined result. Adjust UI elements for clarity and maintainability.
app.py CHANGED
```diff
@@ -6,10 +6,10 @@ import spaces
 
 from PIL import Image
 from diffusers import FlowMatchEulerDiscreteScheduler, QwenImageEditPlusPipeline
-from optimization import optimize_pipeline_
-from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
-from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
-from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
+# from optimization import optimize_pipeline_
+# from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
+# from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
+# from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
 
 import math
 import os
@@ -68,12 +68,12 @@ pipe.load_lora_weights(STAGE1_LORA_REPO, weight_name=STAGE1_LORA_WEIGHT, adapter
 # Load Stage 2 LoRA
 pipe.load_lora_weights(STAGE2_LORA_REPO, weight_name=STAGE2_LORA_WEIGHT, adapter_name="stage2")
 
-# Apply the same optimizations from the first version
-pipe.transformer.__class__ = QwenImageTransformer2DModel
-pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
+# # Apply the same optimizations from the first version
+# pipe.transformer.__class__ = QwenImageTransformer2DModel
+# pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
 
-# --- Ahead-of-time compilation ---
-optimize_pipeline_(pipe, image=[Image.new("RGB", (1024, 1024)), Image.new("RGB", (1024, 1024))], prompt="prompt")
+# # --- Ahead-of-time compilation ---
+# optimize_pipeline_(pipe, image=[Image.new("RGB", (1024, 1024)), Image.new("RGB", (1024, 1024))], prompt="prompt")
 
 # --- UI Constants ---
 MAX_SEED = np.iinfo(np.int32).max
```
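Both LoRAs stay registered on the pipeline under the adapter names `stage1` and `stage2`; the combined pass selects them through diffusers' `set_adapters`. A minimal sketch of that call, assuming the adapter names above (the weight values are placeholders, not the Space's actual settings):

```python
# Sketch: activate both LoRA adapters at once with per-adapter weights.
# Assumes they were loaded via load_lora_weights(..., adapter_name="stage1")
# and (..., adapter_name="stage2") as in the diff; weights are placeholders.
stage1_weight = 1.0
stage2_weight = 1.0
pipe.set_adapters(["stage1", "stage2"], adapter_weights=[stage1_weight, stage2_weight])
```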
```diff
@@ -93,7 +93,7 @@ def infer(
     progress=gr.Progress(track_tqdm=True),
 ):
     """
-    Run
+    Run single inference with combined LoRAs: Lightning + Stage1 + Stage2.
 
     Parameters:
         image: Input image (PIL Image or path string).
@@ -108,7 +108,7 @@ def infer(
         progress: Gradio progress callback.
 
     Returns:
-        tuple: (
+        tuple: (result_image, seed_used)
     """
 
     # Hardcode the negative prompt
@@ -117,8 +117,8 @@ def infer(
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
-
-
+    # Set up the generator for reproducibility
+    generator = torch.Generator(device=device).manual_seed(seed)
 
     # Load input image into PIL Image
     pil_image = None
```
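The added `generator` is what makes a given seed reproducible: the same seeded `torch.Generator`, passed into the pipeline call, yields the same sample. A self-contained sketch of the pattern, with `device` and `seed` as illustrative assumptions:

```python
import torch

# Sketch of the reproducibility pattern introduced above: seed one
# torch.Generator per request and pass it to the pipeline call, so
# re-running with the same seed reproduces the same image.
device = "cuda" if torch.cuda.is_available() else "cpu"
seed = 42
generator = torch.Generator(device=device).manual_seed(seed)

# images = pipe(prompt=..., generator=generator).images
```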
```diff
@@ -131,27 +131,27 @@ def infer(
     if height==256 and width==256:
         height, width = None, None
 
-    # Stage2-only generation
-    print("Generating with Stage2 LoRA only...")
-    print(f"Prompt: '{STAGE2_PROMPT}'")
-    print(f"Seed: {seed}, Steps: {num_inference_steps}, Guidance: {true_guidance_scale}, Size: {width}x{height}")
-    print("LoRA Weights - Stage2: 1.0")
-
-    pipe.set_adapters(["stage2"], adapter_weights=[1.0])
-    stage2_images = pipe(
-        image=[pil_image] if pil_image is not None else None,
-        prompt=STAGE2_PROMPT,
-        height=height,
-        width=width,
-        negative_prompt=negative_prompt,
-        num_inference_steps=num_inference_steps,
-        generator=generator,
-        true_cfg_scale=true_guidance_scale,
-        num_images_per_prompt=1,
-    ).images
-    stage2_only_image = stage2_images[0] if stage2_images else None
-
-    #
+    # --- Stage2-only generation (disabled) ---
+    # print("Generating with Stage2 LoRA only...")
+    # print(f"Prompt: '{STAGE2_PROMPT}'")
+    # print(f"Seed: {seed}, Steps: {num_inference_steps}, Guidance: {true_guidance_scale}, Size: {width}x{height}")
+    # print("LoRA Weights - Stage2: 1.0")
+    #
+    # pipe.set_adapters(["stage2"], adapter_weights=[1.0])
+    # stage2_images = pipe(
+    #     image=[pil_image] if pil_image is not None else None,
+    #     prompt=STAGE2_PROMPT,
+    #     height=height,
+    #     width=width,
+    #     negative_prompt=negative_prompt,
+    #     num_inference_steps=num_inference_steps,
+    #     generator=generator,
+    #     true_cfg_scale=true_guidance_scale,
+    #     num_images_per_prompt=1,
+    # ).images
+    # stage2_only_image = stage2_images[0] if stage2_images else None
+    #
+    # --- Combined generation ---
     print(f"Generating with combined LoRAs...")
     print(f"Prompt: '{STAGE1_PROMPT}'")
     print(f"Seed: {seed}, Steps: {num_inference_steps}, Guidance: {true_guidance_scale}, Size: {width}x{height}")
@@ -167,7 +167,7 @@ def infer(
         width=width,
         negative_prompt=negative_prompt,
         num_inference_steps=num_inference_steps,
-        generator=
+        generator=generator,
         true_cfg_scale=true_guidance_scale,
         num_images_per_prompt=1,
     ).images
@@ -179,10 +179,12 @@
         if pil_image.size != generated_image.size:
             pil_image = pil_image.resize(generated_image.size, Image.Resampling.LANCZOS)
         blended_image = Image.blend(pil_image, generated_image, alpha=0.75)
-        return stage2_only_image, blended_image, seed
+        # return stage2_only_image, blended_image, seed
+        return blended_image, seed
 
     # Return first result image and seed
-    return stage2_only_image, result_images[0] if result_images else None, seed
+    # return stage2_only_image, result_images[0] if result_images else None, seed
+    return result_images[0] if result_images else None, seed
 
 # --- Examples and UI Layout ---
 examples = []
```
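`Image.blend` requires both images to share size and mode, which is why the input is first resized with LANCZOS; `alpha=0.75` weights the generated image at 75% against 25% of the original input. A standalone sketch of the step, with placeholder file paths:

```python
from PIL import Image

# Sketch of the blend step above. Paths are placeholders; convert() keeps
# both images in the same mode, as Image.blend requires.
pil_image = Image.open("input.png").convert("RGB")
generated_image = Image.open("generated.png").convert("RGB")

# Match sizes before blending; Image.blend raises on mismatched sizes.
if pil_image.size != generated_image.size:
    pil_image = pil_image.resize(generated_image.size, Image.Resampling.LANCZOS)

# 25% input + 75% generated
blended_image = Image.blend(pil_image, generated_image, alpha=0.75)
```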
```diff
@@ -201,7 +203,7 @@ with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="col-container"):
         gr.HTML("""
             <div id="logo-title">
-            <h1>🎨✨
+            <h1>🎨✨ Qwen Image Edit 2509 - Visualize Body Structure Lines</h1>
             <h3 style="color: #5b47d1;">Anime Character Converter with Combined LoRAs</h3>
             </div>
         """)
@@ -264,14 +266,13 @@ with gr.Blocks(css=css) as demo:
         </script>
         """)
 
-        with gr.Column(scale=1):
-            gr.Markdown("### 🧪 Result1")
-
-            stage2_result = gr.Image(label="Result1", show_label=False, type="pil", interactive=False, height=350)
+        # with gr.Column(scale=1):
+        #     gr.Markdown("### 🧪 Result1")
+        #     stage2_result = gr.Image(label="Result1", show_label=False, type="pil", interactive=False, height=350)
 
-
-
-
+        with gr.Column(scale=1):
+            gr.Markdown("### 📤 Result2")
+            result = gr.Image(label="Result2", show_label=False, type="pil", interactive=False, height=350)
 
         run_button = gr.Button("🚀 Generate", variant="primary", size="lg")
 
@@ -351,7 +352,8 @@ with gr.Blocks(css=css) as demo:
             stage1_weight,
             stage2_weight,
         ],
-        outputs=[stage2_result, result, seed],
+        # outputs=[stage2_result, result, seed],
+        outputs=[result, seed],
     )
 
 if __name__ == "__main__":
```
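Since `infer` now returns `(image, seed)` instead of three values, the `outputs` list shrinks to match: Gradio maps each returned value to one output component, in order. A minimal sketch of that contract, with a stand-in handler and components mirroring the ones in the diff:

```python
import gradio as gr

# Stand-in handler mirroring the app's new (result_image, seed) return shape.
def infer(seed):
    return None, seed  # result image (None here), seed actually used

with gr.Blocks() as demo:
    seed = gr.Number(value=0, label="Seed")
    result = gr.Image(label="Result2", type="pil", interactive=False)
    run_button = gr.Button("🚀 Generate")
    # Two returned values -> exactly two output components, in order.
    run_button.click(fn=infer, inputs=[seed], outputs=[result, seed])

if __name__ == "__main__":
    demo.launch()
```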