Spaces: Running on Zero

Update app.py

app.py CHANGED
@@ -101,7 +101,8 @@ pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Relight",
                        adapter_name="relight")
 
 pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
-
+# It's recommended to run optimization after loading all weights
+# optimize_pipeline_(pipe, image=[Image.new("RGB", (1024, 1024)), Image.new("RGB", (1024, 1024))], prompt="prompt")
 
 MAX_SEED = np.iinfo(np.int32).max
 
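The new comments reference an `optimize_pipeline_` helper that is left commented out, and its implementation is not part of this diff. As a loosely hedged sketch only: warm-up helpers of this kind typically compile the pipeline's heaviest module and run one dummy pass at the target resolution so compilation cost is paid before the first real request. Everything below is an assumption about what such a helper might do, not this Space's actual code.

```python
import torch
from PIL import Image

def warmup_pipeline(pipe, size=(1024, 1024)):
    # Hypothetical warm-up sketch; the real optimize_pipeline_ used by
    # app.py is not shown in this diff and may do something different.
    pipe.transformer = torch.compile(pipe.transformer)
    dummy = Image.new("RGB", size)
    with torch.inference_mode():
        # One dummy pass triggers compilation at the working resolution.
        pipe(image=dummy, prompt="warm-up", num_inference_steps=1)
```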
@@ -112,6 +113,7 @@ def update_dimensions_on_upload(image):
 
     original_width, original_height = image.size
 
+    # Cap max dimension to 1024 while preserving aspect ratio
     if original_width > original_height:
         new_width = 1024
         aspect_ratio = original_height / original_width
@@ -121,7 +123,7 @@ def update_dimensions_on_upload(image):
         aspect_ratio = original_width / original_height
         new_width = int(new_height * aspect_ratio)
 
-    # Ensure dimensions are multiples of 8
+    # Ensure dimensions are multiples of 8 for model compatibility
     new_width = (new_width // 8) * 8
     new_height = (new_height // 8) * 8
 
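Reassembling the two hunks above, the resize helper now reads roughly as follows. The lines outside the diff context (the `else:` branch header, the `new_height` assignments, and the return statement) are inferred from the surrounding code rather than shown verbatim in the diff.

```python
def update_dimensions_on_upload(image):
    original_width, original_height = image.size

    # Cap max dimension to 1024 while preserving aspect ratio
    if original_width > original_height:
        new_width = 1024
        aspect_ratio = original_height / original_width
        new_height = int(new_width * aspect_ratio)
    else:
        new_height = 1024
        aspect_ratio = original_width / original_height
        new_width = int(new_height * aspect_ratio)

    # Ensure dimensions are multiples of 8 for model compatibility
    new_width = (new_width // 8) * 8
    new_height = (new_height // 8) * 8

    return new_width, new_height
```

Rounding down to multiples of 8 matches the spatial downsampling of the VAE latent grid, so arbitrary upload sizes cannot break the model's shape requirements.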
@@ -159,9 +161,13 @@ def infer(
 
     generator = torch.Generator(device=device).manual_seed(seed)
 
+    # *** FIX: Added a negative prompt to enable classifier-free guidance ***
+    negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"
+
     result = pipe(
         image=input_image.convert("RGB"),
         prompt=prompt,
+        negative_prompt=negative_prompt,  # This line enables CFG
         height=height,
         width=width,
         num_inference_steps=steps,
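Passing `negative_prompt` is what makes the `guidance_scale` slider meaningful: classifier-free guidance contrasts the conditional and unconditional (negative) noise predictions at every denoising step. Schematically, this is the standard CFG combination rule, not the diffusers internals:

```python
import torch

def cfg_combine(eps_uncond: torch.Tensor, eps_cond: torch.Tensor,
                guidance_scale: float) -> torch.Tensor:
    # Push the prediction away from the negative-prompt direction,
    # scaled by guidance_scale; a scale of 1.0 disables the effect.
    return eps_uncond + guidance_scale * (eps_cond - eps_uncond)
```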
@@ -172,12 +178,13 @@ def infer(
 
     return result, seed, gr.Button(visible=True)
 
-# Wrapper for examples
+# Wrapper for examples to handle file paths
 @spaces.GPU
-def infer_example(
-    input_pil = Image.open(
+def infer_example(input_image_path, prompt, lora_adapter):
+    input_pil = Image.open(input_image_path).convert("RGB")
     width, height = update_dimensions_on_upload(input_pil)
-
+    # Set default values for example inference
+    result, seed, _ = infer(input_pil, prompt, lora_adapter, 0, True, 4.0, 20, width, height)
     return result, seed
 
 # --- UI Layout ---
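The rewritten wrapper exists because `gr.Examples` passes image columns as file paths rather than PIL images, so the path must be opened and the hidden width/height values recomputed before delegating to `infer` with fixed defaults (seed 0, randomize on, guidance 4.0, 20 steps). A hypothetical example row, to show how the wrapper is invoked (the path and prompt below are made up, not taken from the Space):

```python
# Roughly what Gradio does when an example row is clicked:
example_row = ["examples/room.png",                           # hypothetical image path
               "Relight the scene with warm sunset light",    # hypothetical prompt
               "relight"]                                     # adapter name from the diff
result, seed = infer_example(*example_row)
```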
@@ -215,11 +222,11 @@ with gr.Blocks(css=css, theme=qwen_theme) as demo:
             with gr.Accordion("⚙️ Advanced Settings", open=False):
                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                 randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
-                guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=
-                steps = gr.Slider(label="Inference Steps", minimum=1, maximum=
+                guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=4.0)
+                steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=20)
                 # Hidden sliders to hold image dimensions
-                height = gr.Slider(label="Height", minimum=256, maximum=
-                width = gr.Slider(label="Width", minimum=256, maximum=
+                height = gr.Slider(label="Height", minimum=256, maximum=1024, step=8, value=1024, visible=False)
+                width = gr.Slider(label="Width", minimum=256, maximum=1024, step=8, value=1024, visible=False)
 
         with gr.Column():
             output_image = gr.Image(label="Output Image", show_label=True, interactive=False, format="png", height=480)
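The Height and Width sliders are created as hidden state holders; the event wiring that fills them sits outside the shown hunks, but given the helper's signature it plausibly looks like the sketch below (an assumption, not code from this diff):

```python
# Hypothetical hookup: recompute the hidden dimensions on each upload.
input_image.upload(
    fn=update_dimensions_on_upload,
    inputs=[input_image],
    outputs=[width, height],  # helper returns (new_width, new_height)
)
```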
@@ -235,7 +242,7 @@ with gr.Blocks(css=css, theme=qwen_theme) as demo:
         inputs=[input_image, prompt, lora_adapter],
         outputs=[output_image, seed],
         fn=infer_example,
-        cache_examples=
+        cache_examples=False,
         label="Examples"
     )
 
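With `cache_examples=False`, clicking an example always runs `infer_example` live (on ZeroGPU, that consumes a GPU allocation per click) instead of serving precomputed outputs. If build-time caching is undesirable but repeat clicks should be cheap, recent Gradio versions also accept a lazy mode; a hedged sketch reusing the names from the hunk above:

```python
gr.Examples(
    examples=[example_row],  # hypothetical row, as sketched earlier
    inputs=[input_image, prompt, lora_adapter],
    outputs=[output_image, seed],
    fn=infer_example,
    cache_examples="lazy",  # cache on first click, reuse afterwards
    label="Examples",
)
```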