yeq6x committed on
Commit
b44c75f
·
1 Parent(s): 4a246ca

Enhance inference process in app.py to support stage2-only generation, updating the output structure to return both stage2 and combined results. Adjust UI elements for improved clarity and maintainability.

Browse files
Files changed (1) hide show
  1. app.py +28 -31
app.py CHANGED
@@ -93,7 +93,7 @@ def infer(
93
  progress=gr.Progress(track_tqdm=True),
94
  ):
95
  """
96
- Run single inference with combined LoRAs: Lightning + Stage1 + Stage2.
97
 
98
  Parameters:
99
  image: Input image (PIL Image or path string).
@@ -108,7 +108,7 @@ def infer(
108
  progress: Gradio progress callback.
109
 
110
  Returns:
111
- tuple: (result_image, seed_used)
112
  """
113
 
114
  # Hardcode the negative prompt
@@ -131,26 +131,26 @@ def infer(
131
  if height==256 and width==256:
132
  height, width = None, None
133
 
134
- # --- Stage2-only generation (disabled) ---
135
- # print("Generating with Stage2 LoRA only...")
136
- # print(f"Prompt: '{STAGE2_PROMPT}'")
137
- # print(f"Seed: {seed}, Steps: {num_inference_steps}, Guidance: {true_guidance_scale}, Size: {width}x{height}")
138
- # print("LoRA Weights - Stage2: 1.0")
139
- #
140
- # pipe.set_adapters(["stage2"], adapter_weights=[1.0])
141
- # stage2_images = pipe(
142
- # image=[pil_image] if pil_image is not None else None,
143
- # prompt=STAGE2_PROMPT,
144
- # height=height,
145
- # width=width,
146
- # negative_prompt=negative_prompt,
147
- # num_inference_steps=num_inference_steps,
148
- # generator=generator,
149
- # true_cfg_scale=true_guidance_scale,
150
- # num_images_per_prompt=1,
151
- # ).images
152
- # stage2_only_image = stage2_images[0] if stage2_images else None
153
- #
154
  # --- Combined generation ---
155
  print(f"Generating with combined LoRAs...")
156
  print(f"Prompt: '{STAGE1_PROMPT}'")
@@ -179,12 +179,10 @@ def infer(
179
  if pil_image.size != generated_image.size:
180
  pil_image = pil_image.resize(generated_image.size, Image.Resampling.LANCZOS)
181
  blended_image = Image.blend(pil_image, generated_image, alpha=0.75)
182
- # return stage2_only_image, blended_image, seed
183
- return blended_image, seed
184
 
185
  # Return first result image and seed
186
- # return stage2_only_image, result_images[0] if result_images else None, seed
187
- return result_images[0] if result_images else None, seed
188
 
189
  # --- Examples and UI Layout ---
190
  examples = []
@@ -266,9 +264,9 @@ with gr.Blocks(css=css) as demo:
266
  </script>
267
  """)
268
 
269
- # with gr.Column(scale=1):
270
- # gr.Markdown("### 🧪 Result1")
271
- # stage2_result = gr.Image(label="Result1", show_label=False, type="pil", interactive=False, height=350)
272
 
273
  with gr.Column(scale=1):
274
  gr.Markdown("### 📤 Result2")
@@ -352,8 +350,7 @@ with gr.Blocks(css=css) as demo:
352
  stage1_weight,
353
  stage2_weight,
354
  ],
355
- # outputs=[stage2_result, result, seed],
356
- outputs=[result, seed],
357
  )
358
 
359
  if __name__ == "__main__":
 
93
  progress=gr.Progress(track_tqdm=True),
94
  ):
95
  """
96
+ Run stage2-only inference, then combined LoRAs: Lightning + Stage1 + Stage2.
97
 
98
  Parameters:
99
  image: Input image (PIL Image or path string).
 
108
  progress: Gradio progress callback.
109
 
110
  Returns:
111
+ tuple: (stage2_only_image, result_image, seed_used)
112
  """
113
 
114
  # Hardcode the negative prompt
 
131
  if height==256 and width==256:
132
  height, width = None, None
133
 
134
+ # Stage2-only generation
135
+ print("Generating with Stage2 LoRA only...")
136
+ print(f"Prompt: '{STAGE2_PROMPT}'")
137
+ print(f"Seed: {seed}, Steps: {num_inference_steps}, Guidance: {true_guidance_scale}, Size: {width}x{height}")
138
+ print("LoRA Weights - Stage2: 1.0")
139
+
140
+ pipe.set_adapters(["stage2"], adapter_weights=[1.0])
141
+ stage2_images = pipe(
142
+ image=[pil_image] if pil_image is not None else None,
143
+ prompt=STAGE2_PROMPT,
144
+ height=height,
145
+ width=width,
146
+ negative_prompt=negative_prompt,
147
+ num_inference_steps=num_inference_steps,
148
+ generator=generator,
149
+ true_cfg_scale=true_guidance_scale,
150
+ num_images_per_prompt=1,
151
+ ).images
152
+ stage2_only_image = stage2_images[0] if stage2_images else None
153
+
154
  # --- Combined generation ---
155
  print(f"Generating with combined LoRAs...")
156
  print(f"Prompt: '{STAGE1_PROMPT}'")
 
179
  if pil_image.size != generated_image.size:
180
  pil_image = pil_image.resize(generated_image.size, Image.Resampling.LANCZOS)
181
  blended_image = Image.blend(pil_image, generated_image, alpha=0.75)
182
+ return stage2_only_image, blended_image, seed
 
183
 
184
  # Return first result image and seed
185
+ return stage2_only_image, result_images[0] if result_images else None, seed
 
186
 
187
  # --- Examples and UI Layout ---
188
  examples = []
 
264
  </script>
265
  """)
266
 
267
+ with gr.Column(scale=1):
268
+ gr.Markdown("### 🧪 Result1")
269
+ stage2_result = gr.Image(label="Result1", show_label=False, type="pil", interactive=False, height=350)
270
 
271
  with gr.Column(scale=1):
272
  gr.Markdown("### 📤 Result2")
 
350
  stage1_weight,
351
  stage2_weight,
352
  ],
353
+ outputs=[stage2_result, result, seed],
 
354
  )
355
 
356
  if __name__ == "__main__":