Spaces: Running on Zero
Commit · 9665fa2
1 Parent(s): 059d188
add lora support
Browse files
app.py CHANGED
|
@@ -157,18 +157,36 @@ vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix",torch_dtype=
|
|
| 157 |
vae.to('cuda')
|
| 158 |
|
| 159 |
pipe = StableDiffusionXLPipeline.from_pretrained("John6666/nova-anime-xl-il-v120-sdxl",torch_dtype=torch_dtype,vae=vae)
|
| 160 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 161 |
MAX_SEED = np.iinfo(np.int32).max
|
| 162 |
MAX_IMAGE_SIZE = 1024
|
| 163 |
|
| 164 |
accelerator = accelerate.Accelerator()
|
| 165 |
|
| 166 |
-
def generate_image_with_steps(prompt
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 167 |
"""Helper function to generate image with specific number of steps"""
|
| 168 |
scheduler = CustomedUniPCMultistepScheduler.from_config(pipe.scheduler.config
|
| 169 |
, solver_order = 2 if num_inference_steps==8 else 1
|
| 170 |
,denoise_to_zero = False
|
| 171 |
, use_afs=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 172 |
pipe.scheduler = scheduler
|
| 173 |
pipe.to('cuda')
|
| 174 |
with torch.no_grad():
|
|
@@ -240,6 +258,7 @@ def infer(
|
|
| 240 |
resolution,
|
| 241 |
guidance_scale,
|
| 242 |
num_inference_steps,
|
|
|
|
| 243 |
progress=gr.Progress(track_tqdm=True),
|
| 244 |
):
|
| 245 |
if randomize_seed:
|
|
@@ -249,7 +268,7 @@ def infer(
|
|
| 249 |
width, height = map(int, resolution.split('x'))
|
| 250 |
|
| 251 |
# Generate image with selected steps
|
| 252 |
-
image_quick = generate_image_with_steps(prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps)
|
| 253 |
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, final_sigmas_type="sigma_min")
|
| 254 |
# Generate image with 50 steps for high quality
|
| 255 |
negative_prompts = '(worst quality:2), (low quality:2), (normal quality:2), bad anatomy, bad proportions, poorly drawn face, poorly drawn hands, missing fingers, extra limbs, blurry, pixelated, distorted, lowres, jpeg artifacts, watermark, signature, text, (deformed:1.5), (bad hands:1.3), overexposed, underexposed, censored, mutated, extra fingers, cloned face, bad eyes'
|
|
@@ -343,6 +362,11 @@ with gr.Blocks() as demo:
|
|
| 343 |
label="Number of inference steps",
|
| 344 |
)
|
| 345 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 346 |
gr.Examples(examples=examples, inputs=[prompt])
|
| 347 |
gr.on(
|
| 348 |
triggers=[run_button.click, prompt.submit],
|
|
@@ -355,6 +379,7 @@ with gr.Blocks() as demo:
|
|
| 355 |
resolution,
|
| 356 |
guidance_scale,
|
| 357 |
num_inference_steps,
|
|
|
|
| 358 |
],
|
| 359 |
outputs=[result, result_20_steps, seed],
|
| 360 |
)
|
|
|
|
| 157 |
vae.to('cuda')
|
| 158 |
|
| 159 |
pipe = StableDiffusionXLPipeline.from_pretrained("John6666/nova-anime-xl-il-v120-sdxl",torch_dtype=torch_dtype,vae=vae)
|
| 160 |
+
pipe.load_lora_weights('DervlexVenice/spo_sdxl_4k_p_10ep_lora_webui-base-model-sdxl'
|
| 161 |
+
, weight_name='SPO_SDXL_4k_p_10ep_LoRA_webui_510261.safetensors'
|
| 162 |
+
, adapter_name="spo")
|
| 163 |
+
pipe.load_lora_weights('DervlexVenice/aesthetic_quality_modifiers_masterpiece-style-illustrious'
|
| 164 |
+
, weight_name='Aesthetic_Quality_Modifiers_Masterpiece_929497.safetensors'
|
| 165 |
+
, adapter_name="aqm")
|
| 166 |
MAX_SEED = np.iinfo(np.int32).max
|
| 167 |
MAX_IMAGE_SIZE = 1024
|
| 168 |
|
| 169 |
accelerator = accelerate.Accelerator()
|
| 170 |
|
| 171 |
+
def generate_image_with_steps(prompt
|
| 172 |
+
, negative_prompt
|
| 173 |
+
, seed
|
| 174 |
+
, width
|
| 175 |
+
, height
|
| 176 |
+
, guidance_scale
|
| 177 |
+
, num_inference_steps
|
| 178 |
+
, need_lora: bool = False):
|
| 179 |
"""Helper function to generate image with specific number of steps"""
|
| 180 |
scheduler = CustomedUniPCMultistepScheduler.from_config(pipe.scheduler.config
|
| 181 |
, solver_order = 2 if num_inference_steps==8 else 1
|
| 182 |
,denoise_to_zero = False
|
| 183 |
, use_afs=True)
|
| 184 |
+
if not need_lora:
|
| 185 |
+
pipe.set_adapters(["spo", "aqm"], adapter_weights=[0.0, 0.0])
|
| 186 |
+
elif num_inference_steps > 6:
|
| 187 |
+
pipe.set_adapters(["spo", "aqm"], adapter_weights=[0.7, 0.7])
|
| 188 |
+
else:
|
| 189 |
+
pipe.set_adapters(["spo", "aqm"], adapter_weights=[0.25, 0.25])
|
| 190 |
pipe.scheduler = scheduler
|
| 191 |
pipe.to('cuda')
|
| 192 |
with torch.no_grad():
|
|
|
|
| 258 |
resolution,
|
| 259 |
guidance_scale,
|
| 260 |
num_inference_steps,
|
| 261 |
+
need_lora,
|
| 262 |
progress=gr.Progress(track_tqdm=True),
|
| 263 |
):
|
| 264 |
if randomize_seed:
|
|
|
|
| 268 |
width, height = map(int, resolution.split('x'))
|
| 269 |
|
| 270 |
# Generate image with selected steps
|
| 271 |
+
image_quick = generate_image_with_steps(prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps, need_lora=need_lora)
|
| 272 |
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, final_sigmas_type="sigma_min")
|
| 273 |
# Generate image with 50 steps for high quality
|
| 274 |
negative_prompts = '(worst quality:2), (low quality:2), (normal quality:2), bad anatomy, bad proportions, poorly drawn face, poorly drawn hands, missing fingers, extra limbs, blurry, pixelated, distorted, lowres, jpeg artifacts, watermark, signature, text, (deformed:1.5), (bad hands:1.3), overexposed, underexposed, censored, mutated, extra fingers, cloned face, bad eyes'
|
|
|
|
| 362 |
label="Number of inference steps",
|
| 363 |
)
|
| 364 |
|
| 365 |
+
need_lora = gr.Checkbox(
|
| 366 |
+
label="Use LoRA adapters",
|
| 367 |
+
value=True,
|
| 368 |
+
)
|
| 369 |
+
|
| 370 |
gr.Examples(examples=examples, inputs=[prompt])
|
| 371 |
gr.on(
|
| 372 |
triggers=[run_button.click, prompt.submit],
|
|
|
|
| 379 |
resolution,
|
| 380 |
guidance_scale,
|
| 381 |
num_inference_steps,
|
| 382 |
+
need_lora,
|
| 383 |
],
|
| 384 |
outputs=[result, result_20_steps, seed],
|
| 385 |
)
|