Update app.py
Browse files
app.py
CHANGED
|
@@ -67,11 +67,12 @@ def Generate(image_input, prompt, negative_prompt, strength, guidance_scale, num
|
|
| 67 |
return image, f"{minutes:02d}:{seconds:02d}"
|
| 68 |
def Loading(model):
    """Load the Stable Diffusion pipelines for *model* and share their weights.

    Rebuilds the module-level text-to-image pipeline from the given model id,
    disables the safety checker, swaps in an Euler scheduler, and derives the
    img2img pipeline from the same components so both share one set of weights.

    Args:
        model: Hugging Face model id (or local path) passed to `from_pretrained`.

    Returns:
        The model id unchanged, so Gradio can echo it back into the textbox.
    """
    global text2img, img2img
    # fp16 weights halve VRAM use; safetensors avoids pickle-based loading.
    text2img = StableDiffusionPipeline.from_pretrained(
        model, torch_dtype=torch.float16, use_safetensors=True
    ).to(device)
    # NOTE(review): NSFW filtering is deliberately disabled here.
    text2img.safety_checker = None
    text2img.scheduler = EulerDiscreteScheduler.from_config(text2img.scheduler.config)
    if device == "cuda":
        # xformers attention reduces GPU memory use; unavailable on CPU builds.
        text2img.enable_xformers_memory_efficient_attention()
        text2img.vae.enable_xformers_memory_efficient_attention()
    # Reuse the already-loaded components rather than loading the model twice.
    img2img = StableDiffusionImg2ImgPipeline(**text2img.components)
    return model
|
| 77 |
with gr.Blocks() as demo:
|
|
@@ -104,4 +105,4 @@ with gr.Blocks() as demo:
|
|
| 104 |
# Wire the UI events. NOTE(review): in the full file these .click/.change
# registrations appear to run inside the `with gr.Blocks() as demo:` context —
# confirm indentation against the original app.py.
generate.click(
    Generate,
    [image_input, prompt, negative_prompt, strength, guidance_scale,
     num_inference_steps, width, height, seed],
    [image_output, text_output],
)
# Echo the model id back into the same textbox once loading finishes.
loading.click(Loading, model, model)
# Relabel every control when the UI language selector changes.
set_language.change(
    update_language,
    set_language,
    [model, loading, image_input, prompt, negative_prompt, generate,
     strength, guidance_scale, num_inference_steps, width, height, seed],
)

# Queue concurrent requests so long generations don't block other users.
# NOTE(review): `concurrency_count` was removed from `Blocks.queue` in
# Gradio 4.x — confirm the installed Gradio version supports this signature.
demo.queue(concurrency_count=24, max_size=32).launch(max_threads=128)
|
|
|
|
| 67 |
return image, f"{minutes:02d}:{seconds:02d}"
|
| 68 |
def Loading(model):
    """Load the Stable Diffusion pipelines for *model* and share their weights.

    Rebuilds the module-level text-to-image pipeline from the given model id,
    disables the safety checker, swaps in an Euler scheduler, and derives the
    img2img pipeline from the same components so both share one set of weights.

    Args:
        model: Hugging Face model id (or local path) passed to `from_pretrained`.

    Returns:
        The model id unchanged, so Gradio can echo it back into the textbox.
    """
    global text2img, img2img
    # fp16 weights halve VRAM use; safetensors avoids pickle-based loading.
    text2img = StableDiffusionPipeline.from_pretrained(
        model, torch_dtype=torch.float16, use_safetensors=True
    ).to(device)
    # NOTE(review): NSFW filtering is deliberately disabled here.
    text2img.safety_checker = None
    text2img.scheduler = EulerDiscreteScheduler.from_config(text2img.scheduler.config)
    if device == "cuda":
        # xformers attention reduces GPU memory use; unavailable on CPU builds.
        text2img.enable_xformers_memory_efficient_attention()
        text2img.vae.enable_xformers_memory_efficient_attention()
    # Reuse the already-loaded components rather than loading the model twice.
    img2img = StableDiffusionImg2ImgPipeline(**text2img.components)
    return model
|
| 78 |
with gr.Blocks() as demo:
|
|
|
|
| 105 |
# Wire the UI events. NOTE(review): in the full file these .click/.change
# registrations appear to run inside the `with gr.Blocks() as demo:` context —
# confirm indentation against the original app.py.
generate.click(
    Generate,
    [image_input, prompt, negative_prompt, strength, guidance_scale,
     num_inference_steps, width, height, seed],
    [image_output, text_output],
)
# Echo the model id back into the same textbox once loading finishes.
loading.click(Loading, model, model)
# Relabel every control when the UI language selector changes.
set_language.change(
    update_language,
    set_language,
    [model, loading, image_input, prompt, negative_prompt, generate,
     strength, guidance_scale, num_inference_steps, width, height, seed],
)

# Queue concurrent requests so long generations don't block other users.
# NOTE(review): `concurrency_count` was removed from `Blocks.queue` in
# Gradio 4.x — confirm the installed Gradio version supports this signature.
demo.queue(concurrency_count=24, max_size=32).launch(max_threads=128)