Update app.py
app.py CHANGED

@@ -4,16 +4,13 @@ import numpy as np
 import spaces
 import torch
 import os
-from diffusers import StableDiffusionXLPipeline, AutoencoderKL
 from huggingface_hub import hf_hub_download
+from diffusers import StableDiffusionXLPipeline, AutoencoderKL, EulerAncestralDiscreteScheduler
 from compel import Compel, ReturnedEmbeddingsType
 
-import re
-
 # =====================================
 # Prompt weights
 # =====================================
-import torch
 import re
 def parse_prompt_attention(text):
     re_attention = re.compile(r"""
@@ -224,7 +221,7 @@ if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>你现在运行在CPU上 但是此项目只支持GPU.</p>"
 
 MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE =
+MAX_IMAGE_SIZE = 2048
 
 if torch.cuda.is_available():
     vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
@@ -240,6 +237,8 @@ if torch.cuda.is_available():
         use_safetensors=True,
         torch_dtype=torch.float16,
     )
+    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+    pipe.enable_xformers_memory_efficient_attention()
     pipe.to("cuda")
 
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
@@ -398,4 +397,4 @@ with gr.Blocks(css=css) as demo:
     )
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch(share=True)
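For reference, a minimal self-contained sketch of the pipeline setup this commit arrives at. The base checkpoint ID is an assumption for illustration; the diff does not show which model app.py actually loads.

import torch
from diffusers import StableDiffusionXLPipeline, AutoencoderKL, EulerAncestralDiscreteScheduler

# Fixed fp16 VAE, as in the diff context above.
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)

# Assumed checkpoint for illustration only; app.py's real model ID is outside this diff.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    vae=vae,
    use_safetensors=True,
    torch_dtype=torch.float16,
)

# Swap the default sampler for Euler Ancestral, reusing the existing scheduler config.
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

# Memory-efficient attention; requires the xformers package to be installed.
pipe.enable_xformers_memory_efficient_attention()
pipe.to("cuda")

The other behavioral change is demo.launch(share=True), which serves the Gradio app locally and additionally exposes it through a temporary public share link.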