Update app.py
app.py CHANGED
@@ -2,27 +2,48 @@ import gradio as gr
 import numpy as np
 import random
 
-# import spaces #[uncomment to use ZeroGPU]
+import spaces #[uncomment to use ZeroGPU]
 from diffusers import DiffusionPipeline
 import torch
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "stabilityai/sdxl-turbo" # Replace to the model you would like to use
 
 if torch.cuda.is_available():
     torch_dtype = torch.float16
 else:
     torch_dtype = torch.float32
 
-pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
-pipe = pipe.to(device)
+# (repo id, display label) pairs for the model picker.
+MODEL_OPTIONS = [
+    ("stabilityai/sdxl-turbo", "SDXL Turbo (fast)"),
+    ("hakurei/waifu-diffusion", "Something alternative"),
+]
+DEFAULT_MODEL_ID = "stabilityai/sdxl-turbo"
+
+PIPELINES = {}
+
+def load_pipelines():
+    # SDXL Turbo
+    mid = "stabilityai/sdxl-turbo"
+    pipe = DiffusionPipeline.from_pretrained(mid, torch_dtype=torch_dtype)
+    pipe = pipe.to(device)
+    PIPELINES[mid] = pipe
+
+    # Waifu Diffusion (fine-tuned from Stable Diffusion v1.4)
+    mid = "hakurei/waifu-diffusion"
+    pipe = DiffusionPipeline.from_pretrained(mid, torch_dtype=torch_dtype)
+    pipe = pipe.to(device)
+    PIPELINES[mid] = pipe
+
+load_pipelines()
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
 
-# @spaces.GPU #[uncomment to use ZeroGPU]
+@spaces.GPU #[uncomment to use ZeroGPU]
 def infer(
+    model_id,
     prompt,
     negative_prompt,
     seed,
@@ -37,6 +58,8 @@ def infer(
         seed = random.randint(0, MAX_SEED)
 
     generator = torch.Generator().manual_seed(seed)
+
+    pipe = PIPELINES[model_id]
 
     image = pipe(
         prompt=prompt,
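Note: `load_pipelines()` above eagerly instantiates both models at startup, so `PIPELINES[model_id]` here is a plain dictionary lookup. On memory-constrained hardware a lazy cache is a common alternative; a minimal sketch under that assumption (`get_pipeline` is not part of this commit):

def get_pipeline(model_id):
    # Load a model the first time it is requested, then reuse the cached pipeline.
    if model_id not in PIPELINES:
        pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch_dtype)
        PIPELINES[model_id] = pipe.to(device)
    return PIPELINES[model_id]

With that helper, the lookup above would become `pipe = get_pipeline(model_id)`.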
@@ -105,7 +128,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=1024,  # Replace with defaults that work for your model
+                    value=1024,
                 )
 
             height = gr.Slider(
@@ -113,7 +136,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=1024,  # Replace with defaults that work for your model
+                    value=1024,
                 )
 
         with gr.Row():
@@ -122,7 +145,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=0.0,
                     maximum=10.0,
                     step=0.1,
-                    value=0.0,  # Replace with defaults that work for your model
+                    value=0.0,
                 )
 
             num_inference_steps = gr.Slider(
@@ -130,7 +153,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=1,
                     maximum=50,
                     step=1,
-                    value=2,  # Replace with defaults that work for your model
+                    value=2,
                 )
 
         gr.Examples(examples=examples, inputs=[prompt])
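The hunks above give `infer()` a leading `model_id` parameter and define `MODEL_OPTIONS`, but this view does not include the hunk that adds the selector to the UI. A minimal sketch of the missing wiring, assuming the stock template's `run_button`, `prompt`, and `result` components; `model_selector` is a hypothetical name, not the Space's actual code:

    model_selector = gr.Dropdown(
        label="Model",
        # gr.Dropdown expects (display name, value) tuples, the reverse of the
        # (repo id, label) order used in MODEL_OPTIONS, so flip each pair.
        choices=[(label, repo_id) for repo_id, label in MODEL_OPTIONS],
        value=DEFAULT_MODEL_ID,
    )

    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[
            model_selector,  # becomes infer()'s new first argument, model_id
            prompt,
            negative_prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
        ],
        outputs=[result, seed],
    )

Both snippets would sit inside the existing `with gr.Blocks(css=css) as demo:` block, with the `gr.on(...)` call replacing the template's original one, which lacked `model_selector`.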