Hugging Face Spaces — commit "Update app.py" (file changed: app.py; Space status: sleeping)
|
@@ -5,9 +5,11 @@ import random
|
|
| 5 |
# import spaces #[uncomment to use ZeroGPU]
|
| 6 |
from diffusers import DiffusionPipeline
|
| 7 |
import torch
|
|
|
|
|
|
|
| 8 |
|
| 9 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 10 |
-
|
| 11 |
|
| 12 |
MODEL_OPTIONS = [
|
| 13 |
("stabilityai/sdxl-turbo", "SDXL Turbo (Быстро)"),
|
|
@@ -24,6 +26,31 @@ else:
|
|
| 24 |
MAX_SEED = np.iinfo(np.int32).max
|
| 25 |
MAX_IMAGE_SIZE = 1024
|
| 26 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
|
| 28 |
# @spaces.GPU #[uncomment to use ZeroGPU]
|
| 29 |
def infer(
|
|
@@ -43,9 +70,7 @@ def infer(
|
|
| 43 |
|
| 44 |
generator = torch.Generator().manual_seed(seed)
|
| 45 |
|
| 46 |
-
pipe =
|
| 47 |
-
pipe = pipe.to(device)
|
| 48 |
-
|
| 49 |
|
| 50 |
image = pipe(
|
| 51 |
prompt=prompt,
|
|
|
|
| 5 |
# import spaces #[uncomment to use ZeroGPU]
|
| 6 |
from diffusers import DiffusionPipeline
|
| 7 |
import torch
|
| 8 |
+
from PIL import Image
|
| 9 |
+
|
| 10 |
|
| 11 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 12 |
+
torch_dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
|
| 13 |
|
| 14 |
MODEL_OPTIONS = [
|
| 15 |
("stabilityai/sdxl-turbo", "SDXL Turbo (Быстро)"),
|
|
|
|
| 26 |
MAX_SEED = np.iinfo(np.int32).max
|
| 27 |
MAX_IMAGE_SIZE = 1024
|
| 28 |
|
| 29 |
+
# Cache of loaded diffusion pipelines, keyed by Hugging Face model id.
PIPELINES = {}


def load_pipelines():
    """Load every supported diffusion pipeline once and cache it in PIPELINES.

    The same load/move/cache stanza was previously copy-pasted per model;
    iterating over the id list keeps one code path and makes adding a model
    a one-line change.  Uses the module-level ``device`` and ``torch_dtype``.
    """
    model_ids = (
        "stabilityai/sdxl-turbo",
        "CompVis/stable-diffusion-v1-4",
        "Qwen/Qwen-Image",
    )
    for model_id in model_ids:
        pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch_dtype)
        # Move to GPU when available so inference doesn't pay the transfer cost.
        PIPELINES[model_id] = pipe.to(device)


# Called at import time on purpose: model downloads happen while the image is
# built / the Space starts, not on the first user request.
load_pipelines()
|
| 52 |
+
|
| 53 |
+
|
| 54 |
|
| 55 |
# @spaces.GPU #[uncomment to use ZeroGPU]
|
| 56 |
def infer(
|
|
|
|
| 70 |
|
| 71 |
generator = torch.Generator().manual_seed(seed)
|
| 72 |
|
| 73 |
+
pipe = PIPELINES[model_id]
|
|
|
|
|
|
|
| 74 |
|
| 75 |
image = pipe(
|
| 76 |
prompt=prompt,
|