Update src/pipeline.py
Browse files — src/pipeline.py (+20 −28)
src/pipeline.py
CHANGED
|
@@ -1,15 +1,11 @@
|
|
| 1 |
import torch
|
| 2 |
from PIL.Image import Image
|
| 3 |
-
from diffusers import StableDiffusionXLPipeline
|
| 4 |
-
from sfast.compilers.diffusion_pipeline_compiler import (compile,
|
| 5 |
-
CompilationConfig)
|
| 6 |
from pipelines.models import TextToImageRequest
|
| 7 |
from torch import Generator
|
| 8 |
|
| 9 |
import inspect
|
| 10 |
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
| 11 |
|
| 12 |
-
import torch
|
| 13 |
from transformers import (
|
| 14 |
CLIPImageProcessor,
|
| 15 |
CLIPTextModel,
|
|
@@ -809,7 +805,7 @@ class StableDiffusionXLPipeline(
|
|
| 809 |
return_dict: bool = True,
|
| 810 |
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 811 |
guidance_rescale: float = 0.0,
|
| 812 |
-
end_cfg: float = 0.
|
| 813 |
original_size: Optional[Tuple[int, int]] = None,
|
| 814 |
crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 815 |
target_size: Optional[Tuple[int, int]] = None,
|
|
@@ -1245,31 +1241,26 @@ class StableDiffusionXLPipeline(
|
|
| 1245 |
|
| 1246 |
return StableDiffusionXLPipelineOutput(images=image)
|
| 1247 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1248 |
|
| 1249 |
-
def load_pipeline() -> StableDiffusionXLPipeline:
    """Load the local SDXL checkpoint, compile it with stable-fast, and warm it up.

    Loads fp16 weights from `./models/newdream-sdxl-20` (offline only), builds a
    stable-fast `CompilationConfig` with whichever optional backends are
    installed, compiles the pipeline, and runs a few warm-up generations so the
    first real request does not pay the compilation cost.

    Returns:
        The compiled, CUDA-resident pipeline, ready for inference.
    """
    pipeline = StableDiffusionXLPipeline.from_pretrained(
        "./models/newdream-sdxl-20",
        torch_dtype=torch.float16,
        local_files_only=True,  # offline: weights must already be on disk
    )
    # pipeline.scheduler = UniPCMultistepScheduler.from_config('./src',)
    pipeline.to("cuda")

    # BUG FIX: `config` was used below without ever being created, so the
    # function raised NameError on first use. Start from sfast's defaults.
    config = CompilationConfig.Default()
    try:
        import xformers  # noqa: F401 -- availability probe only
        config.enable_xformers = True
    except ImportError:
        print('xformers not installed, skip')
    try:
        import triton  # noqa: F401 -- availability probe only
        config.enable_triton = True
    except ImportError:
        print('Triton not installed, skip')
    config.enable_cuda_graph = True

    pipeline = compile(pipeline, config)
    # Warm-up runs trigger tracing/compilation and CUDA graph capture up front.
    for _ in range(4):
        pipeline(prompt="kamala harris", num_inference_steps=20)

    return pipeline
|
| 1275 |
|
|
@@ -1286,5 +1277,6 @@ def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> I
|
|
| 1286 |
width=request.width,
|
| 1287 |
height=request.height,
|
| 1288 |
generator=generator,
|
| 1289 |
-
|
|
|
|
| 1290 |
).images[0]
|
|
|
|
| 1 |
import torch
|
| 2 |
from PIL.Image import Image
|
|
|
|
|
|
|
|
|
|
| 3 |
from pipelines.models import TextToImageRequest
|
| 4 |
from torch import Generator
|
| 5 |
|
| 6 |
import inspect
|
| 7 |
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
| 8 |
|
|
|
|
| 9 |
from transformers import (
|
| 10 |
CLIPImageProcessor,
|
| 11 |
CLIPTextModel,
|
|
|
|
| 805 |
return_dict: bool = True,
|
| 806 |
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 807 |
guidance_rescale: float = 0.0,
|
| 808 |
+
end_cfg: float = 0.73,
|
| 809 |
original_size: Optional[Tuple[int, int]] = None,
|
| 810 |
crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 811 |
target_size: Optional[Tuple[int, int]] = None,
|
|
|
|
| 1241 |
|
| 1242 |
return StableDiffusionXLPipelineOutput(images=image)
|
| 1243 |
|
| 1244 |
+
from onediff.schedulers import EulerDiscreteScheduler
|
| 1245 |
+
from onediffx import compile_pipe
|
| 1246 |
+
|
| 1247 |
+
|
| 1248 |
+
def load_pipeline(pipeline=None) -> StableDiffusionXLPipeline:
    """Prepare an onediffx-compiled SDXL pipeline and warm it up.

    If *pipeline* is not supplied, the local `newdream-sdxl-20` fp16
    checkpoint is loaded from disk (no network access) and its scheduler is
    replaced with an `EulerDiscreteScheduler` built from the existing
    scheduler config. The pipeline is then moved to CUDA, compiled with
    `compile_pipe`, and exercised four times so compilation happens before
    the first real request.

    Returns:
        The compiled, warmed-up pipeline.
    """
    if not pipeline:
        pipeline = StableDiffusionXLPipeline.from_pretrained(
            "./models/newdream-sdxl-20",
            torch_dtype=torch.float16,
            local_files_only=True,  # offline: weights must already be on disk
        )
        pipeline.scheduler = EulerDiscreteScheduler.from_config(
            pipeline.scheduler.config
        )
    pipeline.to("cuda")
    pipeline = compile_pipe(pipeline)

    # Warm-up generations trigger onediff compilation/graph capture up front;
    # end_cfg matches the value used by `infer` so the compiled graph is reused.
    for _ in range(4):
        pipeline(
            prompt="kamala harris flying to the moon",
            num_inference_steps=20,
            end_cfg=0.73,
        )

    return pipeline
|
| 1266 |
|
|
|
|
| 1277 |
width=request.width,
|
| 1278 |
height=request.height,
|
| 1279 |
generator=generator,
|
| 1280 |
+
end_cfg=0.73,
|
| 1281 |
+
num_inference_steps=17,
|
| 1282 |
).images[0]
|